xref: /titanic_51/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 98f04078d5fc5800e80b21e0b18abe0024af1cbe)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.  In order to help
57  * understand that function, a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and events that can occur during
59  * the discovery process, nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potential bug
61  * fixer) get an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP while the link is up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
431 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
432     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
433 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
434     int cause);
435 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
436     uint32_t state);
437 static struct fcp_port *fcp_get_port(opaque_t port_handle);
438 static void fcp_unsol_callback(fc_packet_t *fpkt);
439 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
440     uchar_t r_ctl, uchar_t type);
441 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
442 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
443     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
444     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
445 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
446 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
447     int nodma, int flags);
448 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
449 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
450     uchar_t *wwn);
451 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
452     uint32_t d_id);
453 static void fcp_icmd_callback(fc_packet_t *fpkt);
454 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
455     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
456 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
457 static void fcp_scsi_callback(fc_packet_t *fpkt);
458 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
459 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
461 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
462     uint16_t lun_num);
463 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
464     int link_cnt, int tgt_cnt, int cause);
465 static void fcp_finish_init(struct fcp_port *pptr);
466 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
467     int tgt_cnt, int cause);
468 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
469     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
470 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
471     int link_cnt, int tgt_cnt, int nowait, int flags);
472 static void fcp_offline_target_now(struct fcp_port *pptr,
473     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
474 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
475     int tgt_cnt, int flags);
476 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
477     int nowait, int flags);
478 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
479     int tgt_cnt);
480 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
481     int tgt_cnt, int flags);
482 static void fcp_scan_offline_luns(struct fcp_port *pptr);
483 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
484 static void fcp_update_offline_flags(struct fcp_lun *plun);
485 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
486 static void fcp_abort_commands(struct fcp_pkt *head, struct
487     fcp_port *pptr);
488 static void fcp_cmd_callback(fc_packet_t *fpkt);
489 static void fcp_complete_pkt(fc_packet_t *fpkt);
490 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
491     struct fcp_port *pptr);
492 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
493     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
494 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
495 static void fcp_dealloc_lun(struct fcp_lun *plun);
496 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
497     fc_portmap_t *map_entry, int link_cnt);
498 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
499 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
500 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
501     int internal);
502 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
503 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
504     uint32_t s_id, int instance);
505 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
506     int instance);
507 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
508 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
509     int);
510 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
511 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
512 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
513     int flags);
514 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
515 static int fcp_reset_target(struct scsi_address *ap, int level);
516 static int fcp_commoncap(struct scsi_address *ap, char *cap,
517     int val, int tgtonly, int doset);
518 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
519 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
520 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
521     int sleep);
522 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
523     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
524 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
525 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
526 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
527     int lcount, int tcount);
528 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
529 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
530 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
531     int tgt_cnt);
532 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
533     dev_info_t *pdip, caddr_t name);
534 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
535     int lcount, int tcount, int flags, int *circ);
536 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
537     int lcount, int tcount, int flags, int *circ);
538 static void fcp_remove_child(struct fcp_lun *plun);
539 static void fcp_watch(void *arg);
540 static void fcp_check_reset_delay(struct fcp_port *pptr);
541 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
542     struct fcp_lun *rlun, int tgt_cnt);
543 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
544 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
545     uchar_t *wwn, uint16_t lun);
546 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
547     struct fcp_lun *plun);
548 static void fcp_post_callback(struct fcp_pkt *cmd);
549 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
550 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
551 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
552     child_info_t *cip);
553 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
554     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
555     int tgt_cnt, int flags);
556 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
557     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
558     int tgt_cnt, int flags, int wait);
559 static void fcp_retransport_cmd(struct fcp_port *pptr,
560     struct fcp_pkt *cmd);
561 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
562     uint_t statistics);
563 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
564 static void fcp_update_targets(struct fcp_port *pptr,
565     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
566 static int fcp_call_finish_init(struct fcp_port *pptr,
567     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
568 static int fcp_call_finish_init_held(struct fcp_port *pptr,
569     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
570 static void fcp_reconfigure_luns(void * tgt_handle);
571 static void fcp_free_targets(struct fcp_port *pptr);
572 static void fcp_free_target(struct fcp_tgt *ptgt);
573 static int fcp_is_retryable(struct fcp_ipkt *icmd);
574 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
575 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
576 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
577 static void fcp_print_error(fc_packet_t *fpkt);
578 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
579     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
580 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
581 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
582     uint32_t *dev_cnt);
583 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
584 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
585 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
586     struct fcp_ioctl *, struct fcp_port **);
587 static char *fcp_get_lun_path(struct fcp_lun *plun);
588 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
589     int *rval);
590 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
595 static void fcp_reconfig_wait(struct fcp_port *pptr);
596 
597 /*
598  * New functions added for mpxio support
599  */
600 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
601     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
602 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
603     int tcount);
604 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
605     dev_info_t *pdip);
606 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
607 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
608 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
609 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
610 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
611     int what);
612 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
613     fc_packet_t *fpkt);
614 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
615 
616 /*
617  * New functions added for lun masking support
618  */
619 static void fcp_read_blacklist(dev_info_t *dip,
620     struct fcp_black_list_entry **pplun_blacklist);
621 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
622     struct fcp_black_list_entry **pplun_blacklist);
623 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
624     struct fcp_black_list_entry **pplun_blacklist);
625 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
626 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
627 
628 extern struct mod_ops	mod_driverops;
629 /*
630  * This variable is defined in modctl.c and set to '1' after the root driver
631  * and fs are loaded.  It serves as an indication that the root filesystem can
632  * be used.
633  */
634 extern int		modrootloaded;
635 /*
636  * This table contains strings associated with the SCSI sense key codes.  It
637  * is used by FCP to print a clear explanation of the code returned in the
638  * sense information by a device.
639  */
640 extern char		*sense_keys[];
641 /*
642  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
643  * under this device that the paths to a physical device are created when
644  * MPxIO is used.
645  */
646 extern dev_info_t	*scsi_vhci_dip;
647 
648 /*
649  * Report lun processing
650  */
651 #define	FCP_LUN_ADDRESSING		0x80
652 #define	FCP_PD_ADDRESSING		0x00
653 #define	FCP_VOLUME_ADDRESSING		0x40
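/*
 * Note: these correspond to the SAM "address method" bits (bits 7:6 of the
 * first byte of each 8-byte LUN entry in REPORT LUNS data): 0x00 is
 * peripheral device addressing, 0x40 is flat space (volume) addressing and
 * 0x80 is logical unit addressing.
 */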
654 
655 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
656 #define	MAX_INT_DMA			0x7fffffff
657 #define	FCP_MAX_SENSE_LEN		252
658 #define	FCP_MAX_RESPONSE_LEN		0xffffff
659 /*
660  * Property definitions
661  */
662 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
663 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
664 #define	TARGET_PROP	(char *)fcp_target_prop
665 #define	LUN_PROP	(char *)fcp_lun_prop
666 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
667 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
668 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
669 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
670 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
671 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
672 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
673 /*
674  * Shorthand macros.
675  */
676 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
677 #define	LUN_TGT		(plun->lun_tgt)
678 
679 /*
680  * Driver private macros
681  */
682 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
683 			((x) >= 'a' && (x) <= 'f') ?			\
684 			((x) - 'a' + 10) : ((x) - 'A' + 10))
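/* For example, FCP_ATOB('7') evaluates to 7 and FCP_ATOB('c') to 12. */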
685 
686 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
687 
688 #define	FCP_N_NDI_EVENTS						\
689 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
690 
691 #define	FCP_LINK_STATE_CHANGED(p, c)			\
692 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
693 
694 #define	FCP_TGT_STATE_CHANGED(t, c)			\
695 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
696 
697 #define	FCP_STATE_CHANGED(p, t, c)		\
698 	(FCP_TGT_STATE_CHANGED(t, c))
699 
700 #define	FCP_MUST_RETRY(fpkt)				\
701 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
702 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
703 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
704 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
705 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
706 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
707 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
708 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
709 
710 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
711 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
712 	(es)->es_add_code == 0x3f &&		\
713 	(es)->es_qual_code == 0x0e)
714 
715 #define	FCP_SENSE_NO_LUN(es)			\
716 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
717 	(es)->es_add_code == 0x25 &&		\
718 	(es)->es_qual_code == 0x0)
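/*
 * The additional sense code/qualifier pairs tested above are the standard
 * SCSI values for "reported luns data has changed" (0x3f/0x0e) and
 * "logical unit not supported" (0x25/0x00).
 */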
719 
720 #define	FCP_VERSION		"1.189"
721 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
722 
723 #define	FCP_NUM_ELEMENTS(array)			\
724 	(sizeof (array) / sizeof ((array)[0]))
725 
726 /*
727  * Debugging, Error reporting, and tracing
728  */
729 #define	FCP_LOG_SIZE		1024 * 1024
730 
731 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
732 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
733 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
734 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
735 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
736 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
737 #define	FCP_LEVEL_7		0x00040
738 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
739 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
740 
741 
742 
743 /*
744  * Log contents to system messages file
745  */
746 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
747 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
748 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
749 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
750 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
751 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
752 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
753 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
754 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
755 
756 
757 /*
758  * Log contents to trace buffer
759  */
760 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
761 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
762 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
763 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
764 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
765 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
766 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
767 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
768 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
769 
770 
771 /*
772  * Log contents to both system messages file and trace buffer
773  */
774 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
775 				FC_TRACE_LOG_MSG)
776 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
777 				FC_TRACE_LOG_MSG)
778 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
779 				FC_TRACE_LOG_MSG)
780 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
781 				FC_TRACE_LOG_MSG)
782 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
783 				FC_TRACE_LOG_MSG)
784 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
785 				FC_TRACE_LOG_MSG)
786 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
787 				FC_TRACE_LOG_MSG)
788 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
789 				FC_TRACE_LOG_MSG)
790 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
791 				FC_TRACE_LOG_MSG)
792 #ifdef DEBUG
793 #define	FCP_DTRACE	fc_trace_debug
794 #else
795 #define	FCP_DTRACE
796 #endif
797 
798 #define	FCP_TRACE	fc_trace_debug
799 
800 static struct cb_ops fcp_cb_ops = {
801 	fcp_open,			/* open */
802 	fcp_close,			/* close */
803 	nodev,				/* strategy */
804 	nodev,				/* print */
805 	nodev,				/* dump */
806 	nodev,				/* read */
807 	nodev,				/* write */
808 	fcp_ioctl,			/* ioctl */
809 	nodev,				/* devmap */
810 	nodev,				/* mmap */
811 	nodev,				/* segmap */
812 	nochpoll,			/* chpoll */
813 	ddi_prop_op,			/* cb_prop_op */
814 	0,				/* streamtab */
815 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
816 	CB_REV,				/* rev */
817 	nodev,				/* aread */
818 	nodev				/* awrite */
819 };
820 
821 
822 static struct dev_ops fcp_ops = {
823 	DEVO_REV,
824 	0,
825 	ddi_getinfo_1to1,
826 	nulldev,		/* identify */
827 	nulldev,		/* probe */
828 	fcp_attach,		/* attach and detach are mandatory */
829 	fcp_detach,
830 	nodev,			/* reset */
831 	&fcp_cb_ops,		/* cb_ops */
832 	NULL,			/* bus_ops */
833 	NULL,			/* power */
834 };
835 
836 
837 char *fcp_version = FCP_NAME_VERSION;
838 
839 static struct modldrv modldrv = {
840 	&mod_driverops,
841 	FCP_NAME_VERSION,
842 	&fcp_ops
843 };
844 
845 
846 static struct modlinkage modlinkage = {
847 	MODREV_1,
848 	&modldrv,
849 	NULL
850 };
851 
852 
853 static fc_ulp_modinfo_t fcp_modinfo = {
854 	&fcp_modinfo,			/* ulp_handle */
855 	FCTL_ULP_MODREV_4,		/* ulp_rev */
856 	FC4_SCSI_FCP,			/* ulp_type */
857 	"fcp",				/* ulp_name */
858 	FCP_STATEC_MASK,		/* ulp_statec_mask */
859 	fcp_port_attach,		/* ulp_port_attach */
860 	fcp_port_detach,		/* ulp_port_detach */
861 	fcp_port_ioctl,			/* ulp_port_ioctl */
862 	fcp_els_callback,		/* ulp_els_callback */
863 	fcp_data_callback,		/* ulp_data_callback */
864 	fcp_statec_callback		/* ulp_statec_callback */
865 };
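
/*
 * This structure is handed to the transport with fc_ulp_add() in _init()
 * below; the transport then invokes the callbacks registered here for port
 * attach/detach, port ioctls, unsolicited ELS/data buffers and state change
 * notifications.
 */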
866 
867 #ifdef	DEBUG
868 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
869 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
870 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
871 				FCP_LEVEL_6 | FCP_LEVEL_7)
872 #else
873 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
874 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
875 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
876 				FCP_LEVEL_6 | FCP_LEVEL_7)
877 #endif
878 
879 /* FCP global variables */
880 int			fcp_bus_config_debug = 0;
881 static int		fcp_log_size = FCP_LOG_SIZE;
882 static int		fcp_trace = FCP_TRACE_DEFAULT;
883 static fc_trace_logq_t	*fcp_logq = NULL;
884 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
885 /*
886  * Auto-configuration is enabled by default.  The only way to disable it is
887  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
888  */
889 static int		fcp_enable_auto_configuration = 1;
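/*
 * For example (illustrative only, assuming the property is read as a
 * boolean at port attach time), auto-configuration could be disabled by
 * adding the following line to fcp.conf:
 *
 *	manual_configuration_only=1;
 */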
890 static int		fcp_max_bus_config_retries	= 4;
891 static int		fcp_lun_ready_retry = 300;
892 /*
893  * The value assigned to the following variable has changed several times due
894  * to a problem with the data underruns reporting of some firmware(s).	The
895  * current value of 50 gives a timeout value of 25 seconds for a max number
896  * of 256 LUNs.
897  */
898 static int		fcp_max_target_retries = 50;
899 /*
900  * Watchdog variables
901  * ------------------
902  *
903  * fcp_watchdog_init
904  *
905  *	Indicates if the watchdog timer is running or not.  This is actually
906  *	a counter of the number of Fibre Channel ports that attached.  When
907  *	the first port attaches the watchdog is started.  When the last port
908  *	detaches the watchdog timer is stopped.
909  *
910  * fcp_watchdog_time
911  *
912  *	This is the watchdog clock counter.  It is incremented by
913  *	fcp_watchdog_timeout each time the watchdog timer expires.
914  *
915  * fcp_watchdog_timeout
916  *
917  *	Increment value of the variable fcp_watchdog_time as well as the
918  *	timeout value of the watchdog timer.  The unit is 1 second.  It
919  *	is strange that this is not a #define but a variable since the code
920  *	never changes this value.  The reason why it can be said that the
921  *	unit is 1 second is because the number of ticks for the watchdog
922  *	timer is determined like this:
923  *
924  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
925  *				  drv_usectohz(1000000);
926  *
927  *	The value 1000000 is hard coded in the code.
928  *
929  * fcp_watchdog_tick
930  *
931  *	Watchdog timer value in ticks.
932  */
933 static int		fcp_watchdog_init = 0;
934 static int		fcp_watchdog_time = 0;
935 static int		fcp_watchdog_timeout = 1;
936 static int		fcp_watchdog_tick;
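
/*
 * Illustrative sketch (not a verbatim copy of the driver code): the
 * watchdog is armed and re-armed roughly along these lines, which is why
 * fcp_watchdog_timeout can be said to be in units of one second:
 *
 *	fcp_watchdog_tick = fcp_watchdog_timeout * drv_usectohz(1000000);
 *	fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
 *
 * and each expiration of fcp_watch() is assumed to advance the clock with
 * fcp_watchdog_time += fcp_watchdog_timeout before re-arming the timeout.
 */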
937 
938 /*
939  * fcp_offline_delay is a global variable to enable customisation of
940  * the timeout on link offlines or RSCNs. The default value is set
941  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
942  * specified in FCP4 Chapter 11 (see www.t10.org).
943  *
944  * The variable fcp_offline_delay is specified in SECONDS.
945  *
946  * If we made this a static var then the user would not be able to
947  * change it. This variable is set in fcp_attach().
948  */
949 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
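
/*
 * For example (illustrative only), the delay could be raised to 30 seconds
 * by adding the following line to fcp.conf, which fcp_attach() reads back
 * with ddi_prop_get_int():
 *
 *	fcp_offline_delay=30;
 */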
950 
951 static void		*fcp_softstate = NULL; /* for soft state */
952 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
953 static kmutex_t		fcp_global_mutex;
954 static kmutex_t		fcp_ioctl_mutex;
955 static dev_info_t	*fcp_global_dip = NULL;
956 static timeout_id_t	fcp_watchdog_id;
957 const char		*fcp_lun_prop = "lun";
958 const char		*fcp_sam_lun_prop = "sam-lun";
959 const char		*fcp_target_prop = "target";
960 /*
961  * NOTE: consumers of "node-wwn" property include stmsboot in ON
962  * consolidation.
963  */
964 const char		*fcp_node_wwn_prop = "node-wwn";
965 const char		*fcp_port_wwn_prop = "port-wwn";
966 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
967 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
968 const char		*fcp_manual_config_only = "manual_configuration_only";
969 const char		*fcp_init_port_prop = "initiator-port";
970 const char		*fcp_tgt_port_prop = "target-port";
971 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
972 
973 static struct fcp_port	*fcp_port_head = NULL;
974 static ddi_eventcookie_t	fcp_insert_eid;
975 static ddi_eventcookie_t	fcp_remove_eid;
976 
977 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
978 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
979 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
980 };
981 
982 /*
983  * List of valid commands for the scsi_ioctl call
984  */
985 static uint8_t scsi_ioctl_list[] = {
986 	SCMD_INQUIRY,
987 	SCMD_REPORT_LUN,
988 	SCMD_READ_CAPACITY
989 };
990 
991 /*
992  * This is used to dummy up a REPORT LUNS response for cases
993  * where the target doesn't support the command.
994  */
995 static uchar_t fcp_dummy_lun[] = {
996 	0x00,		/* MSB length (length = no of luns * 8) */
997 	0x00,
998 	0x00,
999 	0x08,		/* LSB length */
1000 	0x00,		/* MSB reserved */
1001 	0x00,
1002 	0x00,
1003 	0x00,		/* LSB reserved */
1004 	FCP_PD_ADDRESSING,
1005 	0x00,		/* LUN is ZERO at the first level */
1006 	0x00,
1007 	0x00,		/* second level is zero */
1008 	0x00,
1009 	0x00,		/* third level is zero */
1010 	0x00,
1011 	0x00		/* fourth level is zero */
1012 };
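
/*
 * The 4-byte length header above (0x00000008) advertises a single 8-byte
 * LUN entry, i.e. exactly one lun, lun 0, using peripheral device
 * addressing.
 */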
1013 
1014 static uchar_t fcp_alpa_to_switch[] = {
1015 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1016 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1017 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1018 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1019 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1020 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1021 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1022 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1023 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1024 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1025 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1026 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1027 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1028 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1029 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1030 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1031 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1032 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1033 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1034 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1035 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1036 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1037 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1038 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1039 };
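
/*
 * Descriptive note: this table maps a Fibre Channel arbitrated loop
 * physical address (AL_PA), used as the index, to the corresponding loop
 * index ("switch setting"); for example, fcp_alpa_to_switch[0x01] is 0x7d,
 * the loop index assigned to AL_PA 0x01.  A value of 0x00 generally marks
 * an index that is not a valid AL_PA.
 */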
1040 
1041 static caddr_t pid = "SESS01	      ";
1042 
1043 #if	!defined(lint)
1044 
1045 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1046     fcp_port::fcp_next fcp_watchdog_id))
1047 
1048 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1049 
1050 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1051     fcp_insert_eid
1052     fcp_remove_eid
1053     fcp_watchdog_time))
1054 
1055 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1056     fcp_cb_ops
1057     fcp_ops
1058     callb_cpr))
1059 
1060 #endif /* lint */
1061 
1062 /*
1063  * This table is used to determine whether or not it's safe to copy in
1064  * the target node name for a lun.  Since all luns behind the same target
1065  * have the same wwnn, only targets that do not support multiple luns are
1066  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1067  */
1068 
1069 char *fcp_symmetric_disk_table[] = {
1070 	"SEAGATE ST",
1071 	"IBM	 DDYFT",
1072 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1073 	"SUN	 SENA",		/* SES device */
1074 	"SUN	 SESS01"	/* VICOM SVE box */
1075 };
1076 
1077 int fcp_symmetric_disk_table_size =
1078 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
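
/*
 * Each entry above is intended to be an INQUIRY vendor id (space padded to
 * 8 bytes) followed by a product id prefix; presumably the probe code
 * compares it against the start of the concatenated vendor/product fields
 * of the standard INQUIRY data.
 */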
1079 
1080 /*
1081  * The _init(9e) return value should be that of mod_install(9f).  Under
1082  * some circumstances, a failure may not be related to mod_install(9f) and
1083  * one would then require a return value to indicate the failure.  Looking
1084  * at mod_install(9f), it is expected to return 0 for success and non-zero
1085  * for failure.  mod_install(9f) for device drivers further goes down the
1086  * calling chain and ends up in ddi_installdrv(), whose return values are
1087  * DDI_SUCCESS and DDI_FAILURE.  There are also other functions in the
1088  * calling chain of mod_install(9f) which return values like EINVAL, and
1089  * some even return -1.
1090  *
1091  * To work around the vagaries of the mod_install() calling chain, return
1092  * either 0 or ENODEV depending on the success or failure of mod_install().
1093  */
1094 int
1095 _init(void)
1096 {
1097 	int rval;
1098 
1099 	/*
1100 	 * Initialize the soft state framework here, before registering
1101 	 * with the transport, so that ddi_soft_state_zalloc() can be done later.
1102 	 */
1103 	if (ddi_soft_state_init(&fcp_softstate,
1104 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1105 		return (EINVAL);
1106 	}
1107 
1108 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1109 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1110 
1111 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1112 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1113 		mutex_destroy(&fcp_global_mutex);
1114 		mutex_destroy(&fcp_ioctl_mutex);
1115 		ddi_soft_state_fini(&fcp_softstate);
1116 		return (ENODEV);
1117 	}
1118 
1119 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1120 
1121 	if ((rval = mod_install(&modlinkage)) != 0) {
1122 		fc_trace_free_logq(fcp_logq);
1123 		(void) fc_ulp_remove(&fcp_modinfo);
1124 		mutex_destroy(&fcp_global_mutex);
1125 		mutex_destroy(&fcp_ioctl_mutex);
1126 		ddi_soft_state_fini(&fcp_softstate);
1127 		rval = ENODEV;
1128 	}
1129 
1130 	return (rval);
1131 }
1132 
1133 
1134 /*
1135  * the system is done with us as a driver, so clean up
1136  */
1137 int
1138 _fini(void)
1139 {
1140 	int rval;
1141 
1142 	/*
1143 	 * don't start cleaning up until we know that the module remove
1144 	 * has worked  -- if this works, then we know that each instance
1145 	 * has successfully been DDI_DETACHed
1146 	 */
1147 	if ((rval = mod_remove(&modlinkage)) != 0) {
1148 		return (rval);
1149 	}
1150 
1151 	(void) fc_ulp_remove(&fcp_modinfo);
1152 
1153 	ddi_soft_state_fini(&fcp_softstate);
1154 	mutex_destroy(&fcp_global_mutex);
1155 	mutex_destroy(&fcp_ioctl_mutex);
1156 	fc_trace_free_logq(fcp_logq);
1157 
1158 	return (rval);
1159 }
1160 
1161 
1162 int
1163 _info(struct modinfo *modinfop)
1164 {
1165 	return (mod_info(&modlinkage, modinfop));
1166 }
1167 
1168 
1169 /*
1170  * attach the module
1171  */
1172 static int
1173 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1174 {
1175 	int rval = DDI_SUCCESS;
1176 
1177 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1178 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1179 
1180 	if (cmd == DDI_ATTACH) {
1181 		/* The FCP pseudo device is created here. */
1182 		mutex_enter(&fcp_global_mutex);
1183 		fcp_global_dip = devi;
1184 		mutex_exit(&fcp_global_mutex);
1185 
1186 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1187 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1188 			ddi_report_dev(fcp_global_dip);
1189 		} else {
1190 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1191 			mutex_enter(&fcp_global_mutex);
1192 			fcp_global_dip = NULL;
1193 			mutex_exit(&fcp_global_mutex);
1194 
1195 			rval = DDI_FAILURE;
1196 		}
1197 		/*
1198 		 * We check the fcp_offline_delay property at this
1199 		 * point. This variable is global for the driver,
1200 		 * not specific to an instance.
1201 		 *
1202 		 * We do not recommend setting the value to less
1203 		 * than 10 seconds (RA_TOV_els), or greater than
1204 		 * 60 seconds.
1205 		 */
1206 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1207 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1208 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1209 		if ((fcp_offline_delay < 10) ||
1210 		    (fcp_offline_delay > 60)) {
1211 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1212 			    "to %d second(s). This is outside the "
1213 			    "recommended range of 10..60 seconds.",
1214 			    fcp_offline_delay);
1215 		}
1216 	}
1217 
1218 	return (rval);
1219 }
1220 
1221 
1222 /*ARGSUSED*/
1223 static int
1224 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1225 {
1226 	int	res = DDI_SUCCESS;
1227 
1228 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1229 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1230 
1231 	if (cmd == DDI_DETACH) {
1232 		/*
1233 		 * Check if there are active ports/threads. If there
1234 		 * are any, we will fail, else we will succeed (there
1235 		 * should not be much to clean up)
1236 		 */
1237 		mutex_enter(&fcp_global_mutex);
1238 		FCP_DTRACE(fcp_logq, "fcp",
1239 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1240 		    (void *) fcp_port_head);
1241 
1242 		if (fcp_port_head == NULL) {
1243 			ddi_remove_minor_node(fcp_global_dip, NULL);
1244 			fcp_global_dip = NULL;
1245 			mutex_exit(&fcp_global_mutex);
1246 		} else {
1247 			mutex_exit(&fcp_global_mutex);
1248 			res = DDI_FAILURE;
1249 		}
1250 	}
1251 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1252 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1253 
1254 	return (res);
1255 }
1256 
1257 
1258 /* ARGSUSED */
1259 static int
1260 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1261 {
1262 	if (otype != OTYP_CHR) {
1263 		return (EINVAL);
1264 	}
1265 
1266 	/*
1267 	 * Allow only root to talk.
1268 	 */
1269 	if (drv_priv(credp)) {
1270 		return (EPERM);
1271 	}
1272 
1273 	mutex_enter(&fcp_global_mutex);
1274 	if (fcp_oflag & FCP_EXCL) {
1275 		mutex_exit(&fcp_global_mutex);
1276 		return (EBUSY);
1277 	}
1278 
1279 	if (flag & FEXCL) {
1280 		if (fcp_oflag & FCP_OPEN) {
1281 			mutex_exit(&fcp_global_mutex);
1282 			return (EBUSY);
1283 		}
1284 		fcp_oflag |= FCP_EXCL;
1285 	}
1286 	fcp_oflag |= FCP_OPEN;
1287 	mutex_exit(&fcp_global_mutex);
1288 
1289 	return (0);
1290 }
1291 
1292 
1293 /* ARGSUSED */
1294 static int
1295 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1296 {
1297 	if (otype != OTYP_CHR) {
1298 		return (EINVAL);
1299 	}
1300 
1301 	mutex_enter(&fcp_global_mutex);
1302 	if (!(fcp_oflag & FCP_OPEN)) {
1303 		mutex_exit(&fcp_global_mutex);
1304 		return (ENODEV);
1305 	}
1306 	fcp_oflag = FCP_IDLE;
1307 	mutex_exit(&fcp_global_mutex);
1308 
1309 	return (0);
1310 }
1311 
1312 
1313 /*
1314  * fcp_ioctl
1315  *	Entry point for the FCP ioctls
1316  *
1317  * Input:
1318  *	See ioctl(9E)
1319  *
1320  * Output:
1321  *	See ioctl(9E)
1322  *
1323  * Returns:
1324  *	See ioctl(9E)
1325  *
1326  * Context:
1327  *	Kernel context.
1328  */
1329 /* ARGSUSED */
1330 static int
1331 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1332     int *rval)
1333 {
1334 	int			ret = 0;
1335 
1336 	mutex_enter(&fcp_global_mutex);
1337 	if (!(fcp_oflag & FCP_OPEN)) {
1338 		mutex_exit(&fcp_global_mutex);
1339 		return (ENXIO);
1340 	}
1341 	mutex_exit(&fcp_global_mutex);
1342 
1343 	switch (cmd) {
1344 	case FCP_TGT_INQUIRY:
1345 	case FCP_TGT_CREATE:
1346 	case FCP_TGT_DELETE:
1347 		ret = fcp_setup_device_data_ioctl(cmd,
1348 		    (struct fcp_ioctl *)data, mode, rval);
1349 		break;
1350 
1351 	case FCP_TGT_SEND_SCSI:
1352 		mutex_enter(&fcp_ioctl_mutex);
1353 		ret = fcp_setup_scsi_ioctl(
1354 		    (struct fcp_scsi_cmd *)data, mode, rval);
1355 		mutex_exit(&fcp_ioctl_mutex);
1356 		break;
1357 
1358 	case FCP_STATE_COUNT:
1359 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1360 		    mode, rval);
1361 		break;
1362 	case FCP_GET_TARGET_MAPPINGS:
1363 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1364 		    mode, rval);
1365 		break;
1366 	default:
1367 		fcp_log(CE_WARN, NULL,
1368 		    "!Invalid ioctl opcode = 0x%x", cmd);
1369 		ret	= EINVAL;
1370 	}
1371 
1372 	return (ret);
1373 }
1374 
1375 
1376 /*
1377  * fcp_setup_device_data_ioctl
1378  *	Setup handler for the "device data" style of
1379  *	ioctl for FCP.	See "fcp_util.h" for data structure
1380  *	definition.
1381  *
1382  * Input:
1383  *	cmd	= FCP ioctl command
1384  *	data	= ioctl data
1385  *	mode	= See ioctl(9E)
1386  *
1387  * Output:
1388  *	data	= ioctl data
1389  *	rval	= return value - see ioctl(9E)
1390  *
1391  * Returns:
1392  *	See ioctl(9E)
1393  *
1394  * Context:
1395  *	Kernel context.
1396  */
1397 /* ARGSUSED */
1398 static int
1399 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1400     int *rval)
1401 {
1402 	struct fcp_port	*pptr;
1403 	struct	device_data	*dev_data;
1404 	uint32_t		link_cnt;
1405 	la_wwn_t		*wwn_ptr = NULL;
1406 	struct fcp_tgt		*ptgt = NULL;
1407 	struct fcp_lun		*plun = NULL;
1408 	int			i, error;
1409 	struct fcp_ioctl	fioctl;
1410 
1411 #ifdef	_MULTI_DATAMODEL
1412 	switch (ddi_model_convert_from(mode & FMODELS)) {
1413 	case DDI_MODEL_ILP32: {
1414 		struct fcp32_ioctl f32_ioctl;
1415 
1416 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1417 		    sizeof (struct fcp32_ioctl), mode)) {
1418 			return (EFAULT);
1419 		}
1420 		fioctl.fp_minor = f32_ioctl.fp_minor;
1421 		fioctl.listlen = f32_ioctl.listlen;
1422 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1423 		break;
1424 	}
1425 	case DDI_MODEL_NONE:
1426 		if (ddi_copyin((void *)data, (void *)&fioctl,
1427 		    sizeof (struct fcp_ioctl), mode)) {
1428 			return (EFAULT);
1429 		}
1430 		break;
1431 	}
1432 
1433 #else	/* _MULTI_DATAMODEL */
1434 	if (ddi_copyin((void *)data, (void *)&fioctl,
1435 	    sizeof (struct fcp_ioctl), mode)) {
1436 		return (EFAULT);
1437 	}
1438 #endif	/* _MULTI_DATAMODEL */
1439 
1440 	/*
1441 	 * Right now we can assume that the minor number matches with
1442 	 * this instance of fp. If this changes we will need to
1443 	 * revisit this logic.
1444 	 */
1445 	mutex_enter(&fcp_global_mutex);
1446 	pptr = fcp_port_head;
1447 	while (pptr) {
1448 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1449 			break;
1450 		} else {
1451 			pptr = pptr->port_next;
1452 		}
1453 	}
1454 	mutex_exit(&fcp_global_mutex);
1455 	if (pptr == NULL) {
1456 		return (ENXIO);
1457 	}
1458 	mutex_enter(&pptr->port_mutex);
1459 
1460 
1461 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1462 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1463 		mutex_exit(&pptr->port_mutex);
1464 		return (ENOMEM);
1465 	}
1466 
1467 	if (ddi_copyin(fioctl.list, dev_data,
1468 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1469 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1470 		mutex_exit(&pptr->port_mutex);
1471 		return (EFAULT);
1472 	}
1473 	link_cnt = pptr->port_link_cnt;
1474 
1475 	if (cmd == FCP_TGT_INQUIRY) {
1476 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1477 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1478 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1479 			/* This ioctl is requesting INQ info of local HBA */
1480 			mutex_exit(&pptr->port_mutex);
1481 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1482 			dev_data[0].dev_status = 0;
1483 			if (ddi_copyout(dev_data, fioctl.list,
1484 			    (sizeof (struct device_data)) * fioctl.listlen,
1485 			    mode)) {
1486 				kmem_free(dev_data,
1487 				    sizeof (*dev_data) * fioctl.listlen);
1488 				return (EFAULT);
1489 			}
1490 			kmem_free(dev_data,
1491 			    sizeof (*dev_data) * fioctl.listlen);
1492 #ifdef	_MULTI_DATAMODEL
1493 			switch (ddi_model_convert_from(mode & FMODELS)) {
1494 			case DDI_MODEL_ILP32: {
1495 				struct fcp32_ioctl f32_ioctl;
1496 				f32_ioctl.fp_minor = fioctl.fp_minor;
1497 				f32_ioctl.listlen = fioctl.listlen;
1498 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1499 				if (ddi_copyout((void *)&f32_ioctl,
1500 				    (void *)data,
1501 				    sizeof (struct fcp32_ioctl), mode)) {
1502 					return (EFAULT);
1503 				}
1504 				break;
1505 			}
1506 			case DDI_MODEL_NONE:
1507 				if (ddi_copyout((void *)&fioctl, (void *)data,
1508 				    sizeof (struct fcp_ioctl), mode)) {
1509 					return (EFAULT);
1510 				}
1511 				break;
1512 			}
1513 #else	/* _MULTI_DATAMODEL */
1514 			if (ddi_copyout((void *)&fioctl, (void *)data,
1515 			    sizeof (struct fcp_ioctl), mode)) {
1516 				return (EFAULT);
1517 			}
1518 #endif	/* _MULTI_DATAMODEL */
1519 			return (0);
1520 		}
1521 	}
1522 
1523 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1524 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1525 		mutex_exit(&pptr->port_mutex);
1526 		return (ENXIO);
1527 	}
1528 
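	/*
	 * Walk the caller's list; bail out of the loop if the link
	 * state changes (port_link_cnt no longer matches the snapshot).
	 */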
1529 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1530 	    i++) {
1531 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1532 
1533 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1534 
1535 
1536 		dev_data[i].dev_status = ENXIO;
1537 
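		/*
		 * Look up the target by port WWN.  If FCP does not know
		 * about it, ask the transport whether the remote port
		 * exists so we can distinguish ENODEV from EAGAIN.
		 */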
1538 		if ((ptgt = fcp_lookup_target(pptr,
1539 		    (uchar_t *)wwn_ptr)) == NULL) {
1540 			mutex_exit(&pptr->port_mutex);
1541 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1542 			    wwn_ptr, &error, 0) == NULL) {
1543 				dev_data[i].dev_status = ENODEV;
1544 				mutex_enter(&pptr->port_mutex);
1545 				continue;
1546 			} else {
1547 
1548 				dev_data[i].dev_status = EAGAIN;
1549 
1550 				mutex_enter(&pptr->port_mutex);
1551 				continue;
1552 			}
1553 		} else {
1554 			mutex_enter(&ptgt->tgt_mutex);
1555 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1556 			    FCP_TGT_BUSY)) {
1557 				dev_data[i].dev_status = EAGAIN;
1558 				mutex_exit(&ptgt->tgt_mutex);
1559 				continue;
1560 			}
1561 
1562 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1563 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1564 					dev_data[i].dev_status = ENOTSUP;
1565 				} else {
1566 					dev_data[i].dev_status = ENXIO;
1567 				}
1568 				mutex_exit(&ptgt->tgt_mutex);
1569 				continue;
1570 			}
1571 
1572 			switch (cmd) {
1573 			case FCP_TGT_INQUIRY:
1574 				/*
1575 				 * We report the device type of LUN 0
1576 				 * only, even though in some cases (like
1577 				 * maxstrat) the LUN 0 device type may be
1578 				 * 0x3f (invalid).  The reason is that for
1579 				 * bridge boxes the target appears as a
1580 				 * set of LUNs, and the first LUN could be
1581 				 * a device that the utility may not care
1582 				 * about (like a tape device).
1583 				 */
1584 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1585 				dev_data[i].dev_status = 0;
1586 				mutex_exit(&ptgt->tgt_mutex);
1587 
1588 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1589 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1590 				} else {
1591 					dev_data[i].dev0_type = plun->lun_type;
1592 				}
1593 				mutex_enter(&ptgt->tgt_mutex);
1594 				break;
1595 
1596 			case FCP_TGT_CREATE:
1597 				mutex_exit(&ptgt->tgt_mutex);
1598 				mutex_exit(&pptr->port_mutex);
1599 
1600 				/*
1601 				 * Serialize state change callbacks;
1602 				 * only one callback is handled at a
1603 				 * time.
1604 				 */
1605 				mutex_enter(&fcp_global_mutex);
1606 				if (fcp_oflag & FCP_BUSY) {
1607 					mutex_exit(&fcp_global_mutex);
1608 					if (dev_data) {
1609 						kmem_free(dev_data,
1610 						    sizeof (*dev_data) *
1611 						    fioctl.listlen);
1612 					}
1613 					return (EBUSY);
1614 				}
1615 				fcp_oflag |= FCP_BUSY;
1616 				mutex_exit(&fcp_global_mutex);
1617 
1618 				dev_data[i].dev_status =
1619 				    fcp_create_on_demand(pptr,
1620 				    wwn_ptr->raw_wwn);
1621 
1622 				if (dev_data[i].dev_status != 0) {
1623 					char	buf[25];
1624 					int	j;
1625 
1626 					for (j = 0; j < FC_WWN_SIZE; j++) {
1627 						(void) sprintf(&buf[j << 1],
1628 						    "%02x",
1629 						    wwn_ptr->raw_wwn[j]);
1630 					}
1631 					fcp_log(CE_WARN, pptr->port_dip,
1632 					    "!Failed to create nodes for"
1633 					    " pwwn=%s; error=%x", buf,
1634 					    dev_data[i].dev_status);
1635 				}
1636 
1637 				/* allow state change call backs again */
1638 				mutex_enter(&fcp_global_mutex);
1639 				fcp_oflag &= ~FCP_BUSY;
1640 				mutex_exit(&fcp_global_mutex);
1641 
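				/*
				 * Re-acquire the mutexes dropped before
				 * calling fcp_create_on_demand().
				 */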
1642 				mutex_enter(&pptr->port_mutex);
1643 				mutex_enter(&ptgt->tgt_mutex);
1644 
1645 				break;
1646 
1647 			case FCP_TGT_DELETE:
1648 				break;
1649 
1650 			default:
1651 				fcp_log(CE_WARN, pptr->port_dip,
1652 				    "!Invalid device data ioctl "
1653 				    "opcode = 0x%x", cmd);
1654 			}
1655 			mutex_exit(&ptgt->tgt_mutex);
1656 		}
1657 	}
1658 	mutex_exit(&pptr->port_mutex);
1659 
1660 	if (ddi_copyout(dev_data, fioctl.list,
1661 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1662 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1663 		return (EFAULT);
1664 	}
1665 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1666 
1667 #ifdef	_MULTI_DATAMODEL
1668 	switch (ddi_model_convert_from(mode & FMODELS)) {
1669 	case DDI_MODEL_ILP32: {
1670 		struct fcp32_ioctl f32_ioctl;
1671 
1672 		f32_ioctl.fp_minor = fioctl.fp_minor;
1673 		f32_ioctl.listlen = fioctl.listlen;
1674 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1675 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1676 		    sizeof (struct fcp32_ioctl), mode)) {
1677 			return (EFAULT);
1678 		}
1679 		break;
1680 	}
1681 	case DDI_MODEL_NONE:
1682 		if (ddi_copyout((void *)&fioctl, (void *)data,
1683 		    sizeof (struct fcp_ioctl), mode)) {
1684 			return (EFAULT);
1685 		}
1686 		break;
1687 	}
1688 #else	/* _MULTI_DATAMODEL */
1689 
1690 	if (ddi_copyout((void *)&fioctl, (void *)data,
1691 	    sizeof (struct fcp_ioctl), mode)) {
1692 		return (EFAULT);
1693 	}
1694 #endif	/* _MULTI_DATAMODEL */
1695 
1696 	return (0);
1697 }
1698 
1699 /*
1700  * Fetch the target mappings (path, etc.) for all LUNs
1701  * on this port.
1702  */
1703 /* ARGSUSED */
1704 static int
1705 fcp_get_target_mappings(struct fcp_ioctl *data,
1706     int mode, int *rval)
1707 {
1708 	struct fcp_port	    *pptr;
1709 	fc_hba_target_mappings_t    *mappings;
1710 	fc_hba_mapping_entry_t	    *map;
1711 	struct fcp_tgt	    *ptgt = NULL;
1712 	struct fcp_lun	    *plun = NULL;
1713 	int			    i, mapIndex, mappingSize;
1714 	int			    listlen;
1715 	struct fcp_ioctl	    fioctl;
1716 	char			    *path;
1717 	fcp_ent_addr_t		    sam_lun_addr;
1718 
1719 #ifdef	_MULTI_DATAMODEL
1720 	switch (ddi_model_convert_from(mode & FMODELS)) {
1721 	case DDI_MODEL_ILP32: {
1722 		struct fcp32_ioctl f32_ioctl;
1723 
1724 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1725 		    sizeof (struct fcp32_ioctl), mode)) {
1726 			return (EFAULT);
1727 		}
1728 		fioctl.fp_minor = f32_ioctl.fp_minor;
1729 		fioctl.listlen = f32_ioctl.listlen;
1730 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1731 		break;
1732 	}
1733 	case DDI_MODEL_NONE:
1734 		if (ddi_copyin((void *)data, (void *)&fioctl,
1735 		    sizeof (struct fcp_ioctl), mode)) {
1736 			return (EFAULT);
1737 		}
1738 		break;
1739 	}
1740 
1741 #else	/* _MULTI_DATAMODEL */
1742 	if (ddi_copyin((void *)data, (void *)&fioctl,
1743 	    sizeof (struct fcp_ioctl), mode)) {
1744 		return (EFAULT);
1745 	}
1746 #endif	/* _MULTI_DATAMODEL */
1747 
1748 	/*
1749 	 * Right now we can assume that the minor number matches with
1750 	 * this instance of fp. If this changes we will need to
1751 	 * revisit this logic.
1752 	 */
1753 	mutex_enter(&fcp_global_mutex);
1754 	pptr = fcp_port_head;
1755 	while (pptr) {
1756 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1757 			break;
1758 		} else {
1759 			pptr = pptr->port_next;
1760 		}
1761 	}
1762 	mutex_exit(&fcp_global_mutex);
1763 	if (pptr == NULL) {
1764 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1765 		    fioctl.fp_minor);
1766 		return (ENXIO);
1767 	}
1768 
1769 
1770 	/* We use listlen to show the total buffer size */
1771 	mappingSize = fioctl.listlen;
1772 
1773 	/* Now calculate how many mapping entries will fit */
1774 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1775 	    - sizeof (fc_hba_target_mappings_t);
1776 	if (listlen <= 0) {
1777 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1778 		return (ENXIO);
1779 	}
1780 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1781 
1782 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1783 		return (ENOMEM);
1784 	}
1785 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1786 
1787 	/* Now get to work */
1788 	mapIndex = 0;
1789 
1790 	mutex_enter(&pptr->port_mutex);
1791 	/* Loop through all targets on this port */
1792 	for (i = 0; i < FCP_NUM_HASH; i++) {
1793 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1794 		    ptgt = ptgt->tgt_next) {
1795 
1796 
1797 			/* Loop through all LUNs on this target */
1798 			for (plun = ptgt->tgt_lun; plun != NULL;
1799 			    plun = plun->lun_next) {
1800 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1801 					continue;
1802 				}
1803 
1804 				path = fcp_get_lun_path(plun);
1805 				if (path == NULL) {
1806 					continue;
1807 				}
1808 
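				/*
				 * If the caller's buffer is already full,
				 * keep counting LUNs (so numLuns reflects
				 * the total) but skip the copy.
				 */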
1809 				if (mapIndex >= listlen) {
1810 					mapIndex++;
1811 					kmem_free(path, MAXPATHLEN);
1812 					continue;
1813 				}
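				/* Fill in one mapping entry for this LUN. */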
1814 				map = &mappings->entries[mapIndex++];
1815 				bcopy(path, map->targetDriver,
1816 				    sizeof (map->targetDriver));
1817 				map->d_id = ptgt->tgt_d_id;
1818 				map->busNumber = 0;
1819 				map->targetNumber = ptgt->tgt_d_id;
1820 				map->osLUN = plun->lun_num;
1821 
1822 				/*
1823 				 * We byte-swapped the LUN when we stored
1824 				 * it in lun_addr.  Swap it back before
1825 				 * returning it to user land.
1826 				 */
1827 
1828 				sam_lun_addr.ent_addr_0 =
1829 				    BE_16(plun->lun_addr.ent_addr_0);
1830 				sam_lun_addr.ent_addr_1 =
1831 				    BE_16(plun->lun_addr.ent_addr_1);
1832 				sam_lun_addr.ent_addr_2 =
1833 				    BE_16(plun->lun_addr.ent_addr_2);
1834 				sam_lun_addr.ent_addr_3 =
1835 				    BE_16(plun->lun_addr.ent_addr_3);
1836 
1837 				bcopy(&sam_lun_addr, &map->samLUN,
1838 				    FCP_LUN_SIZE);
1839 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1840 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1841 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1842 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1843 
1844 				if (plun->lun_guid) {
1845 
1846 					/* convert ascii wwn to bytes */
1847 					fcp_ascii_to_wwn(plun->lun_guid,
1848 					    map->guid, sizeof (map->guid));
1849 
1850 					if ((sizeof (map->guid)) <
1851 					    plun->lun_guid_size / 2) {
1852 						cmn_err(CE_WARN,
1853 						    "fcp_get_target_mappings: "
1854 						    "guid copy space "
1855 						    "insufficient. "
1856 						    "Copy truncation - "
1857 						    "available %d; need %d",
1858 						    (int)sizeof (map->guid),
1859 						    (int)
1860 						    plun->lun_guid_size / 2);
1861 					}
1862 				}
1863 				kmem_free(path, MAXPATHLEN);
1864 			}
1865 		}
1866 	}
1867 	mutex_exit(&pptr->port_mutex);
1868 	mappings->numLuns = mapIndex;
1869 
1870 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1871 		kmem_free(mappings, mappingSize);
1872 		return (EFAULT);
1873 	}
1874 	kmem_free(mappings, mappingSize);
1875 
1876 #ifdef	_MULTI_DATAMODEL
1877 	switch (ddi_model_convert_from(mode & FMODELS)) {
1878 	case DDI_MODEL_ILP32: {
1879 		struct fcp32_ioctl f32_ioctl;
1880 
1881 		f32_ioctl.fp_minor = fioctl.fp_minor;
1882 		f32_ioctl.listlen = fioctl.listlen;
1883 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1884 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1885 		    sizeof (struct fcp32_ioctl), mode)) {
1886 			return (EFAULT);
1887 		}
1888 		break;
1889 	}
1890 	case DDI_MODEL_NONE:
1891 		if (ddi_copyout((void *)&fioctl, (void *)data,
1892 		    sizeof (struct fcp_ioctl), mode)) {
1893 			return (EFAULT);
1894 		}
1895 		break;
1896 	}
1897 #else	/* _MULTI_DATAMODEL */
1898 
1899 	if (ddi_copyout((void *)&fioctl, (void *)data,
1900 	    sizeof (struct fcp_ioctl), mode)) {
1901 		return (EFAULT);
1902 	}
1903 #endif	/* _MULTI_DATAMODEL */
1904 
1905 	return (0);
1906 }
1907 
1908 /*
1909  * fcp_setup_scsi_ioctl
1910  *	Setup handler for the "scsi passthru" style of
1911  *	ioctl for FCP.	See "fcp_util.h" for data structure
1912  *	definition.
1913  *
1914  * Input:
1915  *	u_fscsi	= ioctl data (user address space)
1916  *	mode	= See ioctl(9E)
1917  *
1918  * Output:
1919  *	u_fscsi	= ioctl data (user address space)
1920  *	rval	= return value - see ioctl(9E)
1921  *
1922  * Returns:
1923  *	0	= OK
1924  *	EAGAIN	= See errno.h
1925  *	EBUSY	= See errno.h
1926  *	EFAULT	= See errno.h
1927  *	EINTR	= See errno.h
1928  *	EINVAL	= See errno.h
1929  *	EIO	= See errno.h
1930  *	ENOMEM	= See errno.h
1931  *	ENXIO	= See errno.h
1932  *
1933  * Context:
1934  *	Kernel context.
1935  */
1936 /* ARGSUSED */
1937 static int
1938 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1939     int mode, int *rval)
1940 {
1941 	int			ret		= 0;
1942 	int			temp_ret;
1943 	caddr_t			k_cdbbufaddr	= NULL;
1944 	caddr_t			k_bufaddr	= NULL;
1945 	caddr_t			k_rqbufaddr	= NULL;
1946 	caddr_t			u_cdbbufaddr;
1947 	caddr_t			u_bufaddr;
1948 	caddr_t			u_rqbufaddr;
1949 	struct fcp_scsi_cmd	k_fscsi;
1950 
1951 	/*
1952 	 * Get fcp_scsi_cmd array element from user address space
1953 	 */
1954 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1955 	    != 0) {
1956 		return (ret);
1957 	}
1958 
1959 
1960 	/*
1961 	 * Even though kmem_alloc() checks the validity of the
1962 	 * buffer length, this check is still needed for the case
1963 	 * where kmem_flags are set and a zero buffer length is passed.
1964 	 */
1965 	if ((k_fscsi.scsi_cdblen <= 0) ||
1966 	    (k_fscsi.scsi_buflen <= 0) ||
1967 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
1968 	    (k_fscsi.scsi_rqlen <= 0) ||
1969 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
1970 		return (EINVAL);
1971 	}
1972 
1973 	/*
1974 	 * Allocate data for fcp_scsi_cmd pointer fields
1975 	 */
1976 	if (ret == 0) {
1977 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
1978 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
1979 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
1980 
1981 		if (k_cdbbufaddr == NULL ||
1982 		    k_bufaddr	 == NULL ||
1983 		    k_rqbufaddr	 == NULL) {
1984 			ret = ENOMEM;
1985 		}
1986 	}
1987 
1988 	/*
1989 	 * Get fcp_scsi_cmd pointer fields from user
1990 	 * address space
1991 	 */
1992 	if (ret == 0) {
1993 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
1994 		u_bufaddr    = k_fscsi.scsi_bufaddr;
1995 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
1996 
1997 		if (ddi_copyin(u_cdbbufaddr,
1998 		    k_cdbbufaddr,
1999 		    k_fscsi.scsi_cdblen,
2000 		    mode)) {
2001 			ret = EFAULT;
2002 		} else if (ddi_copyin(u_bufaddr,
2003 		    k_bufaddr,
2004 		    k_fscsi.scsi_buflen,
2005 		    mode)) {
2006 			ret = EFAULT;
2007 		} else if (ddi_copyin(u_rqbufaddr,
2008 		    k_rqbufaddr,
2009 		    k_fscsi.scsi_rqlen,
2010 		    mode)) {
2011 			ret = EFAULT;
2012 		}
2013 	}
2014 
2015 	/*
2016 	 * Send scsi command (blocking)
2017 	 */
2018 	if (ret == 0) {
2019 		/*
2020 		 * Prior to sending the scsi command, the
2021 		 * fcp_scsi_cmd data structure must contain kernel,
2022 		 * not user, addresses.
2023 		 */
2024 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2025 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2026 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2027 
2028 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2029 
2030 		/*
2031 		 * After sending the scsi command, the
2032 		 * fcp_scsi_cmd data structure must contain user,
2033 		 * not kernel, addresses.
2034 		 */
2035 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2036 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2037 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2038 	}
2039 
2040 	/*
2041 	 * Put fcp_scsi_cmd pointer fields to user address space
2042 	 */
2043 	if (ret == 0) {
2044 		if (ddi_copyout(k_cdbbufaddr,
2045 		    u_cdbbufaddr,
2046 		    k_fscsi.scsi_cdblen,
2047 		    mode)) {
2048 			ret = EFAULT;
2049 		} else if (ddi_copyout(k_bufaddr,
2050 		    u_bufaddr,
2051 		    k_fscsi.scsi_buflen,
2052 		    mode)) {
2053 			ret = EFAULT;
2054 		} else if (ddi_copyout(k_rqbufaddr,
2055 		    u_rqbufaddr,
2056 		    k_fscsi.scsi_rqlen,
2057 		    mode)) {
2058 			ret = EFAULT;
2059 		}
2060 	}
2061 
2062 	/*
2063 	 * Free data for fcp_scsi_cmd pointer fields
2064 	 */
2065 	if (k_cdbbufaddr != NULL) {
2066 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2067 	}
2068 	if (k_bufaddr != NULL) {
2069 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2070 	}
2071 	if (k_rqbufaddr != NULL) {
2072 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2073 	}
2074 
2075 	/*
2076 	 * Put fcp_scsi_cmd array element to user address space
2077 	 */
2078 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2079 	if (temp_ret != 0) {
2080 		ret = temp_ret;
2081 	}
2082 
2083 	/*
2084 	 * Return status
2085 	 */
2086 	return (ret);
2087 }
2088 
2089 
2090 /*
2091  * fcp_copyin_scsi_cmd
2092  *	Copy in fcp_scsi_cmd data structure from user address space.
2093  *	The data may be in 32 bit or 64 bit modes.
2094  *
2095  * Input:
2096  *	base_addr	= from address (user address space)
2097  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2098  *
2099  * Output:
2100  *	fscsi		= to address (kernel address space)
2101  *
2102  * Returns:
2103  *	0	= OK
2104  *	EFAULT	= Error
2105  *
2106  * Context:
2107  *	Kernel context.
2108  */
2109 static int
2110 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2111 {
2112 #ifdef	_MULTI_DATAMODEL
2113 	struct fcp32_scsi_cmd	f32scsi;
2114 
2115 	switch (ddi_model_convert_from(mode & FMODELS)) {
2116 	case DDI_MODEL_ILP32:
2117 		/*
2118 		 * Copy data from user address space
2119 		 */
2120 		if (ddi_copyin((void *)base_addr,
2121 		    &f32scsi,
2122 		    sizeof (struct fcp32_scsi_cmd),
2123 		    mode)) {
2124 			return (EFAULT);
2125 		}
2126 		/*
2127 		 * Convert from 32 bit to 64 bit
2128 		 */
2129 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2130 		break;
2131 	case DDI_MODEL_NONE:
2132 		/*
2133 		 * Copy data from user address space
2134 		 */
2135 		if (ddi_copyin((void *)base_addr,
2136 		    fscsi,
2137 		    sizeof (struct fcp_scsi_cmd),
2138 		    mode)) {
2139 			return (EFAULT);
2140 		}
2141 		break;
2142 	}
2143 #else	/* _MULTI_DATAMODEL */
2144 	/*
2145 	 * Copy data from user address space
2146 	 */
2147 	if (ddi_copyin((void *)base_addr,
2148 	    fscsi,
2149 	    sizeof (struct fcp_scsi_cmd),
2150 	    mode)) {
2151 		return (EFAULT);
2152 	}
2153 #endif	/* _MULTI_DATAMODEL */
2154 
2155 	return (0);
2156 }
2157 
2158 
2159 /*
2160  * fcp_copyout_scsi_cmd
2161  *	Copy out fcp_scsi_cmd data structure to user address space.
2162  *	The data may be in 32 bit or 64 bit modes.
2163  *
2164  * Input:
2165  *	fscsi		= to address (kernel address space)
2166  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2167  *
2168  * Output:
2169  *	base_addr	= from address (user address space)
2170  *
2171  * Returns:
2172  *	0	= OK
2173  *	EFAULT	= Error
2174  *
2175  * Context:
2176  *	Kernel context.
2177  */
2178 static int
2179 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2180 {
2181 #ifdef	_MULTI_DATAMODEL
2182 	struct fcp32_scsi_cmd	f32scsi;
2183 
2184 	switch (ddi_model_convert_from(mode & FMODELS)) {
2185 	case DDI_MODEL_ILP32:
2186 		/*
2187 		 * Convert from 64 bit to 32 bit
2188 		 */
2189 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2190 		/*
2191 		 * Copy data to user address space
2192 		 */
2193 		if (ddi_copyout(&f32scsi,
2194 		    (void *)base_addr,
2195 		    sizeof (struct fcp32_scsi_cmd),
2196 		    mode)) {
2197 			return (EFAULT);
2198 		}
2199 		break;
2200 	case DDI_MODEL_NONE:
2201 		/*
2202 		 * Copy data to user address space
2203 		 */
2204 		if (ddi_copyout(fscsi,
2205 		    (void *)base_addr,
2206 		    sizeof (struct fcp_scsi_cmd),
2207 		    mode)) {
2208 			return (EFAULT);
2209 		}
2210 		break;
2211 	}
2212 #else	/* _MULTI_DATAMODEL */
2213 	/*
2214 	 * Copy data to user address space
2215 	 */
2216 	if (ddi_copyout(fscsi,
2217 	    (void *)base_addr,
2218 	    sizeof (struct fcp_scsi_cmd),
2219 	    mode)) {
2220 		return (EFAULT);
2221 	}
2222 #endif	/* _MULTI_DATAMODEL */
2223 
2224 	return (0);
2225 }
2226 
2227 
2228 /*
2229  * fcp_send_scsi_ioctl
2230  *	Sends the SCSI command in blocking mode.
2231  *
2232  * Input:
2233  *	fscsi		= SCSI command data structure
2234  *
2235  * Output:
2236  *	fscsi		= SCSI command data structure
2237  *
2238  * Returns:
2239  *	0	= OK
2240  *	EAGAIN	= See errno.h
2241  *	EBUSY	= See errno.h
2242  *	EINTR	= See errno.h
2243  *	EINVAL	= See errno.h
2244  *	EIO	= See errno.h
2245  *	ENOMEM	= See errno.h
2246  *	ENXIO	= See errno.h
2247  *
2248  * Context:
2249  *	Kernel context.
2250  */
2251 static int
2252 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2253 {
2254 	struct fcp_lun	*plun		= NULL;
2255 	struct fcp_port	*pptr		= NULL;
2256 	struct fcp_tgt	*ptgt		= NULL;
2257 	fc_packet_t		*fpkt		= NULL;
2258 	struct fcp_ipkt	*icmd		= NULL;
2259 	int			target_created	= FALSE;
2260 	fc_frame_hdr_t		*hp;
2261 	struct fcp_cmd		fcp_cmd;
2262 	struct fcp_cmd		*fcmd;
2263 	union scsi_cdb		*scsi_cdb;
2264 	la_wwn_t		*wwn_ptr;
2265 	int			nodma;
2266 	struct fcp_rsp		*rsp;
2267 	struct fcp_rsp_info	*rsp_info;
2268 	caddr_t			rsp_sense;
2269 	int			buf_len;
2270 	int			info_len;
2271 	int			sense_len;
2272 	struct scsi_extended_sense	*sense_to = NULL;
2273 	timeout_id_t		tid;
2274 	uint8_t			reconfig_lun = FALSE;
2275 	uint8_t			reconfig_pending = FALSE;
2276 	uint8_t			scsi_cmd;
2277 	int			rsp_len;
2278 	int			cmd_index;
2279 	int			fc_status;
2280 	int			pkt_state;
2281 	int			pkt_action;
2282 	int			pkt_reason;
2283 	int			ret, xport_retval = ~FC_SUCCESS;
2284 	int			lcount;
2285 	int			tcount;
2286 	int			reconfig_status;
2287 	int			port_busy = FALSE;
2288 	uchar_t			*lun_string;
2289 
2290 	/*
2291 	 * Check valid SCSI command
2292 	 */
2293 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2294 	ret = EINVAL;
2295 	for (cmd_index = 0;
2296 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2297 	    ret != 0;
2298 	    cmd_index++) {
2299 		/*
2300 		 * First byte of CDB is the SCSI command
2301 		 */
2302 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2303 			ret = 0;
2304 		}
2305 	}
2306 
2307 	/*
2308 	 * Check inputs
2309 	 */
2310 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2311 		ret = EINVAL;
2312 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2313 		/* CDB must be no larger than FCP_CDB_SIZE */
2314 		ret = EINVAL;
2315 	}
2316 
2317 
2318 	/*
2319 	 * Find FC port
2320 	 */
2321 	if (ret == 0) {
2322 		/*
2323 		 * Acquire global mutex
2324 		 */
2325 		mutex_enter(&fcp_global_mutex);
2326 
2327 		pptr = fcp_port_head;
2328 		while (pptr) {
2329 			if (pptr->port_instance ==
2330 			    (uint32_t)fscsi->scsi_fc_port_num) {
2331 				break;
2332 			} else {
2333 				pptr = pptr->port_next;
2334 			}
2335 		}
2336 
2337 		if (pptr == NULL) {
2338 			ret = ENXIO;
2339 		} else {
2340 			/*
2341 			 * fc_ulp_busy_port() can raise power, so we must
2342 			 * not hold any mutexes involved in PM.
2343 			 */
2344 			mutex_exit(&fcp_global_mutex);
2345 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2346 		}
2347 
2348 		if (ret == 0) {
2349 
2350 			/* remember port is busy, so we will release later */
2351 			port_busy = TRUE;
2352 
2353 			/*
2354 			 * If there is a reconfiguration in progress, wait
2355 			 * for it to complete.
2356 			 */
2357 
2358 			fcp_reconfig_wait(pptr);
2359 
2360 			/* reacquire mutexes in order */
2361 			mutex_enter(&fcp_global_mutex);
2362 			mutex_enter(&pptr->port_mutex);
2363 
2364 			/*
2365 			 * Will port accept DMA?
2366 			 */
2367 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2368 			    ? 1 : 0;
2369 
2370 			/*
2371 			 * If the port is init or offline, the device
2372 			 * is not known.  If we are discovering
2373 			 * (onlining), we obviously cannot provide
2374 			 * reliable data about devices until discovery
2375 			 * is complete.
2376 			 */
2377 			if (pptr->port_state & (FCP_STATE_INIT |
2378 			    FCP_STATE_OFFLINE)) {
2379 				ret = ENXIO;
2380 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2381 				ret = EBUSY;
2382 			} else {
2383 				/*
2384 				 * Find target from pwwn
2385 				 *
2386 				 * The wwn must be put into a local
2387 				 * variable to ensure alignment.
2388 				 */
2389 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2390 				ptgt = fcp_lookup_target(pptr,
2391 				    (uchar_t *)wwn_ptr);
2392 
2393 				/*
2394 				 * If the target was not found, create it.
2395 				 */
2396 				if (ptgt == NULL) {
2397 					/*
2398 					 * Note: Still have global &
2399 					 * port mutexes
2400 					 */
2401 					mutex_exit(&pptr->port_mutex);
2402 					ptgt = fcp_port_create_tgt(pptr,
2403 					    wwn_ptr, &ret, &fc_status,
2404 					    &pkt_state, &pkt_action,
2405 					    &pkt_reason);
2406 					mutex_enter(&pptr->port_mutex);
2407 
2408 					fscsi->scsi_fc_status  = fc_status;
2409 					fscsi->scsi_pkt_state  =
2410 					    (uchar_t)pkt_state;
2411 					fscsi->scsi_pkt_reason = pkt_reason;
2412 					fscsi->scsi_pkt_action =
2413 					    (uchar_t)pkt_action;
2414 
2415 					if (ptgt != NULL) {
2416 						target_created = TRUE;
2417 					} else if (ret == 0) {
2418 						ret = ENOMEM;
2419 					}
2420 				}
2421 
2422 				if (ret == 0) {
2423 					/*
2424 					 * Acquire target
2425 					 */
2426 					mutex_enter(&ptgt->tgt_mutex);
2427 
2428 					/*
2429 					 * If the target is marked or busy,
2430 					 * it cannot be used.
2431 					 */
2432 					if (ptgt->tgt_state &
2433 					    (FCP_TGT_MARK |
2434 					    FCP_TGT_BUSY)) {
2435 						ret = EBUSY;
2436 					} else {
2437 						/*
2438 						 * Mark target as busy
2439 						 */
2440 						ptgt->tgt_state |=
2441 						    FCP_TGT_BUSY;
2442 					}
2443 
2444 					/*
2445 					 * Release target
2446 					 */
2447 					lcount = pptr->port_link_cnt;
2448 					tcount = ptgt->tgt_change_cnt;
2449 					mutex_exit(&ptgt->tgt_mutex);
2450 				}
2451 			}
2452 
2453 			/*
2454 			 * Release port
2455 			 */
2456 			mutex_exit(&pptr->port_mutex);
2457 		}
2458 
2459 		/*
2460 		 * Release global mutex
2461 		 */
2462 		mutex_exit(&fcp_global_mutex);
2463 	}
2464 
2465 	if (ret == 0) {
2466 		uint64_t belun = BE_64(fscsi->scsi_lun);
2467 
2468 		/*
2469 		 * If it's a target device, find lun from pwwn
2470 		 * The wwn must be put into a local
2471 		 * variable to ensure alignment.
2472 		 */
2473 		mutex_enter(&pptr->port_mutex);
2474 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2475 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2476 			/* this is not a target */
2477 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2478 			ret = ENXIO;
2479 		} else if ((belun << 16) != 0) {
2480 			/*
2481 			 * Since fcp only supports the PD and LU addressing
2482 			 * methods so far, the last 6 bytes of a valid LUN are
2483 			 * expected to be filled with 00h.
2484 			 */
2485 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2486 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2487 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2488 			    (uint8_t)(belun >> 62), belun);
2489 			ret = ENXIO;
2490 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2491 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2492 			/*
2493 			 * This is a SCSI target, but no LUN at this
2494 			 * address.
2495 			 *
2496 			 * In the future, we may want to send this to
2497 			 * the target, and let it respond
2498 			 * appropriately
2499 			 */
2500 			ret = ENXIO;
2501 		}
2502 		mutex_exit(&pptr->port_mutex);
2503 	}
2504 
2505 	/*
2506 	 * Finished grabbing external resources
2507 	 * Allocate internal packet (icmd)
2508 	 */
2509 	if (ret == 0) {
2510 		/*
2511 		 * Calc rsp len assuming rsp info included
2512 		 */
2513 		rsp_len = sizeof (struct fcp_rsp) +
2514 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2515 
2516 		icmd = fcp_icmd_alloc(pptr, ptgt,
2517 		    sizeof (struct fcp_cmd),
2518 		    rsp_len,
2519 		    fscsi->scsi_buflen,
2520 		    nodma,
2521 		    lcount,			/* ipkt_link_cnt */
2522 		    tcount,			/* ipkt_change_cnt */
2523 		    0,				/* cause */
2524 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2525 
2526 		if (icmd == NULL) {
2527 			ret = ENOMEM;
2528 		} else {
2529 			/*
2530 			 * Setup internal packet as sema sync
2531 			 */
2532 			fcp_ipkt_sema_init(icmd);
2533 		}
2534 	}
2535 
2536 	if (ret == 0) {
2537 		/*
2538 		 * Init fpkt pointer for use.
2539 		 */
2540 
2541 		fpkt = icmd->ipkt_fpkt;
2542 
2543 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2544 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2545 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2546 
2547 		/*
2548 		 * Init fcmd pointer for use by SCSI command
2549 		 */
2550 
2551 		if (nodma) {
2552 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2553 		} else {
2554 			fcmd = &fcp_cmd;
2555 		}
2556 		bzero(fcmd, sizeof (struct fcp_cmd));
2557 		ptgt = plun->lun_tgt;
2558 
2559 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2560 
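		/*
		 * Build the FCP entity address from the 8-byte SCSI LUN
		 * supplied by the caller, 16 bits at a time.
		 */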
2561 		fcmd->fcp_ent_addr.ent_addr_0 =
2562 		    BE_16(*(uint16_t *)&(lun_string[0]));
2563 		fcmd->fcp_ent_addr.ent_addr_1 =
2564 		    BE_16(*(uint16_t *)&(lun_string[2]));
2565 		fcmd->fcp_ent_addr.ent_addr_2 =
2566 		    BE_16(*(uint16_t *)&(lun_string[4]));
2567 		fcmd->fcp_ent_addr.ent_addr_3 =
2568 		    BE_16(*(uint16_t *)&(lun_string[6]));
2569 
2570 		/*
2571 		 * Setup internal packet(icmd)
2572 		 */
2573 		icmd->ipkt_lun		= plun;
2574 		icmd->ipkt_restart	= 0;
2575 		icmd->ipkt_retries	= 0;
2576 		icmd->ipkt_opcode	= 0;
2577 
2578 		/*
2579 		 * Initialize the frame header pointer for use.
2580 		 */
2581 		hp = &fpkt->pkt_cmd_fhdr;
2582 
2583 		hp->s_id	= pptr->port_id;
2584 		hp->d_id	= ptgt->tgt_d_id;
2585 		hp->r_ctl	= R_CTL_COMMAND;
2586 		hp->type	= FC_TYPE_SCSI_FCP;
2587 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2588 		hp->rsvd	= 0;
2589 		hp->seq_id	= 0;
2590 		hp->seq_cnt	= 0;
2591 		hp->ox_id	= 0xffff;
2592 		hp->rx_id	= 0xffff;
2593 		hp->ro		= 0;
2594 
2595 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2596 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2597 		fcmd->fcp_cntl.cntl_write_data	= 0;
2598 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2599 
2600 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2601 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2602 		    fscsi->scsi_cdblen);
2603 
2604 		if (!nodma) {
2605 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2606 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2607 		}
2608 
2609 		/*
2610 		 * Send SCSI command to FC transport
2611 		 */
2612 
2613 		if (ret == 0) {
2614 			mutex_enter(&ptgt->tgt_mutex);
2615 
2616 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2617 				mutex_exit(&ptgt->tgt_mutex);
2618 				fscsi->scsi_fc_status = xport_retval =
2619 				    fc_ulp_transport(pptr->port_fp_handle,
2620 				    fpkt);
2621 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2622 					ret = EIO;
2623 				}
2624 			} else {
2625 				mutex_exit(&ptgt->tgt_mutex);
2626 				ret = EBUSY;
2627 			}
2628 		}
2629 	}
2630 
2631 	/*
2632 	 * Wait for completion only if fc_ulp_transport() was called and it
2633 	 * returned success; that is the only time the callback will happen.
2634 	 * Otherwise, there is no point in waiting.
2635 	 */
2636 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2637 		ret = fcp_ipkt_sema_wait(icmd);
2638 	}
2639 
2640 	/*
2641 	 * Copy data to IOCTL data structures
2642 	 */
2643 	rsp = NULL;
2644 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2645 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2646 
2647 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2648 			fcp_log(CE_WARN, pptr->port_dip,
2649 			    "!SCSI command to d_id=0x%x lun=0x%x"
2650 			    " failed, Bad FCP response values:"
2651 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2652 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2653 			    ptgt->tgt_d_id, plun->lun_num,
2654 			    rsp->reserved_0, rsp->reserved_1,
2655 			    rsp->fcp_u.fcp_status.reserved_0,
2656 			    rsp->fcp_u.fcp_status.reserved_1,
2657 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2658 
2659 			ret = EIO;
2660 		}
2661 	}
2662 
2663 	if ((ret == 0) && (rsp != NULL)) {
2664 		/*
2665 		 * Calc response lengths
2666 		 */
2667 		sense_len = 0;
2668 		info_len = 0;
2669 
2670 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2671 			info_len = rsp->fcp_response_len;
2672 		}
2673 
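		/*
		 * The FCP response info immediately follows the fcp_rsp
		 * header; sense data (if any) follows the response info.
		 */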
2674 		rsp_info   = (struct fcp_rsp_info *)
2675 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2676 
2677 		/*
2678 		 * Get SCSI status
2679 		 */
2680 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2681 		/*
2682 		 * If a lun was just added or removed and the next command
2683 		 * comes through this interface, we need to capture the check
2684 		 * condition so we can discover the new topology.
2685 		 */
2686 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2687 		    rsp->fcp_u.fcp_status.sense_len_set) {
2688 			sense_len = rsp->fcp_sense_len;
2689 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2690 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2691 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2692 			    (FCP_SENSE_NO_LUN(sense_to))) {
2693 				reconfig_lun = TRUE;
2694 			}
2695 		}
2696 
2697 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2698 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2699 			if (reconfig_lun == FALSE) {
2700 				reconfig_status =
2701 				    fcp_is_reconfig_needed(ptgt, fpkt);
2702 			}
2703 
2704 			if ((reconfig_lun == TRUE) ||
2705 			    (reconfig_status == TRUE)) {
2706 				mutex_enter(&ptgt->tgt_mutex);
2707 				if (ptgt->tgt_tid == NULL) {
2708 					/*
2709 					 * Either we've been notified the
2710 					 * REPORT_LUN data has changed, or
2711 					 * we've determined on our own that
2712 					 * we're out of date.  Kick off
2713 					 * rediscovery.
2714 					 */
2715 					tid = timeout(fcp_reconfigure_luns,
2716 					    (caddr_t)ptgt, drv_usectohz(1));
2717 
2718 					ptgt->tgt_tid = tid;
2719 					ptgt->tgt_state |= FCP_TGT_BUSY;
2720 					ret = EBUSY;
2721 					reconfig_pending = TRUE;
2722 				}
2723 				mutex_exit(&ptgt->tgt_mutex);
2724 			}
2725 		}
2726 
2727 		/*
2728 		 * Calc residuals and buffer lengths
2729 		 */
2730 
2731 		if (ret == 0) {
2732 			buf_len = fscsi->scsi_buflen;
2733 			fscsi->scsi_bufresid	= 0;
2734 			if (rsp->fcp_u.fcp_status.resid_under) {
2735 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2736 					fscsi->scsi_bufresid = rsp->fcp_resid;
2737 				} else {
2738 					cmn_err(CE_WARN, "fcp: bad residue %x "
2739 					    "for txfer len %x", rsp->fcp_resid,
2740 					    fscsi->scsi_buflen);
2741 					fscsi->scsi_bufresid =
2742 					    fscsi->scsi_buflen;
2743 				}
2744 				buf_len -= fscsi->scsi_bufresid;
2745 			}
2746 			if (rsp->fcp_u.fcp_status.resid_over) {
2747 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2748 			}
2749 
2750 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2751 			if (fscsi->scsi_rqlen < sense_len) {
2752 				sense_len = fscsi->scsi_rqlen;
2753 			}
2754 
2755 			fscsi->scsi_fc_rspcode	= 0;
2756 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2757 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2758 			}
2759 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2760 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2761 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2762 
2763 			/*
2764 			 * Copy data and request sense
2765 			 *
2766 			 * Data must be copied by using the FCP_CP_IN macro.
2767 			 * This will ensure the proper byte order since the data
2768 			 * is being copied directly from the memory mapped
2769 			 * device register.
2770 			 *
2771 			 * The response (and request sense) will be in the
2772 			 * correct byte order.	No special copy is necessary.
2773 			 */
2774 
2775 			if (buf_len) {
2776 				FCP_CP_IN(fpkt->pkt_data,
2777 				    fscsi->scsi_bufaddr,
2778 				    fpkt->pkt_data_acc,
2779 				    buf_len);
2780 			}
2781 			bcopy((void *)rsp_sense,
2782 			    (void *)fscsi->scsi_rqbufaddr,
2783 			    sense_len);
2784 		}
2785 	}
2786 
2787 	/*
2788 	 * Cleanup transport data structures if icmd was alloc-ed
2789 	 * So, cleanup happens in the same thread that icmd was alloc-ed
2790 	 */
2791 	if (icmd != NULL) {
2792 		fcp_ipkt_sema_cleanup(icmd);
2793 	}
2794 
2795 	/* restore pm busy/idle status */
2796 	if (port_busy) {
2797 		fc_ulp_idle_port(pptr->port_fp_handle);
2798 	}
2799 
2800 	/*
2801 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2802 	 * flag, it'll be cleared when the reconfig is complete.
2803 	 */
2804 	if ((ptgt != NULL) && !reconfig_pending) {
2805 		/*
2806 		 * If target was created,
2807 		 */
2808 		if (target_created) {
2809 			mutex_enter(&ptgt->tgt_mutex);
2810 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2811 			mutex_exit(&ptgt->tgt_mutex);
2812 		} else {
2813 			/*
2814 			 * De-mark target as busy
2815 			 */
2816 			mutex_enter(&ptgt->tgt_mutex);
2817 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2818 			mutex_exit(&ptgt->tgt_mutex);
2819 		}
2820 	}
2821 	return (ret);
2822 }
2823 
2824 
2825 static int
2826 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2827     fc_packet_t	*fpkt)
2828 {
2829 	uchar_t			*lun_string;
2830 	uint16_t		lun_num, i;
2831 	int			num_luns;
2832 	int			actual_luns;
2833 	int			num_masked_luns;
2834 	int			lun_buflen;
2835 	struct fcp_lun	*plun	= NULL;
2836 	struct fcp_reportlun_resp	*report_lun;
2837 	uint8_t			reconfig_needed = FALSE;
2838 	uint8_t			lun_exists = FALSE;
2839 
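	/* Copy the REPORT_LUN response out of the packet's data buffer. */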
2840 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2841 
2842 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2843 	    fpkt->pkt_datalen);
2844 
2845 	/* get number of luns (which is supplied as LUNS * 8) */
2846 	num_luns = BE_32(report_lun->num_lun) >> 3;
2847 
2848 	/*
2849 	 * Figure out exactly how many lun strings our response buffer
2850 	 * can hold.
2851 	 */
2852 	lun_buflen = (fpkt->pkt_datalen -
2853 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2854 
2855 	/*
2856 	 * Is our response buffer full or not? We don't want to
2857 	 * potentially walk beyond the number of luns we have.
2858 	 */
2859 	if (num_luns <= lun_buflen) {
2860 		actual_luns = num_luns;
2861 	} else {
2862 		actual_luns = lun_buflen;
2863 	}
2864 
2865 	mutex_enter(&ptgt->tgt_mutex);
2866 
2867 	/* Scan each lun to see if we have masked it. */
2868 	num_masked_luns = 0;
2869 	if (fcp_lun_blacklist != NULL) {
2870 		for (i = 0; i < actual_luns; i++) {
2871 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2872 			switch (lun_string[0] & 0xC0) {
2873 			case FCP_LUN_ADDRESSING:
2874 			case FCP_PD_ADDRESSING:
2875 			case FCP_VOLUME_ADDRESSING:
2876 				lun_num = ((lun_string[0] & 0x3F) << 8)
2877 				    | lun_string[1];
2878 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2879 				    lun_num) == TRUE) {
2880 					num_masked_luns++;
2881 				}
2882 				break;
2883 			default:
2884 				break;
2885 			}
2886 		}
2887 	}
2888 
2889 	/*
2890 	 * The quick and easy check.  If the number of LUNs reported
2891 	 * doesn't match the number we currently know about, we need
2892 	 * to reconfigure.
2893 	 */
2894 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2895 		mutex_exit(&ptgt->tgt_mutex);
2896 		kmem_free(report_lun, fpkt->pkt_datalen);
2897 		return (TRUE);
2898 	}
2899 
2900 	/*
2901 	 * If the quick and easy check doesn't turn up anything, we walk
2902 	 * the list of luns from the REPORT_LUN response and look for
2903 	 * any luns we don't know about.  If we find one, we know we need
2904 	 * to reconfigure. We will skip LUNs that are masked because of the
2905 	 * blacklist.
2906 	 */
2907 	for (i = 0; i < actual_luns; i++) {
2908 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2909 		lun_exists = FALSE;
2910 		switch (lun_string[0] & 0xC0) {
2911 		case FCP_LUN_ADDRESSING:
2912 		case FCP_PD_ADDRESSING:
2913 		case FCP_VOLUME_ADDRESSING:
2914 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2915 
2916 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2917 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2918 				lun_exists = TRUE;
2919 				break;
2920 			}
2921 
2922 			for (plun = ptgt->tgt_lun; plun;
2923 			    plun = plun->lun_next) {
2924 				if (plun->lun_num == lun_num) {
2925 					lun_exists = TRUE;
2926 					break;
2927 				}
2928 			}
2929 			break;
2930 		default:
2931 			break;
2932 		}
2933 
2934 		if (lun_exists == FALSE) {
2935 			reconfig_needed = TRUE;
2936 			break;
2937 		}
2938 	}
2939 
2940 	mutex_exit(&ptgt->tgt_mutex);
2941 	kmem_free(report_lun, fpkt->pkt_datalen);
2942 
2943 	return (reconfig_needed);
2944 }
2945 
2946 /*
2947  * This function is called by fcp_handle_page83 and uses inquiry response data
2948  * stored in plun->lun_inq to determine whether or not a device is a member of
2949  * the fcp_symmetric_disk_table. We return 0 if it is in the table,
2950  * otherwise 1.
2951  */
2952 static int
2953 fcp_symmetric_device_probe(struct fcp_lun *plun)
2954 {
2955 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2956 	char			*devidptr;
2957 	int			i, len;
2958 
2959 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2960 		devidptr = fcp_symmetric_disk_table[i];
2961 		len = (int)strlen(devidptr);
2962 
2963 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2964 			return (0);
2965 		}
2966 	}
2967 	return (1);
2968 }
2969 
2970 
2971 /*
2972  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
2973  * It basically returns the current count of state change callbacks,
2974  * i.e. the value of port_link_cnt that is copied out below.
2975  *
2976  * INPUT:
2977  *   fcp_ioctl.fp_minor -> The minor # of the fp port
2978  *   fcp_ioctl.listlen	-> 1
2979  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
2980  */
2981 /*ARGSUSED2*/
2982 static int
2983 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
2984 {
2985 	int			ret;
2986 	uint32_t		link_cnt;
2987 	struct fcp_ioctl	fioctl;
2988 	struct fcp_port	*pptr = NULL;
2989 
2990 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
2991 	    &pptr)) != 0) {
2992 		return (ret);
2993 	}
2994 
2995 	ASSERT(pptr != NULL);
2996 
2997 	if (fioctl.listlen != 1) {
2998 		return (EINVAL);
2999 	}
3000 
3001 	mutex_enter(&pptr->port_mutex);
3002 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3003 		mutex_exit(&pptr->port_mutex);
3004 		return (ENXIO);
3005 	}
3006 
3007 	/*
3008 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3009 	 * when fcp initially attaches to the port and there is nothing
3010 	 * hanging off the port, or when there was a repeat offline state
3011 	 * change callback (refer to fcp_statec_callback() FC_STATE_OFFLINE
3012 	 * case).  In the latter case, port_tmp_cnt will be non-zero and that
3013 	 * is how we will differentiate the 2 cases.
3014 	 */
3015 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3016 		mutex_exit(&pptr->port_mutex);
3017 		return (ENXIO);
3018 	}
3019 
3020 	link_cnt = pptr->port_link_cnt;
3021 	mutex_exit(&pptr->port_mutex);
3022 
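	/* Return the snapshotted link count to the caller's buffer. */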
3023 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3024 		return (EFAULT);
3025 	}
3026 
3027 #ifdef	_MULTI_DATAMODEL
3028 	switch (ddi_model_convert_from(mode & FMODELS)) {
3029 	case DDI_MODEL_ILP32: {
3030 		struct fcp32_ioctl f32_ioctl;
3031 
3032 		f32_ioctl.fp_minor = fioctl.fp_minor;
3033 		f32_ioctl.listlen = fioctl.listlen;
3034 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3035 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3036 		    sizeof (struct fcp32_ioctl), mode)) {
3037 			return (EFAULT);
3038 		}
3039 		break;
3040 	}
3041 	case DDI_MODEL_NONE:
3042 		if (ddi_copyout((void *)&fioctl, (void *)data,
3043 		    sizeof (struct fcp_ioctl), mode)) {
3044 			return (EFAULT);
3045 		}
3046 		break;
3047 	}
3048 #else	/* _MULTI_DATAMODEL */
3049 
3050 	if (ddi_copyout((void *)&fioctl, (void *)data,
3051 	    sizeof (struct fcp_ioctl), mode)) {
3052 		return (EFAULT);
3053 	}
3054 #endif	/* _MULTI_DATAMODEL */
3055 
3056 	return (0);
3057 }
3058 
3059 /*
3060  * This function copies the fcp_ioctl structure passed in from user land
3061  * into kernel land. Handles 32 bit applications.
3062  */
3063 /*ARGSUSED*/
3064 static int
3065 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3066     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3067 {
3068 	struct fcp_port	*t_pptr;
3069 
3070 #ifdef	_MULTI_DATAMODEL
3071 	switch (ddi_model_convert_from(mode & FMODELS)) {
3072 	case DDI_MODEL_ILP32: {
3073 		struct fcp32_ioctl f32_ioctl;
3074 
3075 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3076 		    sizeof (struct fcp32_ioctl), mode)) {
3077 			return (EFAULT);
3078 		}
3079 		fioctl->fp_minor = f32_ioctl.fp_minor;
3080 		fioctl->listlen = f32_ioctl.listlen;
3081 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3082 		break;
3083 	}
3084 	case DDI_MODEL_NONE:
3085 		if (ddi_copyin((void *)data, (void *)fioctl,
3086 		    sizeof (struct fcp_ioctl), mode)) {
3087 			return (EFAULT);
3088 		}
3089 		break;
3090 	}
3091 
3092 #else	/* _MULTI_DATAMODEL */
3093 	if (ddi_copyin((void *)data, (void *)fioctl,
3094 	    sizeof (struct fcp_ioctl), mode)) {
3095 		return (EFAULT);
3096 	}
3097 #endif	/* _MULTI_DATAMODEL */
3098 
3099 	/*
3100 	 * Right now we can assume that the minor number matches with
3101 	 * this instance of fp. If this changes we will need to
3102 	 * revisit this logic.
3103 	 */
3104 	mutex_enter(&fcp_global_mutex);
3105 	t_pptr = fcp_port_head;
3106 	while (t_pptr) {
3107 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3108 			break;
3109 		} else {
3110 			t_pptr = t_pptr->port_next;
3111 		}
3112 	}
3113 	*pptr = t_pptr;
3114 	mutex_exit(&fcp_global_mutex);
3115 	if (t_pptr == NULL) {
3116 		return (ENXIO);
3117 	}
3118 
3119 	return (0);
3120 }
3121 
3122 /*
3123  *     Function: fcp_port_create_tgt
3124  *
3125  *  Description: As the name suggests, this function creates the target context
3126  *		 specified by the WWN provided by the caller.  If the
3127  *		 creation goes well and the target is known by fp/fctl, a PLOGI
3128  *		 followed by a PRLI is issued.
3129  *
3130  *     Argument: pptr		fcp port structure
3131  *		 pwwn		WWN of the target
3132  *		 ret_val	Address of the return code.  It could be:
3133  *				EIO, ENOMEM or 0.
3134  *		 fc_status	PLOGI or PRLI status completion
3135  *		 fc_pkt_state	PLOGI or PRLI state completion
3136  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3137  *		 fc_pkt_action	PLOGI or PRLI action completion
3138  *
3139  * Return Value: NULL if it failed
3140  *		 Target structure address if it succeeds
3141  */
3142 static struct fcp_tgt *
3143 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3144     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3145 {
3146 	struct fcp_tgt	*ptgt = NULL;
3147 	fc_portmap_t		devlist;
3148 	int			lcount;
3149 	int			error;
3150 
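	/*
	 * Note: the caller holds fcp_global_mutex; it is dropped below
	 * around the PLOGI/PRLI and re-acquired before returning.
	 */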
3151 	*ret_val = 0;
3152 
3153 	/*
3154 	 * Check FC port device & get port map
3155 	 */
3156 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3157 	    &error, 1) == NULL) {
3158 		*ret_val = EIO;
3159 	} else {
3160 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3161 		    &devlist) != FC_SUCCESS) {
3162 			*ret_val = EIO;
3163 		}
3164 	}
3165 
3166 	/* Set port map flags */
3167 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3168 
3169 	/* Allocate target */
3170 	if (*ret_val == 0) {
3171 		lcount = pptr->port_link_cnt;
3172 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3173 		if (ptgt == NULL) {
3174 			fcp_log(CE_WARN, pptr->port_dip,
3175 			    "!FC target allocation failed");
3176 			*ret_val = ENOMEM;
3177 		} else {
3178 			/* Setup target */
3179 			mutex_enter(&ptgt->tgt_mutex);
3180 
3181 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3182 			ptgt->tgt_tmp_cnt	= 1;
3183 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3184 			ptgt->tgt_hard_addr	=
3185 			    devlist.map_hard_addr.hard_addr;
3186 			ptgt->tgt_pd_handle	= devlist.map_pd;
3187 			ptgt->tgt_fca_dev	= NULL;
3188 
3189 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3190 			    FC_WWN_SIZE);
3191 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3192 			    FC_WWN_SIZE);
3193 
3194 			mutex_exit(&ptgt->tgt_mutex);
3195 		}
3196 	}
3197 
3198 	/* Release global mutex for PLOGI and PRLI */
3199 	mutex_exit(&fcp_global_mutex);
3200 
3201 	/* Send PLOGI (If necessary) */
3202 	if (*ret_val == 0) {
3203 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3204 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3205 	}
3206 
3207 	/* Send PRLI (If necessary) */
3208 	if (*ret_val == 0) {
3209 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3210 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3211 	}
3212 
3213 	mutex_enter(&fcp_global_mutex);
3214 
3215 	return (ptgt);
3216 }
3217 
3218 /*
3219  *     Function: fcp_tgt_send_plogi
3220  *
3221  *  Description: This function sends a PLOGI to the target specified by the
3222  *		 caller and waits till it completes.
3223  *
3224  *     Argument: ptgt		Target to send the plogi to.
3225  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3226  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3227  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3228  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3229  *
3230  * Return Value: 0
3231  *		 ENOMEM
3232  *		 EIO
3233  *
3234  *	Context: User context.
3235  */
3236 static int
3237 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3238     int *fc_pkt_reason, int *fc_pkt_action)
3239 {
3240 	struct fcp_port	*pptr;
3241 	struct fcp_ipkt	*icmd;
3242 	struct fc_packet	*fpkt;
3243 	fc_frame_hdr_t		*hp;
3244 	struct la_els_logi	logi;
3245 	int			tcount;
3246 	int			lcount;
3247 	int			ret, login_retval = ~FC_SUCCESS;
3248 
3249 	ret = 0;
3250 
3251 	pptr = ptgt->tgt_port;
3252 
3253 	lcount = pptr->port_link_cnt;
3254 	tcount = ptgt->tgt_change_cnt;
3255 
3256 	/* Alloc internal packet */
3257 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3258 	    sizeof (la_els_logi_t), 0, 0, lcount, tcount, 0,
3259 	    FC_INVALID_RSCN_COUNT);
3260 
3261 	if (icmd == NULL) {
3262 		ret = ENOMEM;
3263 	} else {
3264 		/*
3265 		 * Setup internal packet as sema sync
3266 		 */
3267 		fcp_ipkt_sema_init(icmd);
3268 
3269 		/*
3270 		 * Setup internal packet (icmd)
3271 		 */
3272 		icmd->ipkt_lun		= NULL;
3273 		icmd->ipkt_restart	= 0;
3274 		icmd->ipkt_retries	= 0;
3275 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3276 
3277 		/*
3278 		 * Setup fc_packet
3279 		 */
3280 		fpkt = icmd->ipkt_fpkt;
3281 
3282 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3283 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3284 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3285 
3286 		/*
3287 		 * Setup FC frame header
3288 		 */
3289 		hp = &fpkt->pkt_cmd_fhdr;
3290 
3291 		hp->s_id	= pptr->port_id;	/* source ID */
3292 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3293 		hp->r_ctl	= R_CTL_ELS_REQ;
3294 		hp->type	= FC_TYPE_EXTENDED_LS;
3295 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3296 		hp->seq_id	= 0;
3297 		hp->rsvd	= 0;
3298 		hp->df_ctl	= 0;
3299 		hp->seq_cnt	= 0;
3300 		hp->ox_id	= 0xffff;		/* i.e. none */
3301 		hp->rx_id	= 0xffff;		/* i.e. none */
3302 		hp->ro		= 0;
3303 
3304 		/*
3305 		 * Setup PLOGI
3306 		 */
3307 		bzero(&logi, sizeof (struct la_els_logi));
3308 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3309 
3310 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3311 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3312 
3313 		/*
3314 		 * Send PLOGI
3315 		 */
3316 		*fc_status = login_retval =
3317 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3318 		if (*fc_status != FC_SUCCESS) {
3319 			ret = EIO;
3320 		}
3321 	}
3322 
3323 	/*
3324 	 * Wait for completion
3325 	 */
3326 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3327 		ret = fcp_ipkt_sema_wait(icmd);
3328 
3329 		*fc_pkt_state	= fpkt->pkt_state;
3330 		*fc_pkt_reason	= fpkt->pkt_reason;
3331 		*fc_pkt_action	= fpkt->pkt_action;
3332 	}
3333 
3334 	/*
3335 	 * Cleanup transport data structures if icmd was alloc-ed.  With the
3336 	 * sema sync mechanism the callback only posts the semaphore, so
3337 	 * cleanup always happens in this thread once the wait is over.
3338 	 */
3339 	if (icmd != NULL) {
3340 		fcp_ipkt_sema_cleanup(icmd);
3341 	}
3342 
3343 	return (ret);
3344 }
3345 
3346 /*
3347  *     Function: fcp_tgt_send_prli
3348  *
3349  *  Description: Does nothing as of today.
3350  *
3351  *     Argument: ptgt		Target to send the prli to.
3352  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3353  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3354  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3355  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3356  *
3357  * Return Value: 0
3358  */
3359 /*ARGSUSED*/
3360 static int
3361 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3362     int *fc_pkt_reason, int *fc_pkt_action)
3363 {
3364 	return (0);
3365 }
3366 
3367 /*
3368  *     Function: fcp_ipkt_sema_init
3369  *
3370  *  Description: Initializes the semaphore contained in the internal packet.
3371  *
3372  *     Argument: icmd	Internal packet the semaphore of which must be
3373  *			initialized.
3374  *
3375  * Return Value: None
3376  *
3377  *	Context: User context only.
3378  */
3379 static void
3380 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3381 {
3382 	struct fc_packet	*fpkt;
3383 
3384 	fpkt = icmd->ipkt_fpkt;
3385 
3386 	/* Create semaphore for sync */
3387 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3388 
3389 	/* Setup the completion callback */
3390 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3391 }
3392 
3393 /*
3394  *     Function: fcp_ipkt_sema_wait
3395  *
3396  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3397  *		 semaphore is released in the callback.
3398  *
3399  *     Argument: icmd	Internal packet to wait on for completion.
3400  *
3401  * Return Value: 0
3402  *		 EIO
3403  *		 EBUSY
3404  *		 EAGAIN
3405  *
3406  *	Context: User context only.
3407  *
3408  * This function does a conversion between the field pkt_state of the fc_packet
3409  * embedded in the internal packet (icmd) and the code it returns.
3410  */
3411 static int
3412 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3413 {
3414 	struct fc_packet	*fpkt;
3415 	int	ret;
3416 
3417 	ret = EIO;
3418 	fpkt = icmd->ipkt_fpkt;
3419 
3420 	/*
3421 	 * Wait on semaphore
3422 	 */
3423 	sema_p(&(icmd->ipkt_sema));
3424 
3425 	/*
3426 	 * Check the status of the FC packet
3427 	 */
3428 	switch (fpkt->pkt_state) {
3429 	case FC_PKT_SUCCESS:
3430 		ret = 0;
3431 		break;
3432 	case FC_PKT_LOCAL_RJT:
3433 		switch (fpkt->pkt_reason) {
3434 		case FC_REASON_SEQ_TIMEOUT:
3435 		case FC_REASON_RX_BUF_TIMEOUT:
3436 			ret = EAGAIN;
3437 			break;
3438 		case FC_REASON_PKT_BUSY:
3439 			ret = EBUSY;
3440 			break;
3441 		}
3442 		break;
3443 	case FC_PKT_TIMEOUT:
3444 		ret = EAGAIN;
3445 		break;
3446 	case FC_PKT_LOCAL_BSY:
3447 	case FC_PKT_TRAN_BSY:
3448 	case FC_PKT_NPORT_BSY:
3449 	case FC_PKT_FABRIC_BSY:
3450 		ret = EBUSY;
3451 		break;
3452 	case FC_PKT_LS_RJT:
3453 	case FC_PKT_BA_RJT:
3454 		switch (fpkt->pkt_reason) {
3455 		case FC_REASON_LOGICAL_BSY:
3456 			ret = EBUSY;
3457 			break;
3458 		}
3459 		break;
3460 	case FC_PKT_FS_RJT:
3461 		switch (fpkt->pkt_reason) {
3462 		case FC_REASON_FS_LOGICAL_BUSY:
3463 			ret = EBUSY;
3464 			break;
3465 		}
3466 		break;
3467 	}
3468 
3469 	return (ret);
3470 }
3471 
3472 /*
3473  *     Function: fcp_ipkt_sema_callback
3474  *
3475  *  Description: Registered as the completion callback function for the FC
3476  *		 transport when the ipkt semaphore is used for sync.  It
3477  *		 simply posts the semaphore to wake up the user thread;
3478  *		 the data structures are cleaned up by the waiting thread.
3479  *
3480  *     Argument: fpkt	FC packet (points to the icmd)
3481  *
3482  * Return Value: None
3483  *
3484  *	Context: Called by the FC transport as a completion callback.
3485  */
3486 static void
3487 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3488 {
3489 	struct fcp_ipkt	*icmd;
3490 
3491 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3492 
3493 	/*
3494 	 * Wake up user thread
3495 	 */
3496 	sema_v(&(icmd->ipkt_sema));
3497 }
3498 
3499 /*
3500  *     Function: fcp_ipkt_sema_cleanup
3501  *
3502  *  Description: Called to cleanup (if necessary) the data structures used
3503  *		 when ipkt sema is used for sync.  This function will detect
3504  *		 whether the caller is the last thread (via counter) and
3505  *		 cleanup only if necessary.
3506  *
3507  *     Argument: icmd	Internal command packet
3508  *
3509  * Return Value: None
3510  *
3511  *	Context: User context only
3512  */
3513 static void
3514 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3515 {
3516 	struct fcp_tgt	*ptgt;
3517 	struct fcp_port	*pptr;
3518 
3519 	ptgt = icmd->ipkt_tgt;
3520 	pptr = icmd->ipkt_port;
3521 
3522 	/*
3523 	 * Acquire data structure
3524 	 */
3525 	mutex_enter(&ptgt->tgt_mutex);
3526 
3527 	/*
3528 	 * Destroy semaphore
3529 	 */
3530 	sema_destroy(&(icmd->ipkt_sema));
3531 
3532 	/*
3533 	 * Cleanup internal packet
3534 	 */
3535 	mutex_exit(&ptgt->tgt_mutex);
3536 	fcp_icmd_free(pptr, icmd);
3537 }
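
/*
 * A simplified sketch (not lifted from any single caller) of how the ipkt
 * semaphore helpers above are intended to be used for a synchronous,
 * ioctl-driven command; allocation and error handling are omitted:
 *
 *	fcp_ipkt_sema_init(icmd);			register the callback
 *	if (fc_ulp_transport(port_handle, fpkt) == FC_SUCCESS)
 *		ret = fcp_ipkt_sema_wait(icmd);		blocks until sema_v()
 *	fcp_ipkt_sema_cleanup(icmd);			destroy sema, free icmd
 */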
3538 
3539 /*
3540  *     Function: fcp_port_attach
3541  *
3542  *  Description: Called by the transport framework to resume, suspend or
3543  *		 attach a new port.
3544  *
3545  *     Argument: ulph		Port handle
3546  *		 *pinfo		Port information
3547  *		 cmd		Command
3548  *		 s_id		Port ID
3549  *
3550  * Return Value: FC_FAILURE or FC_SUCCESS
3551  */
3552 /*ARGSUSED*/
3553 static int
3554 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3555     fc_attach_cmd_t cmd, uint32_t s_id)
3556 {
3557 	int	instance;
3558 	int	res = FC_FAILURE; /* default result */
3559 
3560 	ASSERT(pinfo != NULL);
3561 
3562 	instance = ddi_get_instance(pinfo->port_dip);
3563 
3564 	switch (cmd) {
3565 	case FC_CMD_ATTACH:
3566 		/*
3567 		 * this port instance is attaching for the first time (or after
3568 		 * having been detached earlier)
3569 		 */
3570 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3571 		    instance) == DDI_SUCCESS) {
3572 			res = FC_SUCCESS;
3573 		} else {
3574 			ASSERT(ddi_get_soft_state(fcp_softstate,
3575 			    instance) == NULL);
3576 		}
3577 		break;
3578 
3579 	case FC_CMD_RESUME:
3580 	case FC_CMD_POWER_UP:
3581 		/*
3582 		 * this port instance was attached and then suspended and
3583 		 * will now be resumed
3584 		 */
3585 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3586 		    instance) == DDI_SUCCESS) {
3587 			res = FC_SUCCESS;
3588 		}
3589 		break;
3590 
3591 	default:
3592 		/* shouldn't happen */
3593 		FCP_TRACE(fcp_logq, "fcp",
3594 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3595 		    "port_attach: unknown command: %d", cmd);
3596 		break;
3597 	}
3598 
3599 	/* return result */
3600 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3601 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3602 
3603 	return (res);
3604 }
3605 
3606 
3607 /*
3608  * detach or suspend this port instance
3609  *
3610  * acquires and releases the global mutex
3611  *
3612  * acquires and releases the mutex for this port
3613  *
3614  * acquires and releases the hotplug mutex for this port
3615  */
3616 /*ARGSUSED*/
3617 static int
3618 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3619     fc_detach_cmd_t cmd)
3620 {
3621 	int			flag;
3622 	int			instance;
3623 	struct fcp_port		*pptr;
3624 
3625 	instance = ddi_get_instance(info->port_dip);
3626 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3627 
3628 	switch (cmd) {
3629 	case FC_CMD_SUSPEND:
3630 		FCP_DTRACE(fcp_logq, "fcp",
3631 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3632 		    "port suspend called for port %d", instance);
3633 		flag = FCP_STATE_SUSPENDED;
3634 		break;
3635 
3636 	case FC_CMD_POWER_DOWN:
3637 		FCP_DTRACE(fcp_logq, "fcp",
3638 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3639 		    "port power down called for port %d", instance);
3640 		flag = FCP_STATE_POWER_DOWN;
3641 		break;
3642 
3643 	case FC_CMD_DETACH:
3644 		FCP_DTRACE(fcp_logq, "fcp",
3645 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3646 		    "port detach called for port %d", instance);
3647 		flag = FCP_STATE_DETACHING;
3648 		break;
3649 
3650 	default:
3651 		/* shouldn't happen */
3652 		return (FC_FAILURE);
3653 	}
3654 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3655 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3656 
3657 	return (fcp_handle_port_detach(pptr, flag, instance));
3658 }
3659 
3660 
3661 /*
3662  * called for ioctls on the transport's devctl interface, and the transport
3663  * has passed it to us
3664  *
3665  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3666  *
3667  * return FC_SUCCESS if we decide to claim the ioctl,
3668  * else return FC_UNCLAIMED
3669  *
3670  * *rval is set iff we decide to claim the ioctl
3671  */
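/*
 * The handling below is done in two passes: the first switch validates the
 * request and, where needed, allocates the devctl handle and looks up the
 * child dip (or pathinfo node); the second switch performs the actual
 * operation.
 */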
3672 /*ARGSUSED*/
3673 static int
3674 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3675     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3676 {
3677 	int			retval = FC_UNCLAIMED;	/* return value */
3678 	struct fcp_port		*pptr = NULL;		/* our soft state */
3679 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3680 	dev_info_t		*cdip;
3681 	mdi_pathinfo_t		*pip = NULL;
3682 	char			*ndi_nm;		/* NDI name */
3683 	char			*ndi_addr;		/* NDI addr */
3684 	int			is_mpxio, circ;
3685 	int			devi_entered = 0;
3686 	time_t			end_time;
3687 
3688 	ASSERT(rval != NULL);
3689 
3690 	FCP_DTRACE(fcp_logq, "fcp",
3691 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3692 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3693 
3694 	/* if already claimed then forget it */
3695 	if (claimed) {
3696 		/*
3697 		 * for now, if this ioctl has already been claimed, then
3698 		 * we just ignore it
3699 		 */
3700 		return (retval);
3701 	}
3702 
3703 	/* get our port info */
3704 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3705 		fcp_log(CE_WARN, NULL,
3706 		    "!fcp:Invalid port handle in ioctl");
3707 		*rval = ENXIO;
3708 		return (retval);
3709 	}
3710 	is_mpxio = pptr->port_mpxio;
3711 
3712 	switch (cmd) {
3713 	case DEVCTL_BUS_GETSTATE:
3714 	case DEVCTL_BUS_QUIESCE:
3715 	case DEVCTL_BUS_UNQUIESCE:
3716 	case DEVCTL_BUS_RESET:
3717 	case DEVCTL_BUS_RESETALL:
3718 
3719 	case DEVCTL_BUS_DEV_CREATE:
3720 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3721 			return (retval);
3722 		}
3723 		break;
3724 
3725 	case DEVCTL_DEVICE_GETSTATE:
3726 	case DEVCTL_DEVICE_OFFLINE:
3727 	case DEVCTL_DEVICE_ONLINE:
3728 	case DEVCTL_DEVICE_REMOVE:
3729 	case DEVCTL_DEVICE_RESET:
3730 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3731 			return (retval);
3732 		}
3733 
3734 		ASSERT(dcp != NULL);
3735 
3736 		/* ensure we have a name and address */
3737 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3738 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3739 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3740 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3741 			    "ioctl: can't get name (%s) or addr (%s)",
3742 			    ndi_nm ? ndi_nm : "<null ptr>",
3743 			    ndi_addr ? ndi_addr : "<null ptr>");
3744 			ndi_dc_freehdl(dcp);
3745 			return (retval);
3746 		}
3747 
3748 
3749 		/* get our child's DIP */
3750 		ASSERT(pptr != NULL);
3751 		if (is_mpxio) {
3752 			mdi_devi_enter(pptr->port_dip, &circ);
3753 		} else {
3754 			ndi_devi_enter(pptr->port_dip, &circ);
3755 		}
3756 		devi_entered = 1;
3757 
3758 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3759 		    ndi_addr)) == NULL) {
3760 			/* Look for virtually enumerated devices. */
3761 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3762 			if (pip == NULL ||
3763 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3764 				*rval = ENXIO;
3765 				goto out;
3766 			}
3767 		}
3768 		break;
3769 
3770 	default:
3771 		*rval = ENOTTY;
3772 		return (retval);
3773 	}
3774 
3775 	/* this ioctl is ours -- process it */
3776 
3777 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3778 
3779 	/* we assume it will be a success; else we'll set error value */
3780 	*rval = 0;
3781 
3782 
3783 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3784 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3785 	    "ioctl: claiming this one");
3786 
3787 	/* handle ioctls now */
3788 	switch (cmd) {
3789 	case DEVCTL_DEVICE_GETSTATE:
3790 		ASSERT(cdip != NULL);
3791 		ASSERT(dcp != NULL);
3792 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3793 			*rval = EFAULT;
3794 		}
3795 		break;
3796 
3797 	case DEVCTL_DEVICE_REMOVE:
3798 	case DEVCTL_DEVICE_OFFLINE: {
3799 		int			flag = 0;
3800 		int			lcount;
3801 		int			tcount;
3802 		struct fcp_pkt	*head = NULL;
3803 		struct fcp_lun	*plun;
3804 		child_info_t		*cip = CIP(cdip);
3805 		int			all = 1;
3806 		struct fcp_lun	*tplun;
3807 		struct fcp_tgt	*ptgt;
3808 
3809 		ASSERT(pptr != NULL);
3810 		ASSERT(cdip != NULL);
3811 
3812 		mutex_enter(&pptr->port_mutex);
3813 		if (pip != NULL) {
3814 			cip = CIP(pip);
3815 		}
3816 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3817 			mutex_exit(&pptr->port_mutex);
3818 			*rval = ENXIO;
3819 			break;
3820 		}
3821 
3822 		head = fcp_scan_commands(plun);
3823 		if (head != NULL) {
3824 			fcp_abort_commands(head, LUN_PORT);
3825 		}
3826 		lcount = pptr->port_link_cnt;
3827 		tcount = plun->lun_tgt->tgt_change_cnt;
3828 		mutex_exit(&pptr->port_mutex);
3829 
3830 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3831 			flag = NDI_DEVI_REMOVE;
3832 		}
3833 
3834 		if (is_mpxio) {
3835 			mdi_devi_exit(pptr->port_dip, circ);
3836 		} else {
3837 			ndi_devi_exit(pptr->port_dip, circ);
3838 		}
3839 		devi_entered = 0;
3840 
3841 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3842 		    FCP_OFFLINE, lcount, tcount, flag);
3843 
3844 		if (*rval != NDI_SUCCESS) {
3845 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3846 			break;
3847 		}
3848 
3849 		fcp_update_offline_flags(plun);
3850 
3851 		ptgt = plun->lun_tgt;
3852 		mutex_enter(&ptgt->tgt_mutex);
3853 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3854 		    tplun->lun_next) {
3855 			mutex_enter(&tplun->lun_mutex);
3856 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3857 				all = 0;
3858 			}
3859 			mutex_exit(&tplun->lun_mutex);
3860 		}
3861 
3862 		if (all) {
3863 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3864 			/*
3865 			 * The user is unconfiguring/offlining the device.
3866 			 * If fabric and the auto configuration is set
3867 			 * then make sure the user is the only one who
3868 			 * can reconfigure the device.
3869 			 */
3870 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3871 			    fcp_enable_auto_configuration) {
3872 				ptgt->tgt_manual_config_only = 1;
3873 			}
3874 		}
3875 		mutex_exit(&ptgt->tgt_mutex);
3876 		break;
3877 	}
3878 
3879 	case DEVCTL_DEVICE_ONLINE: {
3880 		int			lcount;
3881 		int			tcount;
3882 		struct fcp_lun	*plun;
3883 		child_info_t		*cip = CIP(cdip);
3884 
3885 		ASSERT(cdip != NULL);
3886 		ASSERT(pptr != NULL);
3887 
3888 		mutex_enter(&pptr->port_mutex);
3889 		if (pip != NULL) {
3890 			cip = CIP(pip);
3891 		}
3892 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3893 			mutex_exit(&pptr->port_mutex);
3894 			*rval = ENXIO;
3895 			break;
3896 		}
3897 		lcount = pptr->port_link_cnt;
3898 		tcount = plun->lun_tgt->tgt_change_cnt;
3899 		mutex_exit(&pptr->port_mutex);
3900 
3901 		/*
3902 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3903 		 * to allow the device attach to occur when the device is
3904 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3905 		 * from the scsi_probe()).
3906 		 */
3907 		mutex_enter(&LUN_TGT->tgt_mutex);
3908 		plun->lun_state |= FCP_LUN_ONLINING;
3909 		mutex_exit(&LUN_TGT->tgt_mutex);
3910 
3911 		if (is_mpxio) {
3912 			mdi_devi_exit(pptr->port_dip, circ);
3913 		} else {
3914 			ndi_devi_exit(pptr->port_dip, circ);
3915 		}
3916 		devi_entered = 0;
3917 
3918 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3919 		    FCP_ONLINE, lcount, tcount, 0);
3920 
3921 		if (*rval != NDI_SUCCESS) {
3922 			/* Reset the FCP_LUN_ONLINING bit */
3923 			mutex_enter(&LUN_TGT->tgt_mutex);
3924 			plun->lun_state &= ~FCP_LUN_ONLINING;
3925 			mutex_exit(&LUN_TGT->tgt_mutex);
3926 			*rval = EIO;
3927 			break;
3928 		}
3929 		mutex_enter(&LUN_TGT->tgt_mutex);
3930 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3931 		    FCP_LUN_ONLINING);
3932 		mutex_exit(&LUN_TGT->tgt_mutex);
3933 		break;
3934 	}
3935 
3936 	case DEVCTL_BUS_DEV_CREATE: {
3937 		uchar_t			*bytes = NULL;
3938 		uint_t			nbytes;
3939 		struct fcp_tgt		*ptgt = NULL;
3940 		struct fcp_lun		*plun = NULL;
3941 		dev_info_t		*useless_dip = NULL;
3942 
3943 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3944 		    DEVCTL_CONSTRUCT, &useless_dip);
3945 		if (*rval != 0 || useless_dip == NULL) {
3946 			break;
3947 		}
3948 
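		/*
		 * The caller supplies the remote port WWN as a byte-array
		 * property (PORT_WWN_PROP) on the transient dip created
		 * above; anything that is not exactly FC_WWN_SIZE bytes is
		 * rejected.
		 */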
3949 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3950 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3951 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3952 			*rval = EINVAL;
3953 			(void) ndi_devi_free(useless_dip);
3954 			if (bytes != NULL) {
3955 				ddi_prop_free(bytes);
3956 			}
3957 			break;
3958 		}
3959 
3960 		*rval = fcp_create_on_demand(pptr, bytes);
3961 		if (*rval == 0) {
3962 			mutex_enter(&pptr->port_mutex);
3963 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
3964 			if (ptgt) {
3965 				/*
3966 				 * We now have a pointer to the target that
3967 				 * was created. Let's point to the first LUN on
3968 				 * this new target.
3969 				 */
3970 				mutex_enter(&ptgt->tgt_mutex);
3971 
3972 				plun = ptgt->tgt_lun;
3973 				/*
3974 				 * There may be stale/offline LUN entries on
3975 				 * this list (this is by design) and so we have
3976 				 * to make sure we point to the first online
3977 				 * LUN
3978 				 */
3979 				while (plun &&
3980 				    plun->lun_state & FCP_LUN_OFFLINE) {
3981 					plun = plun->lun_next;
3982 				}
3983 
3984 				mutex_exit(&ptgt->tgt_mutex);
3985 			}
3986 			mutex_exit(&pptr->port_mutex);
3987 		}
3988 
3989 		if (*rval == 0 && ptgt && plun) {
3990 			mutex_enter(&plun->lun_mutex);
3991 			/*
3992 			 * Allow up to fcp_lun_ready_retry seconds to
3993 			 * configure all the luns behind the target.
3994 			 *
3995 			 * The intent here is to allow targets with long
3996 			 * reboot/reset-recovery times to become available
3997 			 * while limiting the maximum wait time for an
3998 			 * unresponsive target.
3999 			 */
4000 			end_time = ddi_get_lbolt() +
4001 			    SEC_TO_TICK(fcp_lun_ready_retry);
4002 
4003 			while (ddi_get_lbolt() < end_time) {
4004 				retval = FC_SUCCESS;
4005 
4006 				/*
4007 				 * The new ndi interfaces for on-demand creation
4008 				 * are inflexible, so do some more work to pass on
4009 				 * the path name of some LUN (the design is broken!)
4010 				 */
4011 				if (plun->lun_cip) {
4012 					if (plun->lun_mpxio == 0) {
4013 						cdip = DIP(plun->lun_cip);
4014 					} else {
4015 						cdip = mdi_pi_get_client(
4016 						    PIP(plun->lun_cip));
4017 					}
4018 					if (cdip == NULL) {
4019 						*rval = ENXIO;
4020 						break;
4021 					}
4022 
4023 					if (!i_ddi_devi_attached(cdip)) {
4024 						mutex_exit(&plun->lun_mutex);
4025 						delay(drv_usectohz(1000000));
4026 						mutex_enter(&plun->lun_mutex);
4027 					} else {
4028 						/*
4029 						 * This Lun is ready, lets
4030 						 * This LUN is ready; let's
4031 						 */
4032 						mutex_exit(&plun->lun_mutex);
4033 						plun = plun->lun_next;
4034 						while (plun && (plun->lun_state
4035 						    & FCP_LUN_OFFLINE)) {
4036 							plun = plun->lun_next;
4037 						}
4038 						if (!plun) {
4039 							break;
4040 						}
4041 						mutex_enter(&plun->lun_mutex);
4042 					}
4043 				} else {
4044 					/*
4045 					 * lun_cip field for a valid lun
4046 					 * should never be NULL. Fail the
4047 					 * command.
4048 					 */
4049 					*rval = ENXIO;
4050 					break;
4051 				}
4052 			}
4053 			if (plun) {
4054 				mutex_exit(&plun->lun_mutex);
4055 			} else {
4056 				char devnm[MAXNAMELEN];
4057 				int nmlen;
4058 
4059 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4060 				    ddi_node_name(cdip),
4061 				    ddi_get_name_addr(cdip));
4062 
4063 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4064 				    0) {
4065 					*rval = EFAULT;
4066 				}
4067 			}
4068 		} else {
4069 			int	i;
4070 			char	buf[25];
4071 
4072 			for (i = 0; i < FC_WWN_SIZE; i++) {
4073 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4074 			}
4075 
4076 			fcp_log(CE_WARN, pptr->port_dip,
4077 			    "!Failed to create nodes for pwwn=%s; error=%x",
4078 			    buf, *rval);
4079 		}
4080 
4081 		(void) ndi_devi_free(useless_dip);
4082 		ddi_prop_free(bytes);
4083 		break;
4084 	}
4085 
4086 	case DEVCTL_DEVICE_RESET: {
4087 		struct fcp_lun		*plun;
4088 		child_info_t		*cip = CIP(cdip);
4089 
4090 		ASSERT(cdip != NULL);
4091 		ASSERT(pptr != NULL);
4092 		mutex_enter(&pptr->port_mutex);
4093 		if (pip != NULL) {
4094 			cip = CIP(pip);
4095 		}
4096 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4097 			mutex_exit(&pptr->port_mutex);
4098 			*rval = ENXIO;
4099 			break;
4100 		}
4101 		mutex_exit(&pptr->port_mutex);
4102 
4103 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4104 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4105 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4106 
4107 			*rval = ENXIO;
4108 			break;
4109 		}
4110 
4111 		if (plun->lun_sd == NULL) {
4112 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4113 
4114 			*rval = ENXIO;
4115 			break;
4116 		}
4117 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4118 
4119 		/*
4120 		 * use the address in the scsi_device so that
4121 		 * fcp_scsi_reset() can figure out which target to reset
4122 		 */
4123 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4124 		    RESET_TARGET) == FALSE) {
4125 			*rval = EIO;
4126 		}
4127 		break;
4128 	}
4129 
4130 	case DEVCTL_BUS_GETSTATE:
4131 		ASSERT(dcp != NULL);
4132 		ASSERT(pptr != NULL);
4133 		ASSERT(pptr->port_dip != NULL);
4134 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4135 		    NDI_SUCCESS) {
4136 			*rval = EFAULT;
4137 		}
4138 		break;
4139 
4140 	case DEVCTL_BUS_QUIESCE:
4141 	case DEVCTL_BUS_UNQUIESCE:
4142 		*rval = ENOTSUP;
4143 		break;
4144 
4145 	case DEVCTL_BUS_RESET:
4146 	case DEVCTL_BUS_RESETALL:
4147 		ASSERT(pptr != NULL);
4148 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4149 		break;
4150 
4151 	default:
4152 		ASSERT(dcp != NULL);
4153 		*rval = ENOTTY;
4154 		break;
4155 	}
4156 
4157 	/* all done -- clean up and return */
4158 out:	if (devi_entered) {
4159 		if (is_mpxio) {
4160 			mdi_devi_exit(pptr->port_dip, circ);
4161 		} else {
4162 			ndi_devi_exit(pptr->port_dip, circ);
4163 		}
4164 	}
4165 
4166 	if (dcp != NULL) {
4167 		ndi_dc_freehdl(dcp);
4168 	}
4169 
4170 	return (retval);
4171 }
4172 
4173 
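/*
 *     Function: fcp_els_callback
 *
 *  Description: Called by the transport for unsolicited ELS frames.  Only
 *		 unsolicited PRLI requests are of interest to FCP: when the
 *		 port is not detaching, suspended or powered down, the PRLI
 *		 is handed to fcp_unsol_prli().  Everything else is left
 *		 unclaimed.
 *
 *     Argument: ulph		Port handle
 *		 port_handle	Handle used to look up the fcp_port
 *		 *buf		Unsolicited buffer from the transport
 *		 claimed	Non-zero if another ULP already claimed it
 *
 * Return Value: FC_SUCCESS or FC_UNCLAIMED
 */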
4174 /*ARGSUSED*/
4175 static int
4176 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4177     uint32_t claimed)
4178 {
4179 	uchar_t			r_ctl;
4180 	uchar_t			ls_code;
4181 	struct fcp_port	*pptr;
4182 
4183 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4184 		return (FC_UNCLAIMED);
4185 	}
4186 
4187 	mutex_enter(&pptr->port_mutex);
4188 	if (pptr->port_state & (FCP_STATE_DETACHING |
4189 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4190 		mutex_exit(&pptr->port_mutex);
4191 		return (FC_UNCLAIMED);
4192 	}
4193 	mutex_exit(&pptr->port_mutex);
4194 
4195 	r_ctl = buf->ub_frame.r_ctl;
4196 
4197 	switch (r_ctl & R_CTL_ROUTING) {
4198 	case R_CTL_EXTENDED_SVC:
4199 		if (r_ctl == R_CTL_ELS_REQ) {
4200 			ls_code = buf->ub_buffer[0];
4201 
4202 			switch (ls_code) {
4203 			case LA_ELS_PRLI:
4204 				/*
4205 				 * We really don't care if something fails.
4206 				 * If the PRLI was not sent out, then the
4207 				 * other end will time it out.
4208 				 */
4209 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4210 					return (FC_SUCCESS);
4211 				}
4212 				return (FC_UNCLAIMED);
4213 				/* NOTREACHED */
4214 
4215 			default:
4216 				break;
4217 			}
4218 		}
4219 		/* FALLTHROUGH */
4220 
4221 	default:
4222 		return (FC_UNCLAIMED);
4223 	}
4224 }
4225 
4226 
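/*
 *     Function: fcp_data_callback
 *
 *  Description: Unsolicited data callback registered with the transport.
 *		 FCP has no use for unsolicited data frames, so the buffer
 *		 is always left unclaimed.
 *
 * Return Value: FC_UNCLAIMED
 */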
4227 /*ARGSUSED*/
4228 static int
4229 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4230     uint32_t claimed)
4231 {
4232 	return (FC_UNCLAIMED);
4233 }
4234 
4235 /*
4236  *     Function: fcp_statec_callback
4237  *
4238  *  Description: The purpose of this function is to handle a port state change.
4239  *		 It is called from fp/fctl and, in a few instances, internally.
4240  *
4241  *     Argument: ulph		fp/fctl port handle
4242  *		 port_handle	fcp_port structure
4243  *		 port_state	Physical state of the port
4244  *		 port_top	Topology
4245  *		 *devlist	Pointer to the first entry of a table
4246  *				containing the remote ports that can be
4247  *				reached.
4248  *		 dev_cnt	Number of entries pointed by devlist.
4249  *		 port_sid	Port ID of the local port.
4250  *
4251  * Return Value: None
4252  */
4253 /*ARGSUSED*/
4254 static void
4255 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4256     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4257     uint32_t dev_cnt, uint32_t port_sid)
4258 {
4259 	uint32_t		link_count;
4260 	int			map_len = 0;
4261 	struct fcp_port	*pptr;
4262 	fcp_map_tag_t		*map_tag = NULL;
4263 
4264 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4265 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4266 		return;			/* nothing to work with! */
4267 	}
4268 
4269 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4270 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4271 	    "fcp_statec_callback: port state/dev_cnt/top ="
4272 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4273 	    dev_cnt, port_top);
4274 
4275 	mutex_enter(&pptr->port_mutex);
4276 
4277 	/*
4278 	 * If a thread is in detach, don't do anything.
4279 	 */
4280 	if (pptr->port_state & (FCP_STATE_DETACHING |
4281 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4282 		mutex_exit(&pptr->port_mutex);
4283 		return;
4284 	}
4285 
4286 	/*
4287 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4288 	 * init_pkt is called, it knows whether or not the target's status
4289 	 * (or pd) might be changing.
4290 	 */
4291 
4292 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4293 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4294 	}
4295 
4296 	/*
4297 	 * the transport doesn't allocate or probe unless being
4298 	 * asked to by either the applications or ULPs
4299 	 *
4300 	 * in cases where the port is OFFLINE at the time of port
4301 	 * attach callback and the link comes ONLINE later, for
4302 	 * easier automatic node creation (i.e. without you having to
4303 	 * go out and run the utility to perform LOGINs) the
4304 	 * following conditional is helpful
4305 	 */
4306 	pptr->port_phys_state = port_state;
4307 
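	/*
	 * One change-count tag is allocated per map entry.  The port mutex
	 * is dropped around the allocation; if the allocation fails the
	 * state change is not processed.
	 */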
4308 	if (dev_cnt) {
4309 		mutex_exit(&pptr->port_mutex);
4310 
4311 		map_len = sizeof (*map_tag) * dev_cnt;
4312 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4313 		if (map_tag == NULL) {
4314 			fcp_log(CE_WARN, pptr->port_dip,
4315 			    "!fcp%d: failed to allocate for map tags; "
4316 			    "state change will not be processed",
4317 			    pptr->port_instance);
4318 
4319 			mutex_enter(&pptr->port_mutex);
4320 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4321 			mutex_exit(&pptr->port_mutex);
4322 
4323 			return;
4324 		}
4325 
4326 		mutex_enter(&pptr->port_mutex);
4327 	}
4328 
4329 	if (pptr->port_id != port_sid) {
4330 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4331 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4332 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4333 		    port_sid);
4334 		/*
4335 		 * The local port changed ID. It is the first time a port ID
4336 		 * is assigned or something drastic happened.  We might have
4337 		 * been unplugged and replugged on another loop or fabric port
4338 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4339 		 * the fabric we were plugged into.
4340 		 */
4341 		pptr->port_id = port_sid;
4342 	}
4343 
4344 	switch (FC_PORT_STATE_MASK(port_state)) {
4345 	case FC_STATE_OFFLINE:
4346 	case FC_STATE_RESET_REQUESTED:
4347 		/*
4348 		 * link has gone from online to offline -- just update the
4349 		 * state of this port to BUSY and MARKed to go offline
4350 		 */
4351 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4352 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4353 		    "link went offline");
4354 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4355 			/*
4356 			 * We were offline a while ago and this one
4357 			 * seems to indicate that the loop has gone
4358 			 * dead forever.
4359 			 */
4360 			pptr->port_tmp_cnt += dev_cnt;
4361 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4362 			pptr->port_state |= FCP_STATE_INIT;
4363 			link_count = pptr->port_link_cnt;
4364 			fcp_handle_devices(pptr, devlist, dev_cnt,
4365 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4366 		} else {
4367 			pptr->port_link_cnt++;
4368 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4369 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4370 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4371 			if (pptr->port_mpxio) {
4372 				fcp_update_mpxio_path_verifybusy(pptr);
4373 			}
4374 			pptr->port_state |= FCP_STATE_OFFLINE;
4375 			pptr->port_state &=
4376 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4377 			pptr->port_tmp_cnt = 0;
4378 		}
4379 		mutex_exit(&pptr->port_mutex);
4380 		break;
4381 
4382 	case FC_STATE_ONLINE:
4383 	case FC_STATE_LIP:
4384 	case FC_STATE_LIP_LBIT_SET:
4385 		/*
4386 		 * link has gone from offline to online
4387 		 */
4388 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 		    "link went online");
4391 
4392 		pptr->port_link_cnt++;
4393 
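		/*
		 * Drain any outstanding internal packets (polling once a
		 * second with the port mutex dropped) before acting on the
		 * new link state.
		 */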
4394 		while (pptr->port_ipkt_cnt) {
4395 			mutex_exit(&pptr->port_mutex);
4396 			delay(drv_usectohz(1000000));
4397 			mutex_enter(&pptr->port_mutex);
4398 		}
4399 
4400 		pptr->port_topology = port_top;
4401 
4402 		/*
4403 		 * The state of the targets and luns accessible through this
4404 		 * port is updated.
4405 		 */
4406 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4407 		    FCP_CAUSE_LINK_CHANGE);
4408 
4409 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4410 		pptr->port_state |= FCP_STATE_ONLINING;
4411 		pptr->port_tmp_cnt = dev_cnt;
4412 		link_count = pptr->port_link_cnt;
4413 
4414 		pptr->port_deadline = fcp_watchdog_time +
4415 		    FCP_ICMD_DEADLINE;
4416 
4417 		if (!dev_cnt) {
4418 			/*
4419 			 * We go directly to the online state if no remote
4420 			 * ports were discovered.
4421 			 */
4422 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4423 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4424 			    "No remote ports discovered");
4425 
4426 			pptr->port_state &= ~FCP_STATE_ONLINING;
4427 			pptr->port_state |= FCP_STATE_ONLINE;
4428 		}
4429 
4430 		switch (port_top) {
4431 		case FC_TOP_FABRIC:
4432 		case FC_TOP_PUBLIC_LOOP:
4433 		case FC_TOP_PRIVATE_LOOP:
4434 		case FC_TOP_PT_PT:
4435 
4436 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4437 				fcp_retry_ns_registry(pptr, port_sid);
4438 			}
4439 
4440 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4441 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4442 			break;
4443 
4444 		default:
4445 			/*
4446 			 * We got here because we were provided with an unknown
4447 			 * topology.
4448 			 */
4449 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4450 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4451 			}
4452 
4453 			pptr->port_tmp_cnt -= dev_cnt;
4454 			fcp_log(CE_WARN, pptr->port_dip,
4455 			    "!unknown/unsupported topology (0x%x)", port_top);
4456 			break;
4457 		}
4458 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4459 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4460 		    "Notify ssd of the reset to reinstate the reservations");
4461 
4462 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4463 		    &pptr->port_reset_notify_listf);
4464 
4465 		mutex_exit(&pptr->port_mutex);
4466 
4467 		break;
4468 
4469 	case FC_STATE_RESET:
4470 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4471 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4472 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4473 		    "RESET state, waiting for Offline/Online state_cb");
4474 		mutex_exit(&pptr->port_mutex);
4475 		break;
4476 
4477 	case FC_STATE_DEVICE_CHANGE:
4478 		/*
4479 		 * We come here when an application has requested
4480 		 * Dynamic node creation/deletion in Fabric connectivity.
4481 		 */
4482 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4483 		    FCP_STATE_INIT)) {
4484 			/*
4485 			 * This case can happen when the FCTL is in the
4486 			 * process of giving us an online and the host on
4487 			 * the other side issues a PLOGI/PLOGO. Ideally
4488 			 * the state changes should be serialized unless
4489 			 * they are opposite (online-offline).
4490 			 * The transport will give us a final state change
4491 			 * so we can ignore this for the time being.
4492 			 */
4493 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4494 			mutex_exit(&pptr->port_mutex);
4495 			break;
4496 		}
4497 
4498 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4499 			fcp_retry_ns_registry(pptr, port_sid);
4500 		}
4501 
4502 		/*
4503 		 * Extend the deadline under steady state conditions
4504 		 * to provide more time for the device-change-commands
4505 		 */
4506 		if (!pptr->port_ipkt_cnt) {
4507 			pptr->port_deadline = fcp_watchdog_time +
4508 			    FCP_ICMD_DEADLINE;
4509 		}
4510 
4511 		/*
4512 		 * There is another race condition here, where if we were
4513 		 * in ONLINING state and a device in the map logs out,
4514 		 * fp will give another state change as DEVICE_CHANGE
4515 		 * and OLD. This will result in that target being offlined.
4516 		 * The pd_handle is freed. If from the first statec callback
4517 		 * we were going to fire a PLOGI/PRLI, the system will
4518 		 * panic in fc_ulp_transport with invalid pd_handle.
4519 		 * The fix is to check for the link_cnt before issuing
4520 		 * any command down.
4521 		 */
4522 		fcp_update_targets(pptr, devlist, dev_cnt,
4523 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4524 
4525 		link_count = pptr->port_link_cnt;
4526 
4527 		fcp_handle_devices(pptr, devlist, dev_cnt,
4528 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4529 
4530 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 
4532 		mutex_exit(&pptr->port_mutex);
4533 		break;
4534 
4535 	case FC_STATE_TARGET_PORT_RESET:
4536 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4537 			fcp_retry_ns_registry(pptr, port_sid);
4538 		}
4539 
4540 		/* Do nothing else */
4541 		mutex_exit(&pptr->port_mutex);
4542 		break;
4543 
4544 	default:
4545 		fcp_log(CE_WARN, pptr->port_dip,
4546 		    "!Invalid state change=0x%x", port_state);
4547 		mutex_exit(&pptr->port_mutex);
4548 		break;
4549 	}
4550 
4551 	if (map_tag) {
4552 		kmem_free(map_tag, map_len);
4553 	}
4554 }
4555 
4556 /*
4557  *     Function: fcp_handle_devices
4558  *
4559  *  Description: This function updates the devices currently known by
4560  *		 walking the list provided by the caller.  The list passed
4561  *		 by the caller is supposed to be the list of reachable
4562  *		 devices.
4563  *
4564  *     Argument: *pptr		Fcp port structure.
4565  *		 *devlist	Pointer to the first entry of a table
4566  *				containing the remote ports that can be
4567  *				reached.
4568  *		 dev_cnt	Number of entries pointed by devlist.
4569  *		 link_cnt	Link state count.
4570  *		 *map_tag	Array of fcp_map_tag_t structures.
4571  *		 cause		What caused this function to be called.
4572  *
4573  * Return Value: None
4574  *
4575  *	  Notes: The pptr->port_mutex must be held.
4576  */
4577 static void
4578 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4579     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4580 {
4581 	int			i;
4582 	int			check_finish_init = 0;
4583 	fc_portmap_t		*map_entry;
4584 	struct fcp_tgt	*ptgt = NULL;
4585 
4586 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4587 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4588 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4589 
4590 	if (dev_cnt) {
4591 		ASSERT(map_tag != NULL);
4592 	}
4593 
4594 	/*
4595 	 * The following code goes through the list of remote ports that are
4596 	 * accessible through this (pptr) local port (The list walked is the
4597 	 * one provided by the caller which is the list of the remote ports
4598 	 * currently reachable).  It checks if any of them was already
4599 	 * known by looking for the corresponding target structure based on
4600 	 * the world wide name.	 If a target is part of the list it is tagged
4601 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4602 	 *
4603 	 * Old comment
4604 	 * -----------
4605 	 * Before we drop the port mutex, we MUST get the tags updated; this
4606 	 * two-step process is somewhat slow, but more reliable.
4607 	 */
4608 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4609 		map_entry = &(devlist[i]);
4610 
4611 		/*
4612 		 * get ptr to this map entry in our port's
4613 		 * list (if any)
4614 		 */
4615 		ptgt = fcp_lookup_target(pptr,
4616 		    (uchar_t *)&(map_entry->map_pwwn));
4617 
4618 		if (ptgt) {
4619 			map_tag[i] = ptgt->tgt_change_cnt;
4620 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4621 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4622 			}
4623 		}
4624 	}
4625 
4626 	/*
4627 	 * At this point we know which devices of the new list were already
4628 	 * known (The field tgt_aux_state of the target structure has been
4629 	 * set to FCP_TGT_TAGGED).
4630 	 *
4631 	 * The following code goes through the list of targets currently known
4632 	 * by the local port (the list is actually a hashing table).  If a
4633 	 * target is found and is not tagged, it means the target cannot
4634 	 * be reached anymore through the local port (pptr).  It is offlined.
4635 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4636 	 */
4637 	for (i = 0; i < FCP_NUM_HASH; i++) {
4638 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4639 		    ptgt = ptgt->tgt_next) {
4640 			mutex_enter(&ptgt->tgt_mutex);
4641 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4642 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4643 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4644 				fcp_offline_target_now(pptr, ptgt,
4645 				    link_cnt, ptgt->tgt_change_cnt, 0);
4646 			}
4647 			mutex_exit(&ptgt->tgt_mutex);
4648 		}
4649 	}
4650 
4651 	/*
4652 	 * At this point, the devices that were known but cannot be reached
4653 	 * anymore, have most likely been offlined.
4654 	 *
4655 	 * The following section of code seems to go through the list of
4656 	 * remote ports that can now be reached.  For every single one it
4657 	 * checks if it is already known or if it is a new port.
4658 	 */
4659 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4660 
4661 		if (check_finish_init) {
4662 			ASSERT(i > 0);
4663 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4664 			    map_tag[i - 1], cause);
4665 			check_finish_init = 0;
4666 		}
4667 
4668 		/* get a pointer to this map entry */
4669 		map_entry = &(devlist[i]);
4670 
4671 		/*
4672 		 * Check for the duplicate map entry flag. If we have marked
4673 		 * this entry as a duplicate we skip it since the correct
4674 		 * (perhaps even same) state change will be encountered
4675 		 * later in the list.
4676 		 */
4677 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4678 			continue;
4679 		}
4680 
4681 		/* get ptr to this map entry in our port's list (if any) */
4682 		ptgt = fcp_lookup_target(pptr,
4683 		    (uchar_t *)&(map_entry->map_pwwn));
4684 
4685 		if (ptgt) {
4686 			/*
4687 			 * This device was already known.  The field
4688 			 * tgt_aux_state is reset (was probably set to
4689 			 * FCP_TGT_TAGGED previously in this routine).
4690 			 */
4691 			ptgt->tgt_aux_state = 0;
4692 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4693 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4694 			    "handle_devices: map did/state/type/flags = "
4695 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4696 			    "tgt_state=%d",
4697 			    map_entry->map_did.port_id, map_entry->map_state,
4698 			    map_entry->map_type, map_entry->map_flags,
4699 			    ptgt->tgt_d_id, ptgt->tgt_state);
4700 		}
4701 
4702 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4703 		    map_entry->map_type == PORT_DEVICE_NEW ||
4704 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4705 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4706 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4707 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4708 			    "map_type=%x, did = %x",
4709 			    map_entry->map_type,
4710 			    map_entry->map_did.port_id);
4711 		}
4712 
4713 		switch (map_entry->map_type) {
4714 		case PORT_DEVICE_NOCHANGE:
4715 		case PORT_DEVICE_USER_CREATE:
4716 		case PORT_DEVICE_USER_LOGIN:
4717 		case PORT_DEVICE_NEW:
4718 		case PORT_DEVICE_REPORTLUN_CHANGED:
4719 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4720 
4721 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4722 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4723 			    cause) == TRUE) {
4724 
4725 				FCP_TGT_TRACE(ptgt, map_tag[i],
4726 				    FCP_TGT_TRACE_2);
4727 				check_finish_init++;
4728 			}
4729 			break;
4730 
4731 		case PORT_DEVICE_OLD:
4732 			if (ptgt != NULL) {
4733 				FCP_TGT_TRACE(ptgt, map_tag[i],
4734 				    FCP_TGT_TRACE_3);
4735 
4736 				mutex_enter(&ptgt->tgt_mutex);
4737 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4738 					/*
4739 					 * Must do an in-line wait for I/Os
4740 					 * to get drained
4741 					 */
4742 					mutex_exit(&ptgt->tgt_mutex);
4743 					mutex_exit(&pptr->port_mutex);
4744 
4745 					mutex_enter(&ptgt->tgt_mutex);
4746 					while (ptgt->tgt_ipkt_cnt ||
4747 					    fcp_outstanding_lun_cmds(ptgt)
4748 					    == FC_SUCCESS) {
4749 						mutex_exit(&ptgt->tgt_mutex);
4750 						delay(drv_usectohz(1000000));
4751 						mutex_enter(&ptgt->tgt_mutex);
4752 					}
4753 					mutex_exit(&ptgt->tgt_mutex);
4754 
4755 					mutex_enter(&pptr->port_mutex);
4756 					mutex_enter(&ptgt->tgt_mutex);
4757 
4758 					(void) fcp_offline_target(pptr, ptgt,
4759 					    link_cnt, map_tag[i], 0, 0);
4760 				}
4761 				mutex_exit(&ptgt->tgt_mutex);
4762 			}
4763 			check_finish_init++;
4764 			break;
4765 
4766 		case PORT_DEVICE_USER_DELETE:
4767 		case PORT_DEVICE_USER_LOGOUT:
4768 			if (ptgt != NULL) {
4769 				FCP_TGT_TRACE(ptgt, map_tag[i],
4770 				    FCP_TGT_TRACE_4);
4771 
4772 				mutex_enter(&ptgt->tgt_mutex);
4773 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4774 					(void) fcp_offline_target(pptr, ptgt,
4775 					    link_cnt, map_tag[i], 1, 0);
4776 				}
4777 				mutex_exit(&ptgt->tgt_mutex);
4778 			}
4779 			check_finish_init++;
4780 			break;
4781 
4782 		case PORT_DEVICE_CHANGED:
4783 			if (ptgt != NULL) {
4784 				FCP_TGT_TRACE(ptgt, map_tag[i],
4785 				    FCP_TGT_TRACE_5);
4786 
4787 				if (fcp_device_changed(pptr, ptgt,
4788 				    map_entry, link_cnt, map_tag[i],
4789 				    cause) == TRUE) {
4790 					check_finish_init++;
4791 				}
4792 			} else {
4793 				if (fcp_handle_mapflags(pptr, ptgt,
4794 				    map_entry, link_cnt, 0, cause) == TRUE) {
4795 					check_finish_init++;
4796 				}
4797 			}
4798 			break;
4799 
4800 		default:
4801 			fcp_log(CE_WARN, pptr->port_dip,
4802 			    "!Invalid map_type=0x%x", map_entry->map_type);
4803 			check_finish_init++;
4804 			break;
4805 		}
4806 	}
4807 
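	/*
	 * Account for the last map entry processed in the loop above.  If
	 * the map was empty, every device on this port is offlined
	 * (provided the link has not changed again in the meantime).
	 */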
4808 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4809 		ASSERT(i > 0);
4810 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4811 		    map_tag[i-1], cause);
4812 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4813 		fcp_offline_all(pptr, link_cnt, cause);
4814 	}
4815 }
4816 
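/*
 *     Function: fcp_handle_reportlun_changed
 *
 *  Description: Handles a map entry of type PORT_DEVICE_REPORTLUN_CHANGED.
 *		 LUN 0 is allocated if it does not exist yet, marked busy,
 *		 and a REPORT LUNS command is sent to the target to
 *		 rediscover its LUNs.
 *
 *     Argument: *ptgt	Target structure.
 *		 cause	What caused this function to be called.
 *
 * Return Value: TRUE	Failed
 *		 FALSE	REPORT LUNS was successfully sent
 */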
4817 static int
4818 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4819 {
4820 	struct fcp_lun	*plun;
4821 	struct fcp_port *pptr;
4822 	int		 rscn_count;
4823 	int		 lun0_newalloc;
4824 	int		 ret  = TRUE;
4825 
4826 	ASSERT(ptgt);
4827 	pptr = ptgt->tgt_port;
4828 	lun0_newalloc = 0;
4829 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4830 		/*
4831 		 * no LUN struct for LUN 0 yet exists,
4832 		 * so create one
4833 		 */
4834 		plun = fcp_alloc_lun(ptgt);
4835 		if (plun == NULL) {
4836 			fcp_log(CE_WARN, pptr->port_dip,
4837 			    "!Failed to allocate lun 0 for"
4838 			    " D_ID=%x", ptgt->tgt_d_id);
4839 			return (ret);
4840 		}
4841 		lun0_newalloc = 1;
4842 	}
4843 
4844 	mutex_enter(&ptgt->tgt_mutex);
4845 	/*
4846 	 * consider lun 0 as device not connected if it is
4847 	 * offlined or newly allocated
4848 	 */
4849 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4850 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4851 	}
4852 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4853 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4854 	ptgt->tgt_lun_cnt = 1;
4855 	ptgt->tgt_report_lun_cnt = 0;
4856 	mutex_exit(&ptgt->tgt_mutex);
4857 
4858 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4859 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4860 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4861 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4862 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4863 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4864 		    "to D_ID=%x", ptgt->tgt_d_id);
4865 	} else {
4866 		ret = FALSE;
4867 	}
4868 
4869 	return (ret);
4870 }
4871 
4872 /*
4873  *     Function: fcp_handle_mapflags
4874  *
4875  *  Description: This function creates a target structure if the ptgt passed
4876  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4877  *		 into the target yet or the PRLI if we are logged into the
4878  *		 target already.  The rest of the treatment is done in the
4879  *		 callbacks of the PLOGI or PRLI.
4880  *
4881  *     Argument: *pptr		FCP Port structure.
4882  *		 *ptgt		Target structure.
4883  *		 *map_entry	Array of fc_portmap_t structures.
4884  *		 link_cnt	Link state count.
4885  *		 tgt_cnt	Target state count.
4886  *		 cause		What caused this function to be called.
4887  *
4888  * Return Value: TRUE	Failed
4889  *		 FALSE	Succeeded
4890  *
4891  *	  Notes: pptr->port_mutex must be owned.
4892  */
4893 static int
4894 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4895     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4896 {
4897 	int			lcount;
4898 	int			tcount;
4899 	int			ret = TRUE;
4900 	int			alloc;
4901 	struct fcp_ipkt	*icmd;
4902 	struct fcp_lun	*pseq_lun = NULL;
4903 	uchar_t			opcode;
4904 	int			valid_ptgt_was_passed = FALSE;
4905 
4906 	ASSERT(mutex_owned(&pptr->port_mutex));
4907 
4908 	/*
4909 	 * This case is possible when the FCTL has come up and done discovery
4910 	 * before FCP was loaded and attached. FCTL would have discovered the
4911 	 * devices and later the ULP came online. In this case ULPs would get
4912 	 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4913 	 */
4914 	if (ptgt == NULL) {
4915 		/* don't already have a target */
4916 		mutex_exit(&pptr->port_mutex);
4917 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4918 		mutex_enter(&pptr->port_mutex);
4919 
4920 		if (ptgt == NULL) {
4921 			fcp_log(CE_WARN, pptr->port_dip,
4922 			    "!FC target allocation failed");
4923 			return (ret);
4924 		}
4925 		mutex_enter(&ptgt->tgt_mutex);
4926 		ptgt->tgt_statec_cause = cause;
4927 		ptgt->tgt_tmp_cnt = 1;
4928 		mutex_exit(&ptgt->tgt_mutex);
4929 	} else {
4930 		valid_ptgt_was_passed = TRUE;
4931 	}
4932 
4933 	/*
4934 	 * Copy in the target parameters
4935 	 */
4936 	mutex_enter(&ptgt->tgt_mutex);
4937 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4938 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4939 	ptgt->tgt_pd_handle = map_entry->map_pd;
4940 	ptgt->tgt_fca_dev = NULL;
4941 
4942 	/* Copy port and node WWNs */
4943 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4944 	    FC_WWN_SIZE);
4945 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4946 	    FC_WWN_SIZE);
4947 
4948 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4949 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4950 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4951 	    valid_ptgt_was_passed) {
4952 		/*
4953 		 * determine if there are any tape LUNs on this target
4954 		 */
4955 		for (pseq_lun = ptgt->tgt_lun;
4956 		    pseq_lun != NULL;
4957 		    pseq_lun = pseq_lun->lun_next) {
4958 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4959 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4960 				fcp_update_tgt_state(ptgt, FCP_RESET,
4961 				    FCP_LUN_MARK);
4962 				mutex_exit(&ptgt->tgt_mutex);
4963 				return (ret);
4964 			}
4965 		}
4966 	}
4967 
4968 	/*
4969 	 * If a REPORT_LUN_CHANGED unit attention (UA) was received, send
4970 	 * out a REPORT LUNS promptly and skip the PLOGI/PRLI process.
4971 	 */
4972 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
4973 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
4974 		mutex_exit(&ptgt->tgt_mutex);
4975 		mutex_exit(&pptr->port_mutex);
4976 
4977 		ret = fcp_handle_reportlun_changed(ptgt, cause);
4978 
4979 		mutex_enter(&pptr->port_mutex);
4980 		return (ret);
4981 	}
4982 
4983 	/*
4984 	 * If ptgt was NULL when this function was entered, then tgt_node_state
4985 	 * was never specifically initialized but zeroed out which means
4986 	 * FCP_TGT_NODE_NONE.
4987 	 */
4988 	switch (ptgt->tgt_node_state) {
4989 	case FCP_TGT_NODE_NONE:
4990 	case FCP_TGT_NODE_ON_DEMAND:
4991 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4992 		    !fcp_enable_auto_configuration &&
4993 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4994 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
4995 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
4996 		    fcp_enable_auto_configuration &&
4997 		    (ptgt->tgt_manual_config_only == 1) &&
4998 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
4999 			/*
5000 			 * If auto configuration is set and
5001 			 * the tgt_manual_config_only flag is set then
5002 			 * we only want the user to be able to change
5003 			 * the state through create_on_demand.
5004 			 */
5005 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5006 		} else {
5007 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5008 		}
5009 		break;
5010 
5011 	case FCP_TGT_NODE_PRESENT:
5012 		break;
5013 	}
5014 	/*
5015 	 * If we are booting from a fabric device, make sure we
5016 	 * mark the node state appropriately for this target to be
5017 	 * enumerated
5018 	 */
5019 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5020 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5021 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5022 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5023 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5024 		}
5025 	}
5026 	mutex_exit(&ptgt->tgt_mutex);
5027 
5028 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5029 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5030 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5031 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5032 	    map_entry->map_rscn_info.ulp_rscn_count);
5033 
5034 	mutex_enter(&ptgt->tgt_mutex);
5035 
5036 	/*
5037 	 * Reset target OFFLINE state and mark the target BUSY
5038 	 */
5039 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5040 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5041 
5042 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5043 	lcount = link_cnt;
5044 
5045 	mutex_exit(&ptgt->tgt_mutex);
5046 	mutex_exit(&pptr->port_mutex);
5047 
5048 	/*
5049 	 * if we are already logged in, then we do a PRLI, else
5050 	 * we do a PLOGI first (to get logged in)
5051 	 *
5052 	 * We will not check if we are the PLOGI initiator
5053 	 */
5054 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5055 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5056 
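	/*
	 * Size the internal packet for whichever ELS payload is larger so
	 * the same buffer can carry either a PLOGI or a PRLI.
	 */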
5057 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5058 
5059 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0, lcount, tcount,
5060 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5061 
5062 	if (icmd == NULL) {
5063 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5064 		/*
5065 		 * We've exited port_mutex before calling fcp_icmd_alloc,
5066 		 * we need to make sure we reacquire it before returning.
5067 		 */
5068 		mutex_enter(&pptr->port_mutex);
5069 		return (FALSE);
5070 	}
5071 
5072 	/* TRUE is only returned when the target is intentionally skipped */
5073 	ret = FALSE;
5074 	/* discover info about this target */
5075 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5076 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5077 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5078 	} else {
5079 		fcp_icmd_free(pptr, icmd);
5080 		ret = TRUE;
5081 	}
5082 	mutex_enter(&pptr->port_mutex);
5083 
5084 	return (ret);
5085 }
5086 
5087 /*
5088  *     Function: fcp_send_els
5089  *
5090  *  Description: Sends an ELS to the target specified by the caller.  Supports
5091  *		 PLOGI and PRLI.
5092  *
5093  *     Argument: *pptr		Fcp port.
5094  *		 *ptgt		Target to send the ELS to.
5095  *		 *icmd		Internal packet
5096  *		 opcode		ELS opcode
5097  *		 lcount		Link state change counter
5098  *		 tcount		Target state change counter
5099  *		 cause		What caused the call
5100  *
5101  * Return Value: DDI_SUCCESS
5102  *		 Others
5103  */
5104 static int
5105 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5106     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5107 {
5108 	fc_packet_t		*fpkt;
5109 	fc_frame_hdr_t		*hp;
5110 	int			internal = 0;
5111 	int			alloc;
5112 	int			cmd_len;
5113 	int			resp_len;
5114 	int			res = DDI_FAILURE; /* default result */
5115 	int			rval = DDI_FAILURE;
5116 
5117 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5118 	ASSERT(ptgt->tgt_port == pptr);
5119 
5120 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5121 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5122 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5123 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5124 
5125 	if (opcode == LA_ELS_PLOGI) {
5126 		cmd_len = sizeof (la_els_logi_t);
5127 		resp_len = sizeof (la_els_logi_t);
5128 	} else {
5129 		ASSERT(opcode == LA_ELS_PRLI);
5130 		cmd_len = sizeof (la_els_prli_t);
5131 		resp_len = sizeof (la_els_prli_t);
5132 	}
5133 
5134 	if (icmd == NULL) {
5135 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5136 		    sizeof (la_els_prli_t));
5137 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, 0,
5138 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5139 		if (icmd == NULL) {
5140 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5141 			return (res);
5142 		}
5143 		internal++;
5144 	}
5145 	fpkt = icmd->ipkt_fpkt;
5146 
5147 	fpkt->pkt_cmdlen = cmd_len;
5148 	fpkt->pkt_rsplen = resp_len;
5149 	fpkt->pkt_datalen = 0;
5150 	icmd->ipkt_retries = 0;
5151 
5152 	/* fill in fpkt info */
5153 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5154 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5155 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5156 
5157 	/* get ptr to frame hdr in fpkt */
5158 	hp = &fpkt->pkt_cmd_fhdr;
5159 
5160 	/*
5161 	 * fill in frame hdr
5162 	 */
5163 	hp->r_ctl = R_CTL_ELS_REQ;
5164 	hp->s_id = pptr->port_id;	/* source ID */
5165 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5166 	hp->type = FC_TYPE_EXTENDED_LS;
5167 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5168 	hp->seq_id = 0;
5169 	hp->rsvd = 0;
5170 	hp->df_ctl  = 0;
5171 	hp->seq_cnt = 0;
5172 	hp->ox_id = 0xffff;		/* i.e. none */
5173 	hp->rx_id = 0xffff;		/* i.e. none */
5174 	hp->ro = 0;
5175 
5176 	/*
5177 	 * at this point we have a filled in cmd pkt
5178 	 *
5179 	 * fill in the respective info, then use the transport to send
5180 	 * the packet
5181 	 *
5182 	 * for a PLOGI call fc_ulp_login(), and
5183 	 * for a PRLI call fc_ulp_issue_els()
5184 	 */
5185 	switch (opcode) {
5186 	case LA_ELS_PLOGI: {
5187 		struct la_els_logi logi;
5188 
5189 		bzero(&logi, sizeof (struct la_els_logi));
5190 
5191 		hp = &fpkt->pkt_cmd_fhdr;
5192 		hp->r_ctl = R_CTL_ELS_REQ;
5193 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5194 		logi.ls_code.mbz = 0;
5195 
5196 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5197 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5198 
5199 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5200 
5201 		mutex_enter(&pptr->port_mutex);
5202 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5203 
5204 			mutex_exit(&pptr->port_mutex);
5205 
5206 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5207 			if (rval == FC_SUCCESS) {
5208 				res = DDI_SUCCESS;
5209 				break;
5210 			}
5211 
5212 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5213 
5214 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5215 			    rval, "PLOGI");
5216 		} else {
5217 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5218 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5219 			    "fcp_send_els1: state change occurred"
5220 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5221 			mutex_exit(&pptr->port_mutex);
5222 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5223 		}
5224 		break;
5225 	}
5226 
5227 	case LA_ELS_PRLI: {
5228 		struct la_els_prli	prli;
5229 		struct fcp_prli		*fprli;
5230 
5231 		bzero(&prli, sizeof (struct la_els_prli));
5232 
5233 		hp = &fpkt->pkt_cmd_fhdr;
5234 		hp->r_ctl = R_CTL_ELS_REQ;
5235 
5236 		/* fill in PRLI cmd ELS fields */
5237 		prli.ls_code = LA_ELS_PRLI;
5238 		prli.page_length = 0x10;	/* service parameter page is 16 bytes */
5239 		prli.payload_length = sizeof (struct la_els_prli);
5240 
5241 		icmd->ipkt_opcode = LA_ELS_PRLI;
5242 
5243 		/* get ptr to PRLI service params */
5244 		fprli = (struct fcp_prli *)prli.service_params;
5245 
5246 		/* fill in service params */
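		/* FC-4 type 0x08 is SCSI FCP */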
5247 		fprli->type = 0x08;
5248 		fprli->resvd1 = 0;
5249 		fprli->orig_process_assoc_valid = 0;
5250 		fprli->resp_process_assoc_valid = 0;
5251 		fprli->establish_image_pair = 1;
5252 		fprli->resvd2 = 0;
5253 		fprli->resvd3 = 0;
5254 		fprli->obsolete_1 = 0;
5255 		fprli->obsolete_2 = 0;
5256 		fprli->data_overlay_allowed = 0;
5257 		fprli->initiator_fn = 1;
5258 		fprli->confirmed_compl_allowed = 1;
5259 
5260 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5261 			fprli->target_fn = 1;
5262 		} else {
5263 			fprli->target_fn = 0;
5264 		}
5265 
5266 		fprli->retry = 1;
5267 		fprli->read_xfer_rdy_disabled = 1;
5268 		fprli->write_xfer_rdy_disabled = 0;
5269 
5270 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5271 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5272 
5273 		/* issue the PRLI request */
5274 
5275 		mutex_enter(&pptr->port_mutex);
5276 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5277 
5278 			mutex_exit(&pptr->port_mutex);
5279 
5280 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5281 			if (rval == FC_SUCCESS) {
5282 				res = DDI_SUCCESS;
5283 				break;
5284 			}
5285 
5286 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5287 
5288 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5289 			    rval, "PRLI");
5290 		} else {
5291 			mutex_exit(&pptr->port_mutex);
5292 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5293 		}
5294 		break;
5295 	}
5296 
5297 	default:
5298 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5299 		break;
5300 	}
5301 
5302 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5303 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5304 	    "fcp_send_els: returning %d", res);
5305 
5306 	if (res != DDI_SUCCESS) {
5307 		if (internal) {
5308 			fcp_icmd_free(pptr, icmd);
5309 		}
5310 	}
5311 
5312 	return (res);
5313 }
5314 
5315 
5316 /*
5317  * called internally update the state of all of the tgts and each LUN
5318  * for this port (i.e. each target  known to be attached to this port)
5319  * if they are not already offline
5320  *
5321  * must be called with the port mutex owned
5322  *
5323  * acquires and releases the target mutexes for each target attached
5324  * to this port
5325  */
5326 void
5327 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5328 {
5329 	int i;
5330 	struct fcp_tgt *ptgt;
5331 
5332 	ASSERT(mutex_owned(&pptr->port_mutex));
5333 
5334 	for (i = 0; i < FCP_NUM_HASH; i++) {
5335 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5336 		    ptgt = ptgt->tgt_next) {
5337 			mutex_enter(&ptgt->tgt_mutex);
5338 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5339 			ptgt->tgt_change_cnt++;
5340 			ptgt->tgt_statec_cause = cause;
5341 			ptgt->tgt_tmp_cnt = 1;
5342 			ptgt->tgt_done = 0;
5343 			mutex_exit(&ptgt->tgt_mutex);
5344 		}
5345 	}
5346 }
5347 
5348 
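/*
 *     Function: fcp_offline_all
 *
 *  Description: Counts the targets known to the port, sets port_tmp_cnt
 *		 accordingly and invokes fcp_call_finish_init_held() for
 *		 each of them so that the state change completes with every
 *		 target offlined.  Nothing is done if the port has no
 *		 targets.
 *
 *     Argument: *pptr		Fcp port structure.
 *		 lcount		Link state count.
 *		 cause		What caused this function to be called.
 *
 * Return Value: None
 *
 *	  Notes: The pptr->port_mutex must be held.
 */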
5349 static void
5350 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5351 {
5352 	int i;
5353 	int ndevs;
5354 	struct fcp_tgt *ptgt;
5355 
5356 	ASSERT(mutex_owned(&pptr->port_mutex));
5357 
5358 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5359 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5360 		    ptgt = ptgt->tgt_next) {
5361 			ndevs++;
5362 		}
5363 	}
5364 
5365 	if (ndevs == 0) {
5366 		return;
5367 	}
5368 	pptr->port_tmp_cnt = ndevs;
5369 
5370 	for (i = 0; i < FCP_NUM_HASH; i++) {
5371 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5372 		    ptgt = ptgt->tgt_next) {
5373 			(void) fcp_call_finish_init_held(pptr, ptgt,
5374 			    lcount, ptgt->tgt_change_cnt, cause);
5375 		}
5376 	}
5377 }
5378 
5379 /*
5380  *     Function: fcp_update_tgt_state
5381  *
5382  *  Description: This function updates the field tgt_state of a target.	 That
5383  *		 field is a bitmap whose bits can be set or reset
5384  *		 individually.	The action applied to the target state is also
5385  *		 applied to all the LUNs belonging to the target (provided the
5386  *		 LUN is not offline).  A side effect of applying the state
5387  *		 modification to the target and the LUNs is that the tgt_trace
5388  *		 field of the target and the lun_trace field of the LUNs are set to zero.
5389  *
5390  *
5391  *     Argument: *ptgt	Target structure.
5392  *		 flag	Flag indicating what action to apply (set/reset).
5393  *		 state	State bits to update.
5394  *
5395  * Return Value: None
5396  *
5397  *	Context: Interrupt, Kernel or User context.
5398  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5399  *		 calling this function.
5400  */
5401 void
5402 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5403 {
5404 	struct fcp_lun *plun;
5405 
5406 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5407 
5408 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5409 		/* The target is not offline. */
5410 		if (flag == FCP_SET) {
5411 			ptgt->tgt_state |= state;
5412 			ptgt->tgt_trace = 0;
5413 		} else {
5414 			ptgt->tgt_state &= ~state;
5415 		}
5416 
5417 		for (plun = ptgt->tgt_lun; plun != NULL;
5418 		    plun = plun->lun_next) {
5419 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5420 				/* The LUN is not offline. */
5421 				if (flag == FCP_SET) {
5422 					plun->lun_state |= state;
5423 					plun->lun_trace = 0;
5424 				} else {
5425 					plun->lun_state &= ~state;
5426 				}
5427 			}
5428 		}
5429 	}
5430 }
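
/*
 * A minimal sketch of toggling a single bit with this routine, assuming
 * FCP_RESET is the clearing counterpart of FCP_SET (any flag value other
 * than FCP_SET clears the bits here) and that the caller holds the target
 * mutex as required above:
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_TGT_MARK);
 *	... do the work that needs the mark ...
 *	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_TGT_MARK);
 *	mutex_exit(&ptgt->tgt_mutex);
 */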
5431 
5432 /*
5433  *     Function: fcp_update_lun_state
5434  *
5435  *  Description: This function updates the field lun_state of a LUN.  That
5436  *		 field is a bitmap whose bits can be set or reset
5437  *		 individually.
5438  *
5439  *     Argument: *plun	LUN structure.
5440  *		 flag	Flag indicating what action to apply (set/reset).
5441  *		 state	State bits to update.
5442  *
5443  * Return Value: None
5444  *
5445  *	Context: Interrupt, Kernel or User context.
5446  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5447  *		 calling this function.
5448  */
5449 void
5450 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5451 {
5452 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5453 
5454 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5455 
5456 	if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5457 		if (flag == FCP_SET) {
5458 			plun->lun_state |= state;
5459 		} else {
5460 			plun->lun_state &= ~state;
5461 		}
5462 	}
5463 }
5464 
5465 /*
5466  *     Function: fcp_get_port
5467  *
5468  *  Description: This function returns the fcp_port structure from the opaque
5469  *		 handle passed by the caller.  That opaque handle is the handle
5470  *		 used by fp/fctl to identify a particular local port.  That
5471  *		 handle has been stored in the corresponding fcp_port
5472  *		 structure.  This function walks the global list of
5473  *		 fcp_port structures until one has a port_fp_handle that matches
5474  *		 the handle passed by the caller.  This function enters the
5475  *		 mutex fcp_global_mutex while walking the global list and then
5476  *		 releases it.
5477  *
5478  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5479  *				particular port.
5480  *
5481  * Return Value: NULL		Not found.
5482  *		 Not NULL	Pointer to the fcp_port structure.
5483  *
5484  *	Context: Interrupt, Kernel or User context.
5485  */
5486 static struct fcp_port *
5487 fcp_get_port(opaque_t port_handle)
5488 {
5489 	struct fcp_port *pptr;
5490 
5491 	ASSERT(port_handle != NULL);
5492 
5493 	mutex_enter(&fcp_global_mutex);
5494 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5495 		if (pptr->port_fp_handle == port_handle) {
5496 			break;
5497 		}
5498 	}
5499 	mutex_exit(&fcp_global_mutex);
5500 
5501 	return (pptr);
5502 }
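
/*
 * A minimal sketch of the intended use, assuming an entry point that is
 * handed the opaque fp/fctl port handle and must give up when the port is
 * not (or no longer) known to FCP:
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		return (FC_FAILURE);
 *	}
 */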
5503 
5504 
5505 static void
5506 fcp_unsol_callback(fc_packet_t *fpkt)
5507 {
5508 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5509 	struct fcp_port *pptr = icmd->ipkt_port;
5510 
5511 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5512 		caddr_t state, reason, action, expln;
5513 
5514 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5515 		    &action, &expln);
5516 
5517 		fcp_log(CE_WARN, pptr->port_dip,
5518 		    "!couldn't post response to unsolicited request: "
5519 		    " state=%s reason=%s ox_id=%x rx_id=%x",
5520 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5521 		    fpkt->pkt_cmd_fhdr.rx_id);
5522 	}
5523 	fcp_icmd_free(pptr, icmd);
5524 }
5525 
5526 
5527 /*
5528  * Perform general purpose preparation of a response to an unsolicited request
5529  */
5530 static void
5531 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5532     uchar_t r_ctl, uchar_t type)
5533 {
5534 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5535 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5536 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5537 	pkt->pkt_cmd_fhdr.type = type;
5538 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5539 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5540 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5541 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5542 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5543 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5544 	pkt->pkt_cmd_fhdr.ro = 0;
5545 	pkt->pkt_cmd_fhdr.rsvd = 0;
5546 	pkt->pkt_comp = fcp_unsol_callback;
5547 	pkt->pkt_pd = NULL;
5548 }
5549 
5550 
5551 /*ARGSUSED*/
5552 static int
5553 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5554 {
5555 	fc_packet_t		*fpkt;
5556 	struct la_els_prli	prli;
5557 	struct fcp_prli		*fprli;
5558 	struct fcp_ipkt	*icmd;
5559 	struct la_els_prli	*from;
5560 	struct fcp_prli		*orig;
5561 	struct fcp_tgt	*ptgt;
5562 	int			tcount = 0;
5563 	int			lcount;
5564 
5565 	from = (struct la_els_prli *)buf->ub_buffer;
5566 	orig = (struct fcp_prli *)from->service_params;
5567 
5568 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5569 	    NULL) {
5570 		mutex_enter(&ptgt->tgt_mutex);
5571 		tcount = ptgt->tgt_change_cnt;
5572 		mutex_exit(&ptgt->tgt_mutex);
5573 	}
5574 	mutex_enter(&pptr->port_mutex);
5575 	lcount = pptr->port_link_cnt;
5576 	mutex_exit(&pptr->port_mutex);
5577 
5578 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5579 	    sizeof (la_els_prli_t), 0, 0, lcount, tcount, 0,
5580 	    FC_INVALID_RSCN_COUNT)) == NULL) {
5581 		return (FC_FAILURE);
5582 	}
5583 	fpkt = icmd->ipkt_fpkt;
5584 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5585 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5586 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5587 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5588 	fpkt->pkt_rsplen = 0;
5589 	fpkt->pkt_datalen = 0;
5590 
5591 	icmd->ipkt_opcode = LA_ELS_PRLI;
5592 
5593 	bzero(&prli, sizeof (struct la_els_prli));
5594 	fprli = (struct fcp_prli *)prli.service_params;
5595 	prli.ls_code = LA_ELS_ACC;
5596 	prli.page_length = 0x10;
5597 	prli.payload_length = sizeof (struct la_els_prli);
5598 
5599 	/* fill in service params */
5600 	fprli->type = 0x08;
5601 	fprli->resvd1 = 0;
5602 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5603 	fprli->orig_process_associator = orig->orig_process_associator;
5604 	fprli->resp_process_assoc_valid = 0;
5605 	fprli->establish_image_pair = 1;
5606 	fprli->resvd2 = 0;
5607 	fprli->resvd3 = 0;
5608 	fprli->obsolete_1 = 0;
5609 	fprli->obsolete_2 = 0;
5610 	fprli->data_overlay_allowed = 0;
5611 	fprli->initiator_fn = 1;
5612 	fprli->confirmed_compl_allowed = 1;
5613 
5614 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5615 		fprli->target_fn = 1;
5616 	} else {
5617 		fprli->target_fn = 0;
5618 	}
5619 
5620 	fprli->retry = 1;
5621 	fprli->read_xfer_rdy_disabled = 1;
5622 	fprli->write_xfer_rdy_disabled = 0;
5623 
5624 	/* save the unsol prli payload first */
5625 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5626 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5627 
5628 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5629 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5630 
5631 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5632 
5633 	mutex_enter(&pptr->port_mutex);
5634 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5635 		int rval;
5636 		mutex_exit(&pptr->port_mutex);
5637 
5638 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5639 		    FC_SUCCESS) {
5640 			if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
5641 				fcp_queue_ipkt(pptr, fpkt);
5642 				return (FC_SUCCESS);
5643 			}
5644 			/* Let it timeout */
5645 			fcp_icmd_free(pptr, icmd);
5646 			return (FC_FAILURE);
5647 		}
5648 	} else {
5649 		mutex_exit(&pptr->port_mutex);
5650 		fcp_icmd_free(pptr, icmd);
5651 		return (FC_FAILURE);
5652 	}
5653 
5654 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5655 
5656 	return (FC_SUCCESS);
5657 }
5658 
5659 /*
5660  *     Function: fcp_icmd_alloc
5661  *
5662  *  Description: This function allocates an fcp_ipkt structure.	The pkt_comp
5663  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5664  *		 modified by the caller (such as fcp_send_scsi).  The
5665  *		 structure is also tied to the state of the line and of the
5666  *		 target at a particular time.  That link is established by
5667  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5668  *		 and tcount which came respectively from pptr->link_cnt and
5669  *		 ptgt->tgt_change_cnt.
5670  *
5671  *     Argument: *pptr		Fcp port.
5672  *		 *ptgt		Target (destination of the command).
5673  *		 cmd_len	Length of the command.
5674  *		 resp_len	Length of the expected response.
5675  *		 data_len	Length of the data.
5676  *		 nodma		Indicates whether the command and response
5677  *				will be transferred through DMA or not.
5678  *		 lcount		Link state change counter.
5679  *		 tcount		Target state change counter.
5680  *		 cause		Reason that led to this call.
5681  *
5682  * Return Value: NULL		Failed.
5683  *		 Not NULL	Internal packet address.
5684  */
5685 static struct fcp_ipkt *
5686 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5687     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5688     uint32_t rscn_count)
5689 {
5690 	int			dma_setup = 0;
5691 	fc_packet_t		*fpkt;
5692 	struct fcp_ipkt	*icmd = NULL;
5693 
5694 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5695 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5696 	    KM_NOSLEEP);
5697 	if (icmd == NULL) {
5698 		fcp_log(CE_WARN, pptr->port_dip,
5699 		    "!internal packet allocation failed");
5700 		return (NULL);
5701 	}
5702 
5703 	/*
5704 	 * initialize the allocated packet
5705 	 */
5706 	icmd->ipkt_nodma = nodma;
5707 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5708 	icmd->ipkt_lun = NULL;
5709 
5710 	icmd->ipkt_link_cnt = lcount;
5711 	icmd->ipkt_change_cnt = tcount;
5712 	icmd->ipkt_cause = cause;
5713 
5714 	mutex_enter(&pptr->port_mutex);
5715 	icmd->ipkt_port = pptr;
5716 	mutex_exit(&pptr->port_mutex);
5717 
5718 	/* keep track of amt of data to be sent in pkt */
5719 	icmd->ipkt_cmdlen = cmd_len;
5720 	icmd->ipkt_resplen = resp_len;
5721 	icmd->ipkt_datalen = data_len;
5722 
5723 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5724 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5725 
5726 	/* set pkt's private ptr to point to cmd pkt */
5727 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5728 
5729 	/* set FCA private ptr to memory just beyond */
5730 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5731 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5732 	    pptr->port_dmacookie_sz);
5733 
5734 	/* get ptr to fpkt substruct and fill it in */
5735 	fpkt = icmd->ipkt_fpkt;
5736 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5737 	    sizeof (struct fcp_ipkt));
5738 
5739 	if (ptgt != NULL) {
5740 		icmd->ipkt_tgt = ptgt;
5741 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5742 	}
5743 
5744 	fpkt->pkt_comp = fcp_icmd_callback;
5745 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5746 	fpkt->pkt_cmdlen = cmd_len;
5747 	fpkt->pkt_rsplen = resp_len;
5748 	fpkt->pkt_datalen = data_len;
5749 
5750 	/*
5751 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5752 	 * rscn_count as fcp knows it down to the transport. If a valid count was
5753 	 * passed into this function, we allocate memory to actually pass down
5754 	 * this info.
5755 	 *
5756 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5757 	 * basically mean that fcp will not be able to help the transport
5758 	 * distinguish whether a new RSCN has come after fcp was last informed about
5759 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5760 	 * 5068068 where the device might end up going offline in case of RSCN
5761 	 * storms.
5762 	 */
5763 	fpkt->pkt_ulp_rscn_infop = NULL;
5764 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5765 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5766 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5767 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5768 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5769 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5770 			    "Failed to alloc memory to pass rscn info");
5771 		}
5772 	}
5773 
5774 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5775 		fc_ulp_rscn_info_t	*rscnp;
5776 
5777 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5778 		rscnp->ulp_rscn_count = rscn_count;
5779 	}
5780 
5781 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5782 		goto fail;
5783 	}
5784 	dma_setup++;
5785 
5786 	/*
5787 	 * Must hold target mutex across setting of pkt_pd and call to
5788 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5789 	 * away while we're not looking.
5790 	 */
5791 	if (ptgt != NULL) {
5792 		mutex_enter(&ptgt->tgt_mutex);
5793 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5794 
5795 		/* ask transport to do its initialization on this pkt */
5796 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5797 		    != FC_SUCCESS) {
5798 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5799 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5800 			    "fc_ulp_init_packet failed");
5801 			mutex_exit(&ptgt->tgt_mutex);
5802 			goto fail;
5803 		}
5804 		mutex_exit(&ptgt->tgt_mutex);
5805 	} else {
5806 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5807 		    != FC_SUCCESS) {
5808 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5809 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5810 			    "fc_ulp_init_packet failed");
5811 			goto fail;
5812 		}
5813 	}
5814 
5815 	mutex_enter(&pptr->port_mutex);
5816 	if (pptr->port_state & (FCP_STATE_DETACHING |
5817 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5818 		int rval;
5819 
5820 		mutex_exit(&pptr->port_mutex);
5821 
5822 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5823 		ASSERT(rval == FC_SUCCESS);
5824 
5825 		goto fail;
5826 	}
5827 
5828 	if (ptgt != NULL) {
5829 		mutex_enter(&ptgt->tgt_mutex);
5830 		ptgt->tgt_ipkt_cnt++;
5831 		mutex_exit(&ptgt->tgt_mutex);
5832 	}
5833 
5834 	pptr->port_ipkt_cnt++;
5835 
5836 	mutex_exit(&pptr->port_mutex);
5837 
5838 	return (icmd);
5839 
5840 fail:
5841 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5842 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5843 		    sizeof (fc_ulp_rscn_info_t));
5844 		fpkt->pkt_ulp_rscn_infop = NULL;
5845 	}
5846 
5847 	if (dma_setup) {
5848 		fcp_free_dma(pptr, icmd);
5849 	}
5850 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5851 	    (size_t)pptr->port_dmacookie_sz);
5852 
5853 	return (NULL);
5854 }
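
/*
 * A minimal sketch of the allocate/issue/free life cycle, modeled on the
 * unsolicited PRLI path above; cmd_len, resp_len and the error handling
 * are only illustrative:
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0, 0,
 *	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
 *	if (icmd == NULL) {
 *		return (FC_FAILURE);
 *	}
 *	fpkt = icmd->ipkt_fpkt;
 *	... fill in the frame header and payload ...
 *	if (fc_ulp_issue_els(pptr->port_fp_handle, fpkt) != FC_SUCCESS) {
 *		fcp_icmd_free(pptr, icmd);
 *		return (FC_FAILURE);
 *	}
 */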
5855 
5856 /*
5857  *     Function: fcp_icmd_free
5858  *
5859  *  Description: Frees the internal command passed by the caller.
5860  *
5861  *     Argument: *pptr		Fcp port.
5862  *		 *icmd		Internal packet to free.
5863  *
5864  * Return Value: None
5865  */
5866 static void
5867 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5868 {
5869 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5870 
5871 	/* Let the underlying layers do their cleanup. */
5872 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5873 	    icmd->ipkt_fpkt);
5874 
5875 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5876 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5877 		    sizeof (fc_ulp_rscn_info_t));
5878 	}
5879 
5880 	fcp_free_dma(pptr, icmd);
5881 
5882 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5883 	    (size_t)pptr->port_dmacookie_sz);
5884 
5885 	mutex_enter(&pptr->port_mutex);
5886 
5887 	if (ptgt) {
5888 		mutex_enter(&ptgt->tgt_mutex);
5889 		ptgt->tgt_ipkt_cnt--;
5890 		mutex_exit(&ptgt->tgt_mutex);
5891 	}
5892 
5893 	pptr->port_ipkt_cnt--;
5894 	mutex_exit(&pptr->port_mutex);
5895 }
5896 
5897 /*
5898  *     Function: fcp_alloc_dma
5899  *
5900  *  Description: Allocates the DMA resources required for the internal
5901  *		 packet.
5902  *
5903  *     Argument: *pptr	FCP port.
5904  *		 *icmd	Internal FCP packet.
5905  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5906  *		 flags	Allocation flags (Sleep or NoSleep).
5907  *
5908  * Return Value: FC_SUCCESS
5909  *		 FC_NOMEM
5910  */
5911 static int
5912 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5913     int nodma, int flags)
5914 {
5915 	int		rval;
5916 	size_t		real_size;
5917 	uint_t		ccount;
5918 	int		bound = 0;
5919 	int		cmd_resp = 0;
5920 	fc_packet_t	*fpkt;
5921 	ddi_dma_cookie_t	pkt_data_cookie;
5922 	ddi_dma_cookie_t	*cp;
5923 	uint32_t		cnt;
5924 
5925 	fpkt = &icmd->ipkt_fc_packet;
5926 
5927 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5928 	    fpkt->pkt_resp_dma == NULL);
5929 
5930 	icmd->ipkt_nodma = nodma;
5931 
5932 	if (nodma) {
5933 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5934 		if (fpkt->pkt_cmd == NULL) {
5935 			goto fail;
5936 		}
5937 
5938 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5939 		if (fpkt->pkt_resp == NULL) {
5940 			goto fail;
5941 		}
5942 	} else {
5943 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5944 
5945 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5946 		if (rval == FC_FAILURE) {
5947 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5948 			    fpkt->pkt_resp_dma == NULL);
5949 			goto fail;
5950 		}
5951 		cmd_resp++;
5952 	}
5953 
5954 	if (fpkt->pkt_datalen != 0) {
5955 		/*
5956 		 * set up DMA handle and memory for the data in this packet
5957 		 */
5958 		if (ddi_dma_alloc_handle(pptr->port_dip,
5959 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
5960 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
5961 			goto fail;
5962 		}
5963 
5964 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
5965 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
5966 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
5967 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
5968 			goto fail;
5969 		}
5970 
5971 		/* did we get less DMA memory than we asked for/needed? */
5972 		if (real_size < fpkt->pkt_datalen) {
5973 			goto fail;
5974 		}
5975 
5976 		/* bind DMA address and handle together */
5977 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
5978 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
5979 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
5980 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
5981 			goto fail;
5982 		}
5983 		bound++;
5984 
5985 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
5986 			goto fail;
5987 		}
5988 
5989 		fpkt->pkt_data_cookie_cnt = ccount;
5990 
5991 		cp = fpkt->pkt_data_cookie;
5992 		*cp = pkt_data_cookie;
5993 		cp++;
5994 
5995 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
5996 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
5997 			    &pkt_data_cookie);
5998 			*cp = pkt_data_cookie;
5999 		}
6000 
6001 	}
6002 
6003 	return (FC_SUCCESS);
6004 
6005 fail:
6006 	if (bound) {
6007 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6008 	}
6009 
6010 	if (fpkt->pkt_data_dma) {
6011 		if (fpkt->pkt_data) {
6012 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6013 		}
6014 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6015 	}
6016 
6017 	if (nodma) {
6018 		if (fpkt->pkt_cmd) {
6019 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6020 		}
6021 		if (fpkt->pkt_resp) {
6022 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6023 		}
6024 	} else {
6025 		if (cmd_resp) {
6026 			fcp_free_cmd_resp(pptr, fpkt);
6027 		}
6028 	}
6029 
6030 	return (FC_NOMEM);
6031 }
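
/*
 * A minimal sketch of how this routine pairs with fcp_free_dma(), as in
 * the fcp_icmd_alloc() failure path above; "nodma" reflects whether the
 * port lacks DVMA space for the command and response:
 *
 *	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
 *		... nothing was set up, so there is nothing to release ...
 *	} else {
 *		... use the packet, then ...
 *		fcp_free_dma(pptr, icmd);
 *	}
 */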
6032 
6033 
6034 static void
6035 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6036 {
6037 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6038 
6039 	if (fpkt->pkt_data_dma) {
6040 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6041 		if (fpkt->pkt_data) {
6042 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6043 		}
6044 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6045 	}
6046 
6047 	if (icmd->ipkt_nodma) {
6048 		if (fpkt->pkt_cmd) {
6049 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6050 		}
6051 		if (fpkt->pkt_resp) {
6052 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6053 		}
6054 	} else {
6055 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6056 
6057 		fcp_free_cmd_resp(pptr, fpkt);
6058 	}
6059 }
6060 
6061 /*
6062  *     Function: fcp_lookup_target
6063  *
6064  *  Description: Finds a target given a WWN.
6065  *
6066  *     Argument: *pptr	FCP port.
6067  *		 *wwn	World Wide Name of the device to look for.
6068  *
6069  * Return Value: NULL		No target found
6070  *		 Not NULL	Target structure
6071  *
6072  *	Context: Interrupt context.
6073  *		 The mutex pptr->port_mutex must be owned.
6074  */
6075 /* ARGSUSED */
6076 static struct fcp_tgt *
6077 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6078 {
6079 	int			hash;
6080 	struct fcp_tgt	*ptgt;
6081 
6082 	ASSERT(mutex_owned(&pptr->port_mutex));
6083 
6084 	hash = FCP_HASH(wwn);
6085 
6086 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6087 	    ptgt = ptgt->tgt_next) {
6088 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6089 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6090 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6091 			break;
6092 		}
6093 	}
6094 
6095 	return (ptgt);
6096 }
6097 
6098 
6099 /*
6100  * Find target structure given a port identifier
6101  */
6102 static struct fcp_tgt *
6103 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6104 {
6105 	fc_portid_t		port_id;
6106 	la_wwn_t		pwwn;
6107 	struct fcp_tgt	*ptgt = NULL;
6108 
6109 	port_id.priv_lilp_posit = 0;
6110 	port_id.port_id = d_id;
6111 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6112 	    &pwwn) == FC_SUCCESS) {
6113 		mutex_enter(&pptr->port_mutex);
6114 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6115 		mutex_exit(&pptr->port_mutex);
6116 	}
6117 
6118 	return (ptgt);
6119 }
6120 
6121 
6122 /*
6123  * the packet completion callback routine for info cmd pkts
6124  *
6125  * this means fpkt points to a response to either a PLOGI or a PRLI
6126  *
6127  * if there is an error an attempt is made to call a routine to resend
6128  * the command that failed
6129  */
6130 static void
6131 fcp_icmd_callback(fc_packet_t *fpkt)
6132 {
6133 	struct fcp_ipkt	*icmd;
6134 	struct fcp_port	*pptr;
6135 	struct fcp_tgt	*ptgt;
6136 	struct la_els_prli	*prli;
6137 	struct la_els_prli	prli_s;
6138 	struct fcp_prli		*fprli;
6139 	struct fcp_lun	*plun;
6140 	int		free_pkt = 1;
6141 	int		rval;
6142 	ls_code_t	resp;
6143 	uchar_t		prli_acc = 0;
6144 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6145 	int		lun0_newalloc;
6146 
6147 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6148 
6149 	/* get ptrs to the port and target structs for the cmd */
6150 	pptr = icmd->ipkt_port;
6151 	ptgt = icmd->ipkt_tgt;
6152 
6153 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6154 
6155 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6156 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6157 		    sizeof (prli_s));
6158 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6159 	}
6160 
6161 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6162 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6163 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6164 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6165 	    ptgt->tgt_d_id);
6166 
6167 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6168 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6169 
6170 		mutex_enter(&ptgt->tgt_mutex);
6171 		if (ptgt->tgt_pd_handle == NULL) {
6172 			/*
6173 			 * in a fabric environment the port device handles
6174 			 * get created only after successful LOGIN into the
6175 			 * transport, so the transport makes this port
6176 			 * device (pd) handle available in this packet, so
6177 			 * save it now
6178 			 */
6179 			ASSERT(fpkt->pkt_pd != NULL);
6180 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6181 		}
6182 		mutex_exit(&ptgt->tgt_mutex);
6183 
6184 		/* which ELS cmd is this response for ?? */
6185 		switch (icmd->ipkt_opcode) {
6186 		case LA_ELS_PLOGI:
6187 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6188 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6189 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6190 			    ptgt->tgt_d_id,
6191 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6192 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6193 
6194 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6195 			    FCP_TGT_TRACE_15);
6196 
6197 			/* Note that we are not allocating a new icmd */
6198 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6199 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6200 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6201 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6202 				    FCP_TGT_TRACE_16);
6203 				goto fail;
6204 			}
6205 			break;
6206 
6207 		case LA_ELS_PRLI:
6208 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6209 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6210 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6211 
6212 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6213 			    FCP_TGT_TRACE_17);
6214 
6215 			prli = &prli_s;
6216 
6217 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6218 			    sizeof (prli_s));
6219 
6220 			fprli = (struct fcp_prli *)prli->service_params;
6221 
6222 			mutex_enter(&ptgt->tgt_mutex);
6223 			ptgt->tgt_icap = fprli->initiator_fn;
6224 			ptgt->tgt_tcap = fprli->target_fn;
6225 			mutex_exit(&ptgt->tgt_mutex);
6226 
6227 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6228 				/*
6229 				 * this FCP device does not support target mode
6230 				 */
6231 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6232 				    FCP_TGT_TRACE_18);
6233 				goto fail;
6234 			}
6235 			if (fprli->retry == 1) {
6236 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6237 				    &ptgt->tgt_port_wwn);
6238 			}
6239 
6240 			/* target is no longer offline */
6241 			mutex_enter(&pptr->port_mutex);
6242 			mutex_enter(&ptgt->tgt_mutex);
6243 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6244 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6245 				    FCP_TGT_MARK);
6246 			} else {
6247 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6248 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6249 				    "fcp_icmd_callback,1: state change "
6250 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6251 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6252 				mutex_exit(&pptr->port_mutex);
6253 				goto fail;
6254 			}
6255 			mutex_exit(&ptgt->tgt_mutex);
6256 			mutex_exit(&pptr->port_mutex);
6257 
6258 			/*
6259 			 * lun 0 should always respond to inquiry, so
6260 			 * get the LUN struct for LUN 0
6261 			 *
6262 			 * Currently we deal with first level of addressing.
6263 			 * If / when we start supporting 0x device types
6264 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6265 			 * this logic will need revisiting.
6266 			 */
6267 			lun0_newalloc = 0;
6268 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6269 				/*
6270 				 * no LUN struct for LUN 0 yet exists,
6271 				 * so create one
6272 				 */
6273 				plun = fcp_alloc_lun(ptgt);
6274 				if (plun == NULL) {
6275 					fcp_log(CE_WARN, pptr->port_dip,
6276 					    "!Failed to allocate lun 0 for"
6277 					    " D_ID=%x", ptgt->tgt_d_id);
6278 					goto fail;
6279 				}
6280 				lun0_newalloc = 1;
6281 			}
6282 
6283 			/* fill in LUN info */
6284 			mutex_enter(&ptgt->tgt_mutex);
6285 			/*
6286 			 * consider lun 0 as device not connected if it is
6287 			 * offlined or newly allocated
6288 			 */
6289 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6290 			    lun0_newalloc) {
6291 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6292 			}
6293 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6294 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6295 			ptgt->tgt_lun_cnt = 1;
6296 			ptgt->tgt_report_lun_cnt = 0;
6297 			mutex_exit(&ptgt->tgt_mutex);
6298 
6299 			/* Retrieve the rscn count (if a valid one exists) */
6300 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6301 				rscn_count = ((fc_ulp_rscn_info_t *)
6302 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6303 				    ->ulp_rscn_count;
6304 			} else {
6305 				rscn_count = FC_INVALID_RSCN_COUNT;
6306 			}
6307 
6308 			/* send Report Lun request to target */
6309 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6310 			    sizeof (struct fcp_reportlun_resp),
6311 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6312 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6313 				mutex_enter(&pptr->port_mutex);
6314 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6315 					fcp_log(CE_WARN, pptr->port_dip,
6316 					    "!Failed to send REPORT LUN to"
6317 					    "  D_ID=%x", ptgt->tgt_d_id);
6318 				} else {
6319 					FCP_TRACE(fcp_logq,
6320 					    pptr->port_instbuf, fcp_trace,
6321 					    FCP_BUF_LEVEL_5, 0,
6322 					    "fcp_icmd_callback,2: state change"
6323 					    " occurred for D_ID=0x%x",
6324 					    ptgt->tgt_d_id);
6325 				}
6326 				mutex_exit(&pptr->port_mutex);
6327 
6328 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6329 				    FCP_TGT_TRACE_19);
6330 
6331 				goto fail;
6332 			} else {
6333 				free_pkt = 0;
6334 				fcp_icmd_free(pptr, icmd);
6335 			}
6336 			break;
6337 
6338 		default:
6339 			fcp_log(CE_WARN, pptr->port_dip,
6340 			    "!fcp_icmd_callback Invalid opcode");
6341 			goto fail;
6342 		}
6343 
6344 		return;
6345 	}
6346 
6347 
6348 	/*
6349 	 * Other PLOGI failures are not retried as the
6350 	 * transport does it already
6351 	 */
6352 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6353 		if (fcp_is_retryable(icmd) &&
6354 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6355 
6356 			if (FCP_MUST_RETRY(fpkt)) {
6357 				fcp_queue_ipkt(pptr, fpkt);
6358 				return;
6359 			}
6360 
6361 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6362 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6363 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6364 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6365 			    fpkt->pkt_reason);
6366 
6367 			/*
6368 			 * Retry by recalling the routine that
6369 			 * originally queued this packet
6370 			 */
6371 			mutex_enter(&pptr->port_mutex);
6372 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6373 				caddr_t msg;
6374 
6375 				mutex_exit(&pptr->port_mutex);
6376 
6377 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6378 
6379 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6380 					fpkt->pkt_timeout +=
6381 					    FCP_TIMEOUT_DELTA;
6382 				}
6383 
6384 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6385 				    fpkt);
6386 				if (rval == FC_SUCCESS) {
6387 					return;
6388 				}
6389 
6390 				if (rval == FC_STATEC_BUSY ||
6391 				    rval == FC_OFFLINE) {
6392 					fcp_queue_ipkt(pptr, fpkt);
6393 					return;
6394 				}
6395 				(void) fc_ulp_error(rval, &msg);
6396 
6397 				fcp_log(CE_NOTE, pptr->port_dip,
6398 				    "!ELS 0x%x failed to d_id=0x%x;"
6399 				    " %s", icmd->ipkt_opcode,
6400 				    ptgt->tgt_d_id, msg);
6401 			} else {
6402 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6403 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6404 				    "fcp_icmd_callback,3: state change "
6405 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6406 				mutex_exit(&pptr->port_mutex);
6407 			}
6408 		}
6409 	} else {
6410 		if (fcp_is_retryable(icmd) &&
6411 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6412 			if (FCP_MUST_RETRY(fpkt)) {
6413 				fcp_queue_ipkt(pptr, fpkt);
6414 				return;
6415 			}
6416 		}
6417 		mutex_enter(&pptr->port_mutex);
6418 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6419 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6420 			mutex_exit(&pptr->port_mutex);
6421 			fcp_print_error(fpkt);
6422 		} else {
6423 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6424 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6425 			    "fcp_icmd_callback,4: state change occurred"
6426 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6427 			mutex_exit(&pptr->port_mutex);
6428 		}
6429 	}
6430 
6431 fail:
6432 	if (free_pkt) {
6433 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6434 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6435 		fcp_icmd_free(pptr, icmd);
6436 	}
6437 }
6438 
6439 
6440 /*
6441  * called internally to send an info cmd using the transport
6442  *
6443  * sends either an INQ or a REPORT_LUN
6444  * sends an INQUIRY (standard or VPD page 83) or a REPORT_LUN
6445  * when the packet is completed fcp_scsi_callback is called
6446  */
6447 static int
6448 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6449     int lcount, int tcount, int cause, uint32_t rscn_count)
6450 {
6451 	int			nodma;
6452 	struct fcp_ipkt		*icmd;
6453 	struct fcp_tgt		*ptgt;
6454 	struct fcp_port		*pptr;
6455 	fc_frame_hdr_t		*hp;
6456 	fc_packet_t		*fpkt;
6457 	struct fcp_cmd		fcp_cmd;
6458 	struct fcp_cmd		*fcmd;
6459 	union scsi_cdb		*scsi_cdb;
6460 
6461 	ASSERT(plun != NULL);
6462 
6463 	ptgt = plun->lun_tgt;
6464 	ASSERT(ptgt != NULL);
6465 
6466 	pptr = ptgt->tgt_port;
6467 	ASSERT(pptr != NULL);
6468 
6469 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6470 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6471 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6472 
6473 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6474 
6475 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6476 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6477 	    rscn_count);
6478 
6479 	if (icmd == NULL) {
6480 		return (DDI_FAILURE);
6481 	}
6482 
6483 	fpkt = icmd->ipkt_fpkt;
6484 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6485 	icmd->ipkt_retries = 0;
6486 	icmd->ipkt_opcode = opcode;
6487 	icmd->ipkt_lun = plun;
6488 
6489 	if (nodma) {
6490 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6491 	} else {
6492 		fcmd = &fcp_cmd;
6493 	}
6494 	bzero(fcmd, sizeof (struct fcp_cmd));
6495 
6496 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6497 
6498 	hp = &fpkt->pkt_cmd_fhdr;
6499 
6500 	hp->s_id = pptr->port_id;
6501 	hp->d_id = ptgt->tgt_d_id;
6502 	hp->r_ctl = R_CTL_COMMAND;
6503 	hp->type = FC_TYPE_SCSI_FCP;
6504 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6505 	hp->rsvd = 0;
6506 	hp->seq_id = 0;
6507 	hp->seq_cnt = 0;
6508 	hp->ox_id = 0xffff;
6509 	hp->rx_id = 0xffff;
6510 	hp->ro = 0;
6511 
6512 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6513 
6514 	/*
6515 	 * Request SCSI target for expedited processing
6516 	 */
6517 
6518 	/*
6519 	 * Set up for untagged queuing because we do not
6520 	 * know if the fibre device supports queuing.
6521 	 */
6522 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6523 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6524 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6525 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6526 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6527 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6528 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6529 
6530 	switch (opcode) {
6531 	case SCMD_INQUIRY_PAGE83:
6532 		/*
6533 		 * Prepare to get the Inquiry VPD page 83 information
6534 		 */
6535 		fcmd->fcp_cntl.cntl_read_data = 1;
6536 		fcmd->fcp_cntl.cntl_write_data = 0;
6537 		fcmd->fcp_data_len = alloc_len;
6538 
6539 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6540 		fpkt->pkt_comp = fcp_scsi_callback;
6541 
6542 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6543 		scsi_cdb->g0_addr2 = 0x01;
6544 		scsi_cdb->g0_addr1 = 0x83;
6545 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6546 		break;
6547 
6548 	case SCMD_INQUIRY:
6549 		fcmd->fcp_cntl.cntl_read_data = 1;
6550 		fcmd->fcp_cntl.cntl_write_data = 0;
6551 		fcmd->fcp_data_len = alloc_len;
6552 
6553 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6554 		fpkt->pkt_comp = fcp_scsi_callback;
6555 
6556 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6557 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6558 		break;
6559 
6560 	case SCMD_REPORT_LUN: {
6561 		fc_portid_t	d_id;
6562 		opaque_t	fca_dev;
6563 
6564 		ASSERT(alloc_len >= 16);
6565 
6566 		d_id.priv_lilp_posit = 0;
6567 		d_id.port_id = ptgt->tgt_d_id;
6568 
6569 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6570 
6571 		mutex_enter(&ptgt->tgt_mutex);
6572 		ptgt->tgt_fca_dev = fca_dev;
6573 		mutex_exit(&ptgt->tgt_mutex);
6574 
6575 		fcmd->fcp_cntl.cntl_read_data = 1;
6576 		fcmd->fcp_cntl.cntl_write_data = 0;
6577 		fcmd->fcp_data_len = alloc_len;
6578 
6579 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6580 		fpkt->pkt_comp = fcp_scsi_callback;
6581 
6582 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6583 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6584 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6585 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6586 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6587 		break;
6588 	}
6589 
6590 	default:
6591 		fcp_log(CE_WARN, pptr->port_dip,
6592 		    "!fcp_send_scsi Invalid opcode");
6593 		break;
6594 	}
6595 
6596 	if (!nodma) {
6597 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6598 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6599 	}
6600 
6601 	mutex_enter(&pptr->port_mutex);
6602 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6603 
6604 		mutex_exit(&pptr->port_mutex);
6605 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6606 		    FC_SUCCESS) {
6607 			fcp_icmd_free(pptr, icmd);
6608 			return (DDI_FAILURE);
6609 		}
6610 		return (DDI_SUCCESS);
6611 	} else {
6612 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6613 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6614 		    "fcp_send_scsi,1: state change occurred"
6615 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6616 		mutex_exit(&pptr->port_mutex);
6617 		fcp_icmd_free(pptr, icmd);
6618 		return (DDI_FAILURE);
6619 	}
6620 }
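
/*
 * A minimal sketch of how discovery code drives this routine, modeled on
 * the REPORT LUN request issued from fcp_icmd_callback(); lcount, tcount,
 * cause and rscn_count are the caller's snapshot of the link and target
 * state:
 *
 *	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
 *	    sizeof (struct fcp_reportlun_resp), lcount, tcount,
 *	    cause, rscn_count) != DDI_SUCCESS) {
 *		... the probe could not be started; handle the failure ...
 *	}
 */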
6621 
6622 
6623 /*
6624  * called by fcp_scsi_callback to check to handle the case where
6625  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6626  */
6627 static int
6628 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6629 {
6630 	uchar_t				rqlen;
6631 	int				rval = DDI_FAILURE;
6632 	struct scsi_extended_sense	sense_info, *sense;
6633 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6634 	    fpkt->pkt_ulp_private;
6635 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6636 	struct fcp_port		*pptr = ptgt->tgt_port;
6637 
6638 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6639 
6640 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6641 		/*
6642 		 * SCSI-II Reserve Release support. Some older FC drives return
6643 		 * Reservation conflict for Report Luns command.
6644 		 */
6645 		if (icmd->ipkt_nodma) {
6646 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6647 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6648 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6649 		} else {
6650 			fcp_rsp_t	new_resp;
6651 
6652 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6653 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6654 
6655 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6656 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6657 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6658 
6659 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6660 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6661 		}
6662 
6663 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6664 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6665 
6666 		return (DDI_SUCCESS);
6667 	}
6668 
6669 	sense = &sense_info;
6670 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6671 		/* no need to continue if sense length is not set */
6672 		return (rval);
6673 	}
6674 
6675 	/* casting 64-bit integer to 8-bit */
6676 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6677 	    sizeof (struct scsi_extended_sense));
6678 
6679 	if (rqlen < 14) {
6680 		/* no need to continue if request length isn't long enough */
6681 		return (rval);
6682 	}
6683 
6684 	if (icmd->ipkt_nodma) {
6685 		/*
6686 		 * We can safely use fcp_response_len here since the
6687 		 * only path that calls fcp_check_reportlun,
6688 		 * fcp_scsi_callback, has already called
6689 		 * fcp_validate_fcp_response.
6690 		 */
6691 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6692 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6693 	} else {
6694 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6695 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6696 		    sizeof (struct scsi_extended_sense));
6697 	}
6698 
6699 	if (!FCP_SENSE_NO_LUN(sense)) {
6700 		mutex_enter(&ptgt->tgt_mutex);
6701 		/* clear the flag if any */
6702 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6703 		mutex_exit(&ptgt->tgt_mutex);
6704 	}
6705 
6706 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6707 	    (sense->es_add_code == 0x20)) {
6708 		if (icmd->ipkt_nodma) {
6709 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6710 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6711 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6712 		} else {
6713 			fcp_rsp_t	new_resp;
6714 
6715 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6716 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6717 
6718 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6719 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6720 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 
6722 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6723 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6724 		}
6725 
6726 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6727 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6728 
6729 		return (DDI_SUCCESS);
6730 	}
6731 
6732 	/*
6733 	 * This is for the STK library, which returns a check condition
6734 	 * to indicate the device is not ready and manual assistance is needed.
6735 	 * This happens in response to a report lun command when the door is open.
6736 	 */
6737 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6738 		if (icmd->ipkt_nodma) {
6739 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6740 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6741 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6742 		} else {
6743 			fcp_rsp_t	new_resp;
6744 
6745 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6746 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6747 
6748 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6749 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6750 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6751 
6752 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6753 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6754 		}
6755 
6756 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6757 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6758 
6759 		return (DDI_SUCCESS);
6760 	}
6761 
6762 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6763 	    (FCP_SENSE_NO_LUN(sense))) {
6764 		mutex_enter(&ptgt->tgt_mutex);
6765 		if ((FCP_SENSE_NO_LUN(sense)) &&
6766 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6767 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6768 			mutex_exit(&ptgt->tgt_mutex);
6769 			/*
6770 			 * reconfig was triggered by ILLEGAL REQUEST but
6771 			 * got ILLEGAL REQUEST again
6772 			 */
6773 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6774 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6775 			    "!FCP: Unable to obtain Report Lun data"
6776 			    " target=%x", ptgt->tgt_d_id);
6777 		} else {
6778 			if (ptgt->tgt_tid == NULL) {
6779 				timeout_id_t	tid;
6780 				/*
6781 				 * REPORT LUN data has changed.	 Kick off
6782 				 * rediscovery
6783 				 */
6784 				tid = timeout(fcp_reconfigure_luns,
6785 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6786 
6787 				ptgt->tgt_tid = tid;
6788 				ptgt->tgt_state |= FCP_TGT_BUSY;
6789 			}
6790 			if (FCP_SENSE_NO_LUN(sense)) {
6791 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6792 			}
6793 			mutex_exit(&ptgt->tgt_mutex);
6794 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6795 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6796 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6797 				    "!FCP:Report Lun Has Changed"
6798 				    " target=%x", ptgt->tgt_d_id);
6799 			} else if (FCP_SENSE_NO_LUN(sense)) {
6800 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6801 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6802 				    "!FCP:LU Not Supported"
6803 				    " target=%x", ptgt->tgt_d_id);
6804 			}
6805 		}
6806 		rval = DDI_SUCCESS;
6807 	}
6808 
6809 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6810 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6811 	    "D_ID=%x, sense=%x, status=%x",
6812 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6813 	    rsp->fcp_u.fcp_status.scsi_status);
6814 
6815 	return (rval);
6816 }
6817 
6818 /*
6819  *     Function: fcp_scsi_callback
6820  *
6821  *  Description: This is the callback routine set by fcp_send_scsi() after
6822  *		 it calls fcp_icmd_alloc().  The SCSI commands completed here,
6823  *		 all autogenerated by FCP, are:	REPORT_LUN, INQUIRY and
6824  *		 INQUIRY_PAGE83.
6825  *
6826  *     Argument: *fpkt	 FC packet used to convey the command
6827  *
6828  * Return Value: None
6829  */
6830 static void
6831 fcp_scsi_callback(fc_packet_t *fpkt)
6832 {
6833 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6834 	    fpkt->pkt_ulp_private;
6835 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6836 	struct fcp_port	*pptr;
6837 	struct fcp_tgt	*ptgt;
6838 	struct fcp_lun	*plun;
6839 	struct fcp_rsp		response, *rsp;
6840 
6841 	if (icmd->ipkt_nodma) {
6842 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6843 	} else {
6844 		rsp = &response;
6845 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6846 		    sizeof (struct fcp_rsp));
6847 	}
6848 
6849 	ptgt = icmd->ipkt_tgt;
6850 	pptr = ptgt->tgt_port;
6851 	plun = icmd->ipkt_lun;
6852 
6853 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6854 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6855 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6856 	    "status=%x, lun num=%x",
6857 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6858 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6859 
6860 	/*
6861 	 * Pre-init LUN GUID with NWWN if it is not a device that
6862 	 * supports multiple luns and we know it's not page83
6863 	 * compliant.  Although using a NWWN is not lun unique,
6864 	 * we will be fine since there is only one lun behind the target
6865 	 * in this case.
6866 	 */
6867 	if ((plun->lun_guid_size == 0) &&
6868 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6869 	    (fcp_symmetric_device_probe(plun) == 0)) {
6870 
6871 		char ascii_wwn[FC_WWN_SIZE*2+1];
6872 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6873 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6874 	}
6875 
6876 	/*
6877 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6878 	 * when they have more data than what is asked for in the CDB. An overrun
6879 	 * is really when FCP_DL is smaller than the data length in the CDB.
6880 	 * In the case here we know that the REPORT LUN command we formed within
6881 	 * this binary has the correct FCP_DL. So this OVERRUN is due to bad device
6882 	 * behavior. In reality this is FC_SUCCESS.
6883 	 */
6884 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6885 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6886 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6887 		fpkt->pkt_state = FC_PKT_SUCCESS;
6888 	}
6889 
6890 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6891 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6892 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6893 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6894 		    ptgt->tgt_d_id);
6895 
6896 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6897 			/*
6898 			 * Inquiry VPD page command on A5K SES devices would
6899 			 * result in data CRC errors.
6900 			 */
6901 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6902 				(void) fcp_handle_page83(fpkt, icmd, 1);
6903 				return;
6904 			}
6905 		}
6906 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6907 		    FCP_MUST_RETRY(fpkt)) {
6908 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6909 			fcp_retry_scsi_cmd(fpkt);
6910 			return;
6911 		}
6912 
6913 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6914 		    FCP_TGT_TRACE_20);
6915 
6916 		mutex_enter(&pptr->port_mutex);
6917 		mutex_enter(&ptgt->tgt_mutex);
6918 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6919 			mutex_exit(&ptgt->tgt_mutex);
6920 			mutex_exit(&pptr->port_mutex);
6921 			fcp_print_error(fpkt);
6922 		} else {
6923 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6924 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6925 			    "fcp_scsi_callback,1: state change occurred"
6926 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6927 			mutex_exit(&ptgt->tgt_mutex);
6928 			mutex_exit(&pptr->port_mutex);
6929 		}
6930 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6931 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6932 		fcp_icmd_free(pptr, icmd);
6933 		return;
6934 	}
6935 
6936 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
6937 
6938 	mutex_enter(&pptr->port_mutex);
6939 	mutex_enter(&ptgt->tgt_mutex);
6940 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6941 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6942 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6943 		    "fcp_scsi_callback,2: state change occurred"
6944 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6945 		mutex_exit(&ptgt->tgt_mutex);
6946 		mutex_exit(&pptr->port_mutex);
6947 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6948 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6949 		fcp_icmd_free(pptr, icmd);
6950 		return;
6951 	}
6952 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
6953 
6954 	mutex_exit(&ptgt->tgt_mutex);
6955 	mutex_exit(&pptr->port_mutex);
6956 
6957 	if (icmd->ipkt_nodma) {
6958 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
6959 		    sizeof (struct fcp_rsp));
6960 	} else {
6961 		bep = &fcp_rsp_err;
6962 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
6963 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
6964 	}
6965 
6966 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
6967 		fcp_retry_scsi_cmd(fpkt);
6968 		return;
6969 	}
6970 
6971 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
6972 	    FCP_NO_FAILURE) {
6973 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6974 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6975 		    "rsp_code=0x%x, rsp_len_set=0x%x",
6976 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
6977 		fcp_retry_scsi_cmd(fpkt);
6978 		return;
6979 	}
6980 
6981 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
6982 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
6983 		fcp_queue_ipkt(pptr, fpkt);
6984 		return;
6985 	}
6986 
6987 	/*
6988 	 * Devices that do not support INQUIRY_PAGE83 return check condition
6989 	 * with illegal request as per the SCSI spec.
6990 	 * Crossbridge is one such device and Daktari's SES node is another.
6991 	 * We ideally want to enumerate these devices as non-mpxio devices.
6992 	 * SES nodes (Daktari only currently) are an exception to this.
6993 	 */
6994 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6995 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
6996 
6997 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6998 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
6999 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7000 		    "check condition. May enumerate as non-mpxio device",
7001 		    ptgt->tgt_d_id, plun->lun_type);
7002 
7003 		/*
7004 		 * If we let Daktari's SES be enumerated as a non-mpxio
7005 		 * device, there will be a discrepancy in that the other
7006 		 * internal FC disks will get enumerated as mpxio devices.
7007 		 * Applications like luxadm expect this to be consistent.
7008 		 *
7009 		 * So, we put in a hack here to check if this is an SES device
7010 		 * and handle it here.
7011 		 */
7012 		if (plun->lun_type == DTYPE_ESI) {
7013 			/*
7014 			 * Since pkt_state is actually FC_PKT_SUCCESS
7015 			 * at this stage, we fake a failure here so that
7016 			 * fcp_handle_page83 will create a device path using
7017 			 * the WWN instead of the GUID, which is not there anyway.
7018 			 */
7019 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7020 			(void) fcp_handle_page83(fpkt, icmd, 1);
7021 			return;
7022 		}
7023 
7024 		mutex_enter(&ptgt->tgt_mutex);
7025 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7026 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7027 		mutex_exit(&ptgt->tgt_mutex);
7028 
7029 		(void) fcp_call_finish_init(pptr, ptgt,
7030 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7031 		    icmd->ipkt_cause);
7032 		fcp_icmd_free(pptr, icmd);
7033 		return;
7034 	}
7035 
7036 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7037 		int rval = DDI_FAILURE;
7038 
7039 		/*
7040 		 * handle cases where report lun isn't supported
7041 		 * by faking up our own REPORT_LUN response or
7042 		 * UNIT ATTENTION
7043 		 */
7044 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7045 			rval = fcp_check_reportlun(rsp, fpkt);
7046 
7047 			/*
7048 			 * fcp_check_reportlun might have modified the
7049 			 * FCP response. Copy it in again to get an updated
7050 			 * FCP response
7051 			 */
7052 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7053 				rsp = &response;
7054 
7055 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7056 				    fpkt->pkt_resp_acc,
7057 				    sizeof (struct fcp_rsp));
7058 			}
7059 		}
7060 
7061 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7062 			if (rval == DDI_SUCCESS) {
7063 				(void) fcp_call_finish_init(pptr, ptgt,
7064 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7065 				    icmd->ipkt_cause);
7066 				fcp_icmd_free(pptr, icmd);
7067 			} else {
7068 				fcp_retry_scsi_cmd(fpkt);
7069 			}
7070 
7071 			return;
7072 		}
7073 	} else {
7074 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7075 			mutex_enter(&ptgt->tgt_mutex);
7076 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7077 			mutex_exit(&ptgt->tgt_mutex);
7078 		}
7079 	}
7080 
7081 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7082 
7083 	(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0, DDI_DMA_SYNC_FORCPU);
7084 
7085 	switch (icmd->ipkt_opcode) {
7086 	case SCMD_INQUIRY:
7087 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7088 		fcp_handle_inquiry(fpkt, icmd);
7089 		break;
7090 
7091 	case SCMD_REPORT_LUN:
7092 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7093 		    FCP_TGT_TRACE_22);
7094 		fcp_handle_reportlun(fpkt, icmd);
7095 		break;
7096 
7097 	case SCMD_INQUIRY_PAGE83:
7098 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7099 		(void) fcp_handle_page83(fpkt, icmd, 0);
7100 		break;
7101 
7102 	default:
7103 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7104 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7105 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7106 		fcp_icmd_free(pptr, icmd);
7107 		break;
7108 	}
7109 }
7110 
7111 
7112 static void
7113 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7114 {
7115 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7116 	    fpkt->pkt_ulp_private;
7117 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7118 	struct fcp_port	*pptr = ptgt->tgt_port;
7119 
7120 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7121 	    fcp_is_retryable(icmd)) {
7122 		mutex_enter(&pptr->port_mutex);
7123 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7124 			mutex_exit(&pptr->port_mutex);
7125 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7126 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7127 			    "Retrying %s to %x; state=%x, reason=%x",
7128 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7129 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7130 			    fpkt->pkt_state, fpkt->pkt_reason);
7131 
7132 			fcp_queue_ipkt(pptr, fpkt);
7133 		} else {
7134 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7135 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7136 			    "fcp_retry_scsi_cmd,1: state change occurred"
7137 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7138 			mutex_exit(&pptr->port_mutex);
7139 			(void) fcp_call_finish_init(pptr, ptgt,
7140 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7141 			    icmd->ipkt_cause);
7142 			fcp_icmd_free(pptr, icmd);
7143 		}
7144 	} else {
7145 		fcp_print_error(fpkt);
7146 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7147 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7148 		fcp_icmd_free(pptr, icmd);
7149 	}
7150 }
7151 
7152 /*
7153  *     Function: fcp_handle_page83
7154  *
7155  *  Description: Treats the response to INQUIRY_PAGE83.
7156  *
7157  *     Argument: *fpkt	FC packet used to convey the command.
7158  *		 *icmd	Original fcp_ipkt structure.
7159  *		 ignore_page83_data
7160  *			If 1, this is a special device's page83 response;
7161  *			the device should still be enumerated under mpxio.
7162  *
7163  * Return Value: None
7164  */
7165 static void
7166 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7167     int ignore_page83_data)
7168 {
7169 	struct fcp_port	*pptr;
7170 	struct fcp_lun	*plun;
7171 	struct fcp_tgt	*ptgt;
7172 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7173 	int			fail = 0;
7174 	ddi_devid_t		devid;
7175 	char			*guid = NULL;
7176 	int			ret;
7177 
7178 	ASSERT(icmd != NULL && fpkt != NULL);
7179 
7180 	pptr = icmd->ipkt_port;
7181 	ptgt = icmd->ipkt_tgt;
7182 	plun = icmd->ipkt_lun;
7183 
7184 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7185 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7186 
7187 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7188 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7189 
7190 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7191 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7192 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7193 		    "dtype=0x%x, lun num=%x",
7194 		    pptr->port_instance, ptgt->tgt_d_id,
7195 		    dev_id_page[0], plun->lun_num);
7196 
7197 		ret = ddi_devid_scsi_encode(
7198 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7199 		    NULL,		/* driver name */
7200 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7201 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7202 		    NULL,		/* page 80 data */
7203 		    0,		/* page 80 len */
7204 		    dev_id_page,	/* page 83 data */
7205 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7206 		    &devid);
7207 
7208 		if (ret == DDI_SUCCESS) {
7209 
7210 			guid = ddi_devid_to_guid(devid);
7211 
7212 			if (guid) {
7213 				/*
7214 				 * Check our current guid.  If it's non null
7215 				 * and it has changed, we need to copy it into
7216 				 * lun_old_guid since we might still need it.
7217 				 */
7218 				if (plun->lun_guid &&
7219 				    strcmp(guid, plun->lun_guid)) {
7220 					unsigned int len;
7221 
7222 					/*
7223 					 * If the guid of the LUN changes,
7224 					 * reconfiguration should be triggered
7225 					 * to reflect the changes.
7226 					 * i.e. we should offline the LUN with
7227 					 * the old guid, and online the LUN with
7228 					 * the new guid.
7229 					 */
7230 					plun->lun_state |= FCP_LUN_CHANGED;
7231 
7232 					if (plun->lun_old_guid) {
7233 						kmem_free(plun->lun_old_guid,
7234 						    plun->lun_old_guid_size);
7235 					}
7236 
7237 					len = plun->lun_guid_size;
7238 					plun->lun_old_guid_size = len;
7239 
7240 					plun->lun_old_guid = kmem_zalloc(len,
7241 					    KM_NOSLEEP);
7242 
7243 					if (plun->lun_old_guid) {
7244 						/*
7245 						 * The allocation succeeded, so
7246 						 * do the copy.
7247 						 */
7248 						bcopy(plun->lun_guid,
7249 						    plun->lun_old_guid, len);
7250 					} else {
7251 						fail = 1;
7252 						plun->lun_old_guid_size = 0;
7253 					}
7254 				}
7255 				if (!fail) {
7256 					if (fcp_copy_guid_2_lun_block(
7257 					    plun, guid)) {
7258 						fail = 1;
7259 					}
7260 				}
7261 				ddi_devid_free_guid(guid);
7262 
7263 			} else {
7264 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7265 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7266 				    "fcp_handle_page83: unable to create "
7267 				    "GUID");
7268 
7269 				/* couldn't create good guid from devid */
7270 				fail = 1;
7271 			}
7272 			ddi_devid_free(devid);
7273 
7274 		} else if (ret == DDI_NOT_WELL_FORMED) {
7275 			/* NULL filled data for page 83 */
7276 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7277 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7278 			    "fcp_handle_page83: retry GUID");
7279 
7280 			icmd->ipkt_retries = 0;
7281 			fcp_retry_scsi_cmd(fpkt);
7282 			return;
7283 		} else {
7284 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7285 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7286 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7287 			    ret);
7288 			/*
7289 			 * Since the page83 validation was
7290 			 * introduced late, we are tolerant
7291 			 * of existing devices that were
7292 			 * already found to work under mpxio,
7293 			 * such as the A5200's SES device. Its
7294 			 * page83 response is not standard-compliant,
7295 			 * but we still want it enumerated under mpxio.
7296 			 */
7297 			if (fcp_symmetric_device_probe(plun) != 0) {
7298 				fail = 1;
7299 			}
7300 		}
7301 
7302 	} else {
7303 		/* bad packet state */
7304 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7305 
7306 		/*
7307 		 * Some special devices (the A5K SES and Daktari's SES devices)
7308 		 * must be enumerated under mpxio,
7309 		 * or "luxadm dis" will fail.
7310 		 */
7311 		if (ignore_page83_data) {
7312 			fail = 0;
7313 		} else {
7314 			fail = 1;
7315 		}
7316 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7317 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7318 		    "!Devid page cmd failed. "
7319 		    "fpkt_state: %x fpkt_reason: %x "
7320 		    "ignore_page83: %d",
7321 		    fpkt->pkt_state, fpkt->pkt_reason,
7322 		    ignore_page83_data);
7323 	}
7324 
7325 	mutex_enter(&pptr->port_mutex);
7326 	mutex_enter(&plun->lun_mutex);
7327 	/*
7328 	 * If lun_cip is not NULL, leave lun_mpxio unchanged to avoid a
7329 	 * mismatch between lun_cip and lun_mpxio.
7330 	 */
7331 	if (plun->lun_cip == NULL) {
7332 		/*
7333 		 * If we don't have a guid for this lun it's because we were
7334 		 * unable to glean one from the page 83 response.  Set the
7335 		 * control flag to 0 here to make sure that we don't attempt to
7336 		 * enumerate it under mpxio.
7337 		 */
7338 		if (fail || pptr->port_mpxio == 0) {
7339 			plun->lun_mpxio = 0;
7340 		} else {
7341 			plun->lun_mpxio = 1;
7342 		}
7343 	}
7344 	mutex_exit(&plun->lun_mutex);
7345 	mutex_exit(&pptr->port_mutex);
7346 
7347 	mutex_enter(&ptgt->tgt_mutex);
7348 	plun->lun_state &=
7349 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7350 	mutex_exit(&ptgt->tgt_mutex);
7351 
7352 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7353 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7354 
7355 	fcp_icmd_free(pptr, icmd);
7356 }
7357 
7358 /*
7359  *     Function: fcp_handle_inquiry
7360  *
7361  *  Description: Called by fcp_scsi_callback to handle the response to an
7362  *		 INQUIRY request.
7363  *
7364  *     Argument: *fpkt	FC packet used to convey the command.
7365  *		 *icmd	Original fcp_ipkt structure.
7366  *
7367  * Return Value: None
7368  */
7369 static void
7370 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7371 {
7372 	struct fcp_port	*pptr;
7373 	struct fcp_lun	*plun;
7374 	struct fcp_tgt	*ptgt;
7375 	uchar_t		dtype;
7376 	uchar_t		pqual;
7377 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7378 
7379 	ASSERT(icmd != NULL && fpkt != NULL);
7380 
7381 	pptr = icmd->ipkt_port;
7382 	ptgt = icmd->ipkt_tgt;
7383 	plun = icmd->ipkt_lun;
7384 
7385 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7386 	    sizeof (struct scsi_inquiry));
7387 
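	/*
	 * Byte 0 of the standard INQUIRY data packs the peripheral
	 * qualifier in bits 7-5 and the peripheral device type in
	 * bits 4-0, hence the mask and the shift by 5 below.
	 */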
7388 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7389 	pqual = plun->lun_inq.inq_dtype >> 5;
7390 
7391 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7392 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7393 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7394 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7395 	    plun->lun_num, dtype, pqual);
7396 
7397 	if (pqual != 0) {
7398 		/*
7399 		 * Non-zero peripheral qualifier
7400 		 */
7401 		fcp_log(CE_CONT, pptr->port_dip,
7402 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7403 		    "Device type=0x%x Peripheral qual=0x%x\n",
7404 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7405 
7406 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7407 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7408 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7409 		    "Device type=0x%x Peripheral qual=0x%x\n",
7410 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7411 
7412 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7413 
7414 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7415 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7416 		fcp_icmd_free(pptr, icmd);
7417 		return;
7418 	}
7419 
7420 	/*
7421 	 * If the device is already initialized, check the dtype
7422 	 * for a change. If it has changed, update the flags
7423 	 * so that fcp_create_luns() will offline the old device and
7424 	 * create the new device. Refer to bug 4764752.
7425 	 */
7426 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7427 		plun->lun_state |= FCP_LUN_CHANGED;
7428 	}
7429 	plun->lun_type = plun->lun_inq.inq_dtype;
7430 
7431 	/*
7432 	 * This code is setting/initializing the throttling in the FCA
7433 	 * driver.
7434 	 */
7435 	mutex_enter(&pptr->port_mutex);
7436 	if (!pptr->port_notify) {
7437 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7438 			uint32_t cmd = 0;
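			/*
			 * Presumably this packs FC_NOTIFY_THROTTLE into the
			 * low byte and the FCP_SVE_THROTTLE value into the
			 * next byte; since cmd starts out as 0 the masks are
			 * effectively no-ops.
			 */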
7439 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7440 			    ((cmd & 0xFFFFFF00 >> 8) |
7441 			    FCP_SVE_THROTTLE << 8));
7442 			pptr->port_notify = 1;
7443 			mutex_exit(&pptr->port_mutex);
7444 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7445 			mutex_enter(&pptr->port_mutex);
7446 		}
7447 	}
7448 
7449 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7450 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7451 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7452 		    "fcp_handle_inquiry,1: state change occurred"
7453 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7454 		mutex_exit(&pptr->port_mutex);
7455 
7456 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7457 		(void) fcp_call_finish_init(pptr, ptgt,
7458 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7459 		    icmd->ipkt_cause);
7460 		fcp_icmd_free(pptr, icmd);
7461 		return;
7462 	}
7463 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7464 	mutex_exit(&pptr->port_mutex);
7465 
7466 	/* Retrieve the rscn count (if a valid one exists) */
7467 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7468 		rscn_count = ((fc_ulp_rscn_info_t *)
7469 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7470 	} else {
7471 		rscn_count = FC_INVALID_RSCN_COUNT;
7472 	}
7473 
7474 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7475 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7476 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7477 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7478 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7479 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7480 		(void) fcp_call_finish_init(pptr, ptgt,
7481 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7482 		    icmd->ipkt_cause);
7483 	}
7484 
7485 	/*
7486 	 * The INQUIRY VPD page 0x83 request used to uniquely identify
7487 	 * this logical unit has been sent above; free the internal packet.
7488 	 */
7489 	fcp_icmd_free(pptr, icmd);
7490 }
7491 
7492 /*
7493  *     Function: fcp_handle_reportlun
7494  *
7495  *  Description: Called by fcp_scsi_callback to handle the response to a
7496  *		 REPORT_LUN request.
7497  *
7498  *     Argument: *fpkt	FC packet used to convey the command.
7499  *		 *icmd	Original fcp_ipkt structure.
7500  *
7501  * Return Value: None
7502  */
7503 static void
7504 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7505 {
7506 	int				i;
7507 	int				nluns_claimed;
7508 	int				nluns_bufmax;
7509 	int				len;
7510 	uint16_t			lun_num;
7511 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7512 	struct fcp_port			*pptr;
7513 	struct fcp_tgt			*ptgt;
7514 	struct fcp_lun			*plun;
7515 	struct fcp_reportlun_resp	*report_lun;
7516 
7517 	pptr = icmd->ipkt_port;
7518 	ptgt = icmd->ipkt_tgt;
7519 	len = fpkt->pkt_datalen;
7520 
7521 	if ((len < FCP_LUN_HEADER) ||
7522 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7523 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7524 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7525 		fcp_icmd_free(pptr, icmd);
7526 		return;
7527 	}
7528 
7529 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7530 	    fpkt->pkt_datalen);
7531 
7532 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7533 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7534 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7535 	    pptr->port_instance, ptgt->tgt_d_id);
7536 
7537 	/*
7538 	 * Get the number of luns (which is supplied as LUNS * 8) the
7539 	 * device claims it has.
7540 	 */
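	/*
	 * The REPORT LUNS data begins with a 4-byte LUN list length,
	 * expressed in bytes; each LUN entry is 8 bytes, so shifting
	 * right by 3 converts that length into a LUN count.
	 */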
7541 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7542 
7543 	/*
7544 	 * Get the maximum number of luns the buffer submitted can hold.
7545 	 */
7546 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7547 
7548 	/*
7549 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7550 	 */
7551 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7552 		kmem_free(report_lun, len);
7553 
7554 		fcp_log(CE_NOTE, pptr->port_dip, "!Cannot support"
7555 		    " 0x%x LUNs for target=%x", nluns_claimed,
7556 		    ptgt->tgt_d_id);
7557 
7558 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7559 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7560 		fcp_icmd_free(pptr, icmd);
7561 		return;
7562 	}
7563 
7564 	/*
7565 	 * If there are more LUNs than we have allocated memory for,
7566 	 * allocate more space and send down yet another report lun if
7567 	 * the maximum number of attempts hasn't been reached.
7568 	 */
7569 	mutex_enter(&ptgt->tgt_mutex);
7570 
7571 	if ((nluns_claimed > nluns_bufmax) &&
7572 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7573 
7574 		struct fcp_lun *plun;
7575 
7576 		ptgt->tgt_report_lun_cnt++;
7577 		plun = ptgt->tgt_lun;
7578 		ASSERT(plun != NULL);
7579 		mutex_exit(&ptgt->tgt_mutex);
7580 
7581 		kmem_free(report_lun, len);
7582 
7583 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7584 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7585 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7586 		    nluns_claimed, ptgt->tgt_d_id);
7587 
7588 		/* Retrieve the rscn count (if a valid one exists) */
7589 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7590 			rscn_count = ((fc_ulp_rscn_info_t *)
7591 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7592 			    ulp_rscn_count;
7593 		} else {
7594 			rscn_count = FC_INVALID_RSCN_COUNT;
7595 		}
7596 
7597 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7598 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7599 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7600 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7601 			(void) fcp_call_finish_init(pptr, ptgt,
7602 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7603 			    icmd->ipkt_cause);
7604 		}
7605 
7606 		fcp_icmd_free(pptr, icmd);
7607 		return;
7608 	}
7609 
7610 	if (nluns_claimed > nluns_bufmax) {
7611 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7612 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7613 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7614 		    "	 Number of LUNs lost=%x",
7615 		    ptgt->tgt_port_wwn.raw_wwn[0],
7616 		    ptgt->tgt_port_wwn.raw_wwn[1],
7617 		    ptgt->tgt_port_wwn.raw_wwn[2],
7618 		    ptgt->tgt_port_wwn.raw_wwn[3],
7619 		    ptgt->tgt_port_wwn.raw_wwn[4],
7620 		    ptgt->tgt_port_wwn.raw_wwn[5],
7621 		    ptgt->tgt_port_wwn.raw_wwn[6],
7622 		    ptgt->tgt_port_wwn.raw_wwn[7],
7623 		    nluns_claimed - nluns_bufmax);
7624 
7625 		nluns_claimed = nluns_bufmax;
7626 	}
7627 	ptgt->tgt_lun_cnt = nluns_claimed;
7628 
7629 	/*
7630 	 * Identify missing LUNs and print warning messages
7631 	 */
7632 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7633 		int offline;
7634 		int exists = 0;
7635 
7636 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7637 
7638 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7639 			uchar_t		*lun_string;
7640 
7641 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7642 
7643 			switch (lun_string[0] & 0xC0) {
7644 			case FCP_LUN_ADDRESSING:
7645 			case FCP_PD_ADDRESSING:
7646 			case FCP_VOLUME_ADDRESSING:
7647 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7648 				    lun_string[1];
7649 				if (plun->lun_num == lun_num) {
7650 					exists++;
7651 					break;
7652 				}
7653 				break;
7654 
7655 			default:
7656 				break;
7657 			}
7658 		}
7659 
7660 		if (!exists && !offline) {
7661 			mutex_exit(&ptgt->tgt_mutex);
7662 
7663 			mutex_enter(&pptr->port_mutex);
7664 			mutex_enter(&ptgt->tgt_mutex);
7665 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7666 				/*
7667 				 * Set the disappeared flag if the device was connected.
7668 				 */
7669 				if (!(plun->lun_state &
7670 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7671 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7672 				}
7673 				mutex_exit(&ptgt->tgt_mutex);
7674 				mutex_exit(&pptr->port_mutex);
7675 				if (!(plun->lun_state &
7676 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7677 					fcp_log(CE_NOTE, pptr->port_dip,
7678 					    "!Lun=%x for target=%x disappeared",
7679 					    plun->lun_num, ptgt->tgt_d_id);
7680 				}
7681 				mutex_enter(&ptgt->tgt_mutex);
7682 			} else {
7683 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7684 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7685 				    "fcp_handle_reportlun,1: state change"
7686 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7687 				mutex_exit(&ptgt->tgt_mutex);
7688 				mutex_exit(&pptr->port_mutex);
7689 				kmem_free(report_lun, len);
7690 				(void) fcp_call_finish_init(pptr, ptgt,
7691 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7692 				    icmd->ipkt_cause);
7693 				fcp_icmd_free(pptr, icmd);
7694 				return;
7695 			}
7696 		} else if (exists) {
7697 			/*
7698 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7699 			 * actually exists in REPORT_LUN response
7700 			 */
7701 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7702 				plun->lun_state &=
7703 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7704 			}
7705 			if (offline || plun->lun_num == 0) {
7706 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7707 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7708 					mutex_exit(&ptgt->tgt_mutex);
7709 					fcp_log(CE_NOTE, pptr->port_dip,
7710 					    "!Lun=%x for target=%x reappeared",
7711 					    plun->lun_num, ptgt->tgt_d_id);
7712 					mutex_enter(&ptgt->tgt_mutex);
7713 				}
7714 			}
7715 		}
7716 	}
7717 
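	/*
	 * tgt_tmp_cnt tracks the number of per-LUN discovery operations
	 * still pending (see the decrement note in the loop below); it is
	 * set to at least 1 so that the i == 0 case at the end of this
	 * function can still decrement it once.
	 */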
7718 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7719 	mutex_exit(&ptgt->tgt_mutex);
7720 
7721 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7722 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7723 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7724 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7725 
7726 	/* scan each lun */
7727 	for (i = 0; i < nluns_claimed; i++) {
7728 		uchar_t	*lun_string;
7729 
7730 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7731 
7732 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7733 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7734 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7735 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7736 		    lun_string[0]);
7737 
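		/*
		 * The top two bits of byte 0 of each 8-byte LUN entry select
		 * the addressing method; for the methods supported here the
		 * 14-bit LUN number is taken from the low 6 bits of byte 0
		 * and all of byte 1, matching the extraction below.
		 */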
7738 		switch (lun_string[0] & 0xC0) {
7739 		case FCP_LUN_ADDRESSING:
7740 		case FCP_PD_ADDRESSING:
7741 		case FCP_VOLUME_ADDRESSING:
7742 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7743 
7744 			/* We will skip masked LUNs because of the blacklist. */
7745 			if (fcp_lun_blacklist != NULL) {
7746 				mutex_enter(&ptgt->tgt_mutex);
7747 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7748 				    lun_num) == TRUE) {
7749 					ptgt->tgt_lun_cnt--;
7750 					mutex_exit(&ptgt->tgt_mutex);
7751 					break;
7752 				}
7753 				mutex_exit(&ptgt->tgt_mutex);
7754 			}
7755 
7756 			/* see if this LUN is already allocated */
7757 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7758 				plun = fcp_alloc_lun(ptgt);
7759 				if (plun == NULL) {
7760 					fcp_log(CE_NOTE, pptr->port_dip,
7761 					    "!Lun allocation failed"
7762 					    " target=%x lun=%x",
7763 					    ptgt->tgt_d_id, lun_num);
7764 					break;
7765 				}
7766 			}
7767 
7768 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7769 			/* Convert the 8-byte LUN string to the entity address fields. */
7770 			plun->lun_addr.ent_addr_0 =
7771 			    BE_16(*(uint16_t *)&(lun_string[0]));
7772 			plun->lun_addr.ent_addr_1 =
7773 			    BE_16(*(uint16_t *)&(lun_string[2]));
7774 			plun->lun_addr.ent_addr_2 =
7775 			    BE_16(*(uint16_t *)&(lun_string[4]));
7776 			plun->lun_addr.ent_addr_3 =
7777 			    BE_16(*(uint16_t *)&(lun_string[6]));
7778 
7779 			plun->lun_num = lun_num;
7780 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7781 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7782 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7783 
7784 			/* Retrieve the rscn count (if a valid one exists) */
7785 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7786 				rscn_count = ((fc_ulp_rscn_info_t *)
7787 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7788 				    ulp_rscn_count;
7789 			} else {
7790 				rscn_count = FC_INVALID_RSCN_COUNT;
7791 			}
7792 
7793 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7794 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7795 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7796 				mutex_enter(&pptr->port_mutex);
7797 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7798 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7799 					fcp_log(CE_NOTE, pptr->port_dip,
7800 					    "!failed to send INQUIRY"
7801 					    " target=%x lun=%x",
7802 					    ptgt->tgt_d_id, plun->lun_num);
7803 				} else {
7804 					FCP_TRACE(fcp_logq,
7805 					    pptr->port_instbuf, fcp_trace,
7806 					    FCP_BUF_LEVEL_5, 0,
7807 					    "fcp_handle_reportlun,2: state"
7808 					    " change occurred for D_ID=0x%x",
7809 					    ptgt->tgt_d_id);
7810 				}
7811 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7812 				mutex_exit(&pptr->port_mutex);
7813 			} else {
7814 				continue;
7815 			}
7816 			break;
7817 
7818 		default:
7819 			fcp_log(CE_WARN, NULL,
7820 			    "!Unsupported LUN Addressing method %x "
7821 			    "in response to REPORT_LUN", lun_string[0]);
7822 			break;
7823 		}
7824 
7825 		/*
7826 		 * Each time through this loop we decrement
7827 		 * tmp_cnt by one -- since we go through this loop
7828 		 * once per LUN, tmp_cnt should never drop to <= 0 here.
7829 		 */
7830 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7831 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7832 	}
7833 
7834 	if (i == 0) {
7835 		fcp_log(CE_WARN, pptr->port_dip,
7836 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7837 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7838 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7839 	}
7840 
7841 	kmem_free(report_lun, len);
7842 	fcp_icmd_free(pptr, icmd);
7843 }
7844 
7845 
7846 /*
7847  * called internally to return a LUN given a target and a LUN number
7848  */
7849 static struct fcp_lun *
7850 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7851 {
7852 	struct fcp_lun	*plun;
7853 
7854 	mutex_enter(&ptgt->tgt_mutex);
7855 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7856 		if (plun->lun_num == lun_num) {
7857 			mutex_exit(&ptgt->tgt_mutex);
7858 			return (plun);
7859 		}
7860 	}
7861 	mutex_exit(&ptgt->tgt_mutex);
7862 
7863 	return (NULL);
7864 }
7865 
7866 
7867 /*
7868  * handle finishing one target for fcp_finish_init
7869  *
7870  * return true (non-zero) if we want finish_init to continue with the
7871  * next target
7872  *
7873  * called with the port mutex held
7874  */
7875 /*ARGSUSED*/
7876 static int
7877 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7878     int link_cnt, int tgt_cnt, int cause)
7879 {
7880 	int	rval = 1;
7881 	ASSERT(pptr != NULL);
7882 	ASSERT(ptgt != NULL);
7883 
7884 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7885 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7886 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7887 	    ptgt->tgt_state);
7888 
7889 	ASSERT(mutex_owned(&pptr->port_mutex));
7890 
7891 	if ((pptr->port_link_cnt != link_cnt) ||
7892 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7893 		/*
7894 		 * oh oh -- another link reset or target change
7895 		 * must have occurred while we are in here
7896 		 */
7897 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7898 
7899 		return (0);
7900 	} else {
7901 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7902 	}
7903 
7904 	mutex_enter(&ptgt->tgt_mutex);
7905 
7906 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7907 		/*
7908 		 * tgt is not offline -- is it marked (i.e. needs
7909 		 * to be offlined) ??
7910 		 */
7911 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7912 			/*
7913 			 * this target not offline *and*
7914 			 * marked
7915 			 */
7916 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7917 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7918 			    tgt_cnt, 0, 0);
7919 		} else {
7920 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7921 
7922 			/* create the LUNs */
7923 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7924 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7925 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7926 				    cause);
7927 				ptgt->tgt_device_created = 1;
7928 			} else {
7929 				fcp_update_tgt_state(ptgt, FCP_RESET,
7930 				    FCP_LUN_BUSY);
7931 			}
7932 		}
7933 	}
7934 
7935 	mutex_exit(&ptgt->tgt_mutex);
7936 
7937 	return (rval);
7938 }
7939 
7940 
7941 /*
7942  * this routine is called to finish port initialization
7943  *
7944  * Each port has a "temp" counter -- when a state change happens (e.g.
7945  * port online), the temp count is set to the number of devices in the map.
7946  * Then, as each device gets "discovered", the temp counter is decremented
7947  * by one.  When this count reaches zero we know that all of the devices
7948  * in the map have been discovered (or an error has occurred), so we can
7949  * then finish initialization -- which is done by this routine (well, this
7950  * and fcp_finish_tgt())
7951  *
7952  * acquires and releases the global mutex
7953  *
7954  * called with the port mutex owned
7955  */
7956 static void
7957 fcp_finish_init(struct fcp_port *pptr)
7958 {
7959 #ifdef	DEBUG
7960 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
7961 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
7962 	    FCP_STACK_DEPTH);
7963 #endif /* DEBUG */
7964 
7965 	ASSERT(mutex_owned(&pptr->port_mutex));
7966 
7967 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7968 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
7969 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
7970 
7971 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
7972 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
7973 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
7974 		pptr->port_state &= ~FCP_STATE_ONLINING;
7975 		pptr->port_state |= FCP_STATE_ONLINE;
7976 	}
7977 
7978 	/* Wake up threads waiting on config done */
7979 	cv_broadcast(&pptr->port_config_cv);
7980 }
7981 
7982 
7983 /*
7984  * called from fcp_finish_init to create the LUNs for a target
7985  *
7986  * called with the port mutex owned
7987  */
7988 static void
7989 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
7990 {
7991 	struct fcp_lun	*plun;
7992 	struct fcp_port	*pptr;
7993 	child_info_t		*cip = NULL;
7994 
7995 	ASSERT(ptgt != NULL);
7996 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
7997 
7998 	pptr = ptgt->tgt_port;
7999 
8000 	ASSERT(pptr != NULL);
8001 
8002 	/* scan all LUNs for this target */
8003 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8004 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8005 			continue;
8006 		}
8007 
8008 		if (plun->lun_state & FCP_LUN_MARK) {
8009 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8010 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8011 			    "fcp_create_luns: offlining marked LUN!");
8012 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8013 			continue;
8014 		}
8015 
8016 		plun->lun_state &= ~FCP_LUN_BUSY;
8017 
8018 		/*
8019 		 * There are conditions in which the FCP_LUN_INIT flag is cleared
8020 		 * but we still have a valid plun->lun_cip. To cover this case,
8021 		 * also CLEAR_BUSY whenever we have a valid lun_cip.
8022 		 */
8023 		if (plun->lun_mpxio && plun->lun_cip &&
8024 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8025 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8026 		    0, 0))) {
8027 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8028 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8029 			    "fcp_create_luns: enable lun %p failed!",
8030 			    plun);
8031 		}
8032 
8033 		if (plun->lun_state & FCP_LUN_INIT &&
8034 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8035 			continue;
8036 		}
8037 
8038 		if (cause == FCP_CAUSE_USER_CREATE) {
8039 			continue;
8040 		}
8041 
8042 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8043 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8044 		    "create_luns: passing ONLINE elem to HP thread");
8045 
8046 		/*
8047 		 * If lun has changed, prepare for offlining the old path.
8048 		 * Do not offline the old path right now, since it may be
8049 		 * still opened.
8050 		 */
8051 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8052 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8053 		}
8054 
8055 		/* pass an ONLINE element to the hotplug thread */
8056 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8057 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8058 
8059 			/*
8060 			 * We cannot attach synchronously (i.e. pass
8061 			 * NDI_ONLINE_ATTACH) here as we might be
8062 			 * coming from an interrupt or callback
8063 			 * thread.
8064 			 */
8065 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8066 			    link_cnt, tgt_cnt, 0, 0)) {
8067 				fcp_log(CE_CONT, pptr->port_dip,
8068 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8069 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8070 			}
8071 		}
8072 	}
8073 }
8074 
8075 
8076 /*
8077  * function to online/offline devices
8078  */
8079 static int
8080 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8081     int online, int lcount, int tcount, int flags)
8082 {
8083 	int			rval = NDI_FAILURE;
8084 	int			circ;
8085 	child_info_t		*ccip;
8086 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8087 	int			is_mpxio = pptr->port_mpxio;
8088 	dev_info_t		*cdip, *pdip;
8089 	char			*devname;
8090 
8091 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8092 		/*
8093 		 * When this event gets serviced, lun_cip and lun_mpxio
8094 		 * have changed, so the event should be invalidated now.
8095 		 */
8096 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8097 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8098 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8099 		return (rval);
8100 	}
8101 
8102 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8103 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8104 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8105 	    "flags=%x mpxio=%x\n",
8106 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8107 	    plun->lun_mpxio);
8108 
8109 	/*
8110 	 * lun_mpxio needs checking here because we can end up in a race
8111 	 * condition where this task has been dispatched while lun_mpxio is
8112 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8113 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8114 	 * the flag. We rely on the serialization of the tasks here. We return
8115 	 * NDI_SUCCESS so any callers continue without reporting spurious
8116 	 * errors, and they still think we're an MPXIO LUN.
8117 	 */
8118 
8119 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8120 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8121 		if (plun->lun_mpxio) {
8122 			rval = fcp_update_mpxio_path(plun, cip, online);
8123 		} else {
8124 			rval = NDI_SUCCESS;
8125 		}
8126 		return (rval);
8127 	}
8128 
8129 	/*
8130 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8131 	 * executing devfs_clean() if parent lock is held.
8132 	 */
8133 	ASSERT(!servicing_interrupt());
8134 	if (online == FCP_OFFLINE) {
8135 		if (plun->lun_mpxio == 0) {
8136 			if (plun->lun_cip == cip) {
8137 				cdip = DIP(plun->lun_cip);
8138 			} else {
8139 				cdip = DIP(cip);
8140 			}
8141 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8142 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8143 		} else if ((plun->lun_cip != cip) && cip) {
8144 			/*
8145 			 * This means a DTYPE/GUID change; we should get the
8146 			 * dip of the old cip instead of the current lun_cip.
8147 			 */
8148 			cdip = mdi_pi_get_client(PIP(cip));
8149 		}
8150 		if (cdip) {
8151 			if (i_ddi_devi_attached(cdip)) {
8152 				pdip = ddi_get_parent(cdip);
8153 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8154 				ndi_devi_enter(pdip, &circ);
8155 				(void) ddi_deviname(cdip, devname);
8156 				ndi_devi_exit(pdip, circ);
8157 				/*
8158 				 * Release parent lock before calling
8159 				 * devfs_clean().
8160 				 */
8161 				rval = devfs_clean(pdip, devname + 1,
8162 				    DV_CLEAN_FORCE);
8163 				kmem_free(devname, MAXNAMELEN + 1);
8164 				/*
8165 				 * Return if devfs_clean() fails for
8166 				 * non-MPXIO case.
8167 				 * For MPXIO case, another path could be
8168 				 * offlined.
8169 				 */
8170 				if (rval && plun->lun_mpxio == 0) {
8171 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8172 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8173 					    "fcp_trigger_lun: devfs_clean "
8174 					    "failed rval=%x  dip=%p",
8175 					    rval, pdip);
8176 					return (NDI_FAILURE);
8177 				}
8178 			}
8179 		}
8180 	}
8181 
8182 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8183 		return (NDI_FAILURE);
8184 	}
8185 
8186 	if (is_mpxio) {
8187 		mdi_devi_enter(pptr->port_dip, &circ);
8188 	} else {
8189 		ndi_devi_enter(pptr->port_dip, &circ);
8190 	}
8191 
8192 	mutex_enter(&pptr->port_mutex);
8193 	mutex_enter(&plun->lun_mutex);
8194 
8195 	if (online == FCP_ONLINE) {
8196 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8197 		if (ccip == NULL) {
8198 			goto fail;
8199 		}
8200 	} else {
8201 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8202 			goto fail;
8203 		}
8204 		ccip = cip;
8205 	}
8206 
8207 	if (online == FCP_ONLINE) {
8208 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8209 		    &circ);
8210 		fc_ulp_log_device_event(pptr->port_fp_handle,
8211 		    FC_ULP_DEVICE_ONLINE);
8212 	} else {
8213 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8214 		    &circ);
8215 		fc_ulp_log_device_event(pptr->port_fp_handle,
8216 		    FC_ULP_DEVICE_OFFLINE);
8217 	}
8218 
8219 fail:	mutex_exit(&plun->lun_mutex);
8220 	mutex_exit(&pptr->port_mutex);
8221 
8222 	if (is_mpxio) {
8223 		mdi_devi_exit(pptr->port_dip, circ);
8224 	} else {
8225 		ndi_devi_exit(pptr->port_dip, circ);
8226 	}
8227 
8228 	fc_ulp_idle_port(pptr->port_fp_handle);
8229 
8230 	return (rval);
8231 }
8232 
8233 
8234 /*
8235  * take a target offline by taking all of its LUNs offline
8236  */
8237 /*ARGSUSED*/
8238 static int
8239 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8240     int link_cnt, int tgt_cnt, int nowait, int flags)
8241 {
8242 	struct fcp_tgt_elem	*elem;
8243 
8244 	ASSERT(mutex_owned(&pptr->port_mutex));
8245 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8246 
8247 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8248 
8249 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8250 	    ptgt->tgt_change_cnt)) {
8251 		mutex_exit(&ptgt->tgt_mutex);
8252 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8253 		mutex_enter(&ptgt->tgt_mutex);
8254 
8255 		return (0);
8256 	}
8257 
8258 	ptgt->tgt_pd_handle = NULL;
8259 	mutex_exit(&ptgt->tgt_mutex);
8260 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8261 	mutex_enter(&ptgt->tgt_mutex);
8262 
8263 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
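	/*
	 * A tgt_cnt of zero means "current generation"; substitute the
	 * target's present change count so a deferred offline element can
	 * later be validated against it.
	 */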
8264 
8265 	if (ptgt->tgt_tcap &&
8266 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8267 		elem->flags = flags;
8268 		elem->time = fcp_watchdog_time;
8269 		if (nowait == 0) {
8270 			elem->time += fcp_offline_delay;
8271 		}
8272 		elem->ptgt = ptgt;
8273 		elem->link_cnt = link_cnt;
8274 		elem->tgt_cnt = tgt_cnt;
8275 		elem->next = pptr->port_offline_tgts;
8276 		pptr->port_offline_tgts = elem;
8277 	} else {
8278 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8279 	}
8280 
8281 	return (1);
8282 }
8283 
8284 
8285 static void
8286 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8287     int link_cnt, int tgt_cnt, int flags)
8288 {
8289 	ASSERT(mutex_owned(&pptr->port_mutex));
8290 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8291 
8292 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8293 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8294 	ptgt->tgt_pd_handle = NULL;
8295 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8296 }
8297 
8298 
8299 static void
8300 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8301     int flags)
8302 {
8303 	struct	fcp_lun	*plun;
8304 
8305 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8306 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8307 
8308 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8309 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8310 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8311 		}
8312 	}
8313 }
8314 
8315 
8316 /*
8317  * take a LUN offline
8318  *
8319  * enters and leaves with the target mutex held, releasing it in the process
8320  *
8321  * allocates memory in non-sleep mode
8322  */
8323 static void
8324 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8325     int nowait, int flags)
8326 {
8327 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8328 	struct fcp_lun_elem	*elem;
8329 
8330 	ASSERT(plun != NULL);
8331 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8332 
8333 	if (nowait) {
8334 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8335 		return;
8336 	}
8337 
8338 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8339 		elem->flags = flags;
8340 		elem->time = fcp_watchdog_time;
8341 		if (nowait == 0) {
8342 			elem->time += fcp_offline_delay;
8343 		}
8344 		elem->plun = plun;
8345 		elem->link_cnt = link_cnt;
8346 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8347 		elem->next = pptr->port_offline_luns;
8348 		pptr->port_offline_luns = elem;
8349 	} else {
8350 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8351 	}
8352 }
8353 
8354 
8355 static void
8356 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8357 {
8358 	struct fcp_pkt	*head = NULL;
8359 
8360 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8361 
8362 	mutex_exit(&LUN_TGT->tgt_mutex);
8363 
8364 	head = fcp_scan_commands(plun);
8365 	if (head != NULL) {
8366 		fcp_abort_commands(head, LUN_PORT);
8367 	}
8368 
8369 	mutex_enter(&LUN_TGT->tgt_mutex);
8370 
8371 	if (plun->lun_cip && plun->lun_mpxio) {
8372 		/*
8373 		 * Inform MPxIO that the lun busy state is cleared
8374 		 */
8375 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8376 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8377 		    0, 0)) {
8378 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8379 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8380 			    LUN_TGT->tgt_d_id, plun->lun_num);
8381 		}
8382 		/*
8383 		 * Inform MPxIO that the lun is now marked for offline
8384 		 */
8385 		mutex_exit(&LUN_TGT->tgt_mutex);
8386 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8387 		mutex_enter(&LUN_TGT->tgt_mutex);
8388 	}
8389 }
8390 
8391 static void
8392 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8393     int flags)
8394 {
8395 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8396 
8397 	mutex_exit(&LUN_TGT->tgt_mutex);
8398 	fcp_update_offline_flags(plun);
8399 	mutex_enter(&LUN_TGT->tgt_mutex);
8400 
8401 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8402 
8403 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8404 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8405 	    "offline_lun: passing OFFLINE elem to HP thread");
8406 
8407 	if (plun->lun_cip) {
8408 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8409 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8410 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8411 		    LUN_TGT->tgt_trace);
8412 
8413 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8414 		    link_cnt, tgt_cnt, flags, 0)) {
8415 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8416 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8417 			    LUN_TGT->tgt_d_id, plun->lun_num);
8418 		}
8419 	}
8420 }
8421 
8422 static void
8423 fcp_scan_offline_luns(struct fcp_port *pptr)
8424 {
8425 	struct fcp_lun_elem	*elem;
8426 	struct fcp_lun_elem	*prev;
8427 	struct fcp_lun_elem	*next;
8428 
8429 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8430 
8431 	prev = NULL;
8432 	elem = pptr->port_offline_luns;
8433 	while (elem) {
8434 		next = elem->next;
8435 		if (elem->time <= fcp_watchdog_time) {
8436 			int			changed = 1;
8437 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8438 
8439 			mutex_enter(&ptgt->tgt_mutex);
8440 			if (pptr->port_link_cnt == elem->link_cnt &&
8441 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8442 				changed = 0;
8443 			}
8444 
8445 			if (!changed &&
8446 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8447 				fcp_offline_lun_now(elem->plun,
8448 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8449 			}
8450 			mutex_exit(&ptgt->tgt_mutex);
8451 
8452 			kmem_free(elem, sizeof (*elem));
8453 
8454 			if (prev) {
8455 				prev->next = next;
8456 			} else {
8457 				pptr->port_offline_luns = next;
8458 			}
8459 		} else {
8460 			prev = elem;
8461 		}
8462 		elem = next;
8463 	}
8464 }
8465 
8466 
8467 static void
8468 fcp_scan_offline_tgts(struct fcp_port *pptr)
8469 {
8470 	struct fcp_tgt_elem	*elem;
8471 	struct fcp_tgt_elem	*prev;
8472 	struct fcp_tgt_elem	*next;
8473 
8474 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8475 
8476 	prev = NULL;
8477 	elem = pptr->port_offline_tgts;
8478 	while (elem) {
8479 		next = elem->next;
8480 		if (elem->time <= fcp_watchdog_time) {
8481 			int			changed = 1;
8482 			struct fcp_tgt	*ptgt = elem->ptgt;
8483 
8484 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8485 				changed = 0;
8486 			}
8487 
8488 			mutex_enter(&ptgt->tgt_mutex);
8489 			if (!changed && !(ptgt->tgt_state &
8490 			    FCP_TGT_OFFLINE)) {
8491 				fcp_offline_target_now(pptr,
8492 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8493 				    elem->flags);
8494 			}
8495 			mutex_exit(&ptgt->tgt_mutex);
8496 
8497 			kmem_free(elem, sizeof (*elem));
8498 
8499 			if (prev) {
8500 				prev->next = next;
8501 			} else {
8502 				pptr->port_offline_tgts = next;
8503 			}
8504 		} else {
8505 			prev = elem;
8506 		}
8507 		elem = next;
8508 	}
8509 }
8510 
8511 
8512 static void
8513 fcp_update_offline_flags(struct fcp_lun *plun)
8514 {
8515 	struct fcp_port	*pptr = LUN_PORT;
8516 	ASSERT(plun != NULL);
8517 
8518 	mutex_enter(&LUN_TGT->tgt_mutex);
8519 	plun->lun_state |= FCP_LUN_OFFLINE;
8520 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8521 
8522 	mutex_enter(&plun->lun_mutex);
8523 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8524 		dev_info_t *cdip = NULL;
8525 
8526 		mutex_exit(&LUN_TGT->tgt_mutex);
8527 
8528 		if (plun->lun_mpxio == 0) {
8529 			cdip = DIP(plun->lun_cip);
8530 		} else if (plun->lun_cip) {
8531 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8532 		}
8533 
8534 		mutex_exit(&plun->lun_mutex);
8535 		if (cdip) {
8536 			(void) ndi_event_retrieve_cookie(
8537 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8538 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8539 			(void) ndi_event_run_callbacks(
8540 			    pptr->port_ndi_event_hdl, cdip,
8541 			    fcp_remove_eid, NULL);
8542 		}
8543 	} else {
8544 		mutex_exit(&plun->lun_mutex);
8545 		mutex_exit(&LUN_TGT->tgt_mutex);
8546 	}
8547 }
8548 
8549 
8550 /*
8551  * Scan all of the command pkts for this port, moving pkts that
8552  * match our LUN onto our own list (headed by "head")
8553  */
8554 static struct fcp_pkt *
8555 fcp_scan_commands(struct fcp_lun *plun)
8556 {
8557 	struct fcp_port	*pptr = LUN_PORT;
8558 
8559 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8560 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8561 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8562 
8563 	struct fcp_pkt	*head = NULL;	/* head of our list */
8564 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8565 
8566 	int			cmds_found = 0;
8567 
8568 	mutex_enter(&pptr->port_pkt_mutex);
8569 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8570 		struct fcp_lun *tlun =
8571 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8572 
8573 		ncmd = cmd->cmd_next;	/* set next command */
8574 
8575 		/*
8576 		 * If this pkt is for a different LUN, or the
8577 		 * command has already been sent down, skip it.
8578 		 */
8579 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8580 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8581 			pcmd = cmd;
8582 			continue;
8583 		}
8584 		cmds_found++;
8585 		if (pcmd != NULL) {
8586 			ASSERT(pptr->port_pkt_head != cmd);
8587 			pcmd->cmd_next = cmd->cmd_next;
8588 		} else {
8589 			ASSERT(cmd == pptr->port_pkt_head);
8590 			pptr->port_pkt_head = cmd->cmd_next;
8591 		}
8592 
8593 		if (cmd == pptr->port_pkt_tail) {
8594 			pptr->port_pkt_tail = pcmd;
8595 			if (pcmd) {
8596 				pcmd->cmd_next = NULL;
8597 			}
8598 		}
8599 
8600 		if (head == NULL) {
8601 			head = tail = cmd;
8602 		} else {
8603 			ASSERT(tail != NULL);
8604 
8605 			tail->cmd_next = cmd;
8606 			tail = cmd;
8607 		}
8608 		cmd->cmd_next = NULL;
8609 	}
8610 	mutex_exit(&pptr->port_pkt_mutex);
8611 
8612 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8613 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8614 	    "scan commands: %d cmd(s) found", cmds_found);
8615 
8616 	return (head);
8617 }
8618 
8619 
8620 /*
8621  * Abort all the commands in the command queue
8622  */
8623 static void
8624 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8625 {
8626 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8627 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8628 
8629 	ASSERT(mutex_owned(&pptr->port_mutex));
8630 
8631 	/* scan through the pkts and invalidate them */
8632 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8633 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8634 
8635 		ncmd = cmd->cmd_next;
8636 		ASSERT(pkt != NULL);
8637 
8638 		/*
8639 		 * The lun is going to be marked offline. Indicate to
8640 		 * the target driver not to requeue or retry this command
8641 		 * as the device is going to be offlined pretty soon.
8642 		 */
8643 		pkt->pkt_reason = CMD_DEV_GONE;
8644 		pkt->pkt_statistics = 0;
8645 		pkt->pkt_state = 0;
8646 
8647 		/* reset cmd flags/state */
8648 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8649 		cmd->cmd_state = FCP_PKT_IDLE;
8650 
8651 		/*
8652 		 * ensure we have a packet completion routine,
8653 		 * then call it.
8654 		 */
8655 		ASSERT(pkt->pkt_comp != NULL);
8656 
8657 		mutex_exit(&pptr->port_mutex);
8658 		fcp_post_callback(cmd);
8659 		mutex_enter(&pptr->port_mutex);
8660 	}
8661 }
8662 
8663 
8664 /*
8665  * the pkt_comp callback for command packets
8666  */
8667 static void
8668 fcp_cmd_callback(fc_packet_t *fpkt)
8669 {
8670 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8671 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8672 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8673 
8674 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8675 
8676 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8677 		cmn_err(CE_PANIC, "Packet already completed %p",
8678 		    (void *)cmd);
8679 	}
8680 
8681 	/*
8682 	 * The watch thread should be freeing the packet; ignore it here.
8683 	 */
8684 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8685 		fcp_log(CE_CONT, pptr->port_dip,
8686 		    "!FCP: Pkt completed while aborting\n");
8687 		return;
8688 	}
8689 	cmd->cmd_state = FCP_PKT_IDLE;
8690 
8691 	fcp_complete_pkt(fpkt);
8692 
8693 #ifdef	DEBUG
8694 	mutex_enter(&pptr->port_pkt_mutex);
8695 	pptr->port_npkts--;
8696 	mutex_exit(&pptr->port_pkt_mutex);
8697 #endif /* DEBUG */
8698 
8699 	fcp_post_callback(cmd);
8700 }
8701 
8702 
8703 static void
8704 fcp_complete_pkt(fc_packet_t *fpkt)
8705 {
8706 	int			error = 0;
8707 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8708 	    fpkt->pkt_ulp_private;
8709 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8710 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8711 	struct fcp_lun	*plun;
8712 	struct fcp_tgt	*ptgt;
8713 	struct fcp_rsp		*rsp;
8714 	struct scsi_address	save;
8715 
8716 #ifdef	DEBUG
8717 	save = pkt->pkt_address;
8718 #endif /* DEBUG */
8719 
8720 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8721 
8722 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8723 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8724 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8725 			    sizeof (struct fcp_rsp));
8726 		}
8727 
8728 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8729 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8730 
8731 		pkt->pkt_resid = 0;
8732 
8733 		if (cmd->cmd_pkt->pkt_numcookies) {
8734 			pkt->pkt_state |= STATE_XFERRED_DATA;
8735 			if (fpkt->pkt_data_resid) {
8736 				error++;
8737 			}
8738 		}
8739 
8740 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8741 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8742 			/*
8743 			 * These checks make sure that if the command
8744 			 * came back with a check condition but neither
8745 			 * sense data nor FCP response info was set,
8746 			 * the command is set up to be retried.
8747 			 */
8748 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8749 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8750 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8751 				pkt->pkt_resid = cmd->cmd_dmacount;
8752 			}
8753 		}
8754 
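		/*
		 * Fast path: if there was no data residue mismatch, the FCP
		 * status bits are all clear, and there is no FCP residue,
		 * the command completed cleanly and nothing more needs to
		 * be decoded.
		 */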
8755 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8756 			return;
8757 		}
8758 
8759 		plun = ADDR2LUN(&pkt->pkt_address);
8760 		ptgt = plun->lun_tgt;
8761 		ASSERT(ptgt != NULL);
8762 
8763 		/*
8764 		 * Update the transfer resid, if appropriate
8765 		 */
8766 		if (rsp->fcp_u.fcp_status.resid_over ||
8767 		    rsp->fcp_u.fcp_status.resid_under) {
8768 			pkt->pkt_resid = rsp->fcp_resid;
8769 		}
8770 
8771 		/*
8772 		 * First see if we got an FCP protocol error.
8773 		 */
8774 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8775 			struct fcp_rsp_info	*bep;
8776 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8777 			    sizeof (struct fcp_rsp));
8778 
8779 			if (fcp_validate_fcp_response(rsp, pptr) !=
8780 			    FC_SUCCESS) {
8781 				pkt->pkt_reason = CMD_CMPLT;
8782 				*(pkt->pkt_scbp) = STATUS_CHECK;
8783 
8784 				fcp_log(CE_WARN, pptr->port_dip,
8785 				    "!SCSI command to d_id=0x%x lun=0x%x"
8786 				    " failed, Bad FCP response values:"
8787 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8788 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8789 				    ptgt->tgt_d_id, plun->lun_num,
8790 				    rsp->reserved_0, rsp->reserved_1,
8791 				    rsp->fcp_u.fcp_status.reserved_0,
8792 				    rsp->fcp_u.fcp_status.reserved_1,
8793 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8794 
8795 				return;
8796 			}
8797 
8798 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8799 				FCP_CP_IN(fpkt->pkt_resp +
8800 				    sizeof (struct fcp_rsp), bep,
8801 				    fpkt->pkt_resp_acc,
8802 				    sizeof (struct fcp_rsp_info));
8803 			}
8804 
8805 			if (bep->rsp_code != FCP_NO_FAILURE) {
8806 				child_info_t	*cip;
8807 
8808 				pkt->pkt_reason = CMD_TRAN_ERR;
8809 
8810 				mutex_enter(&plun->lun_mutex);
8811 				cip = plun->lun_cip;
8812 				mutex_exit(&plun->lun_mutex);
8813 
8814 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8815 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8816 				    "FCP response error on cmd=%p"
8817 				    " target=0x%x, cip=%p", cmd,
8818 				    ptgt->tgt_d_id, cip);
8819 			}
8820 		}
8821 
8822 		/*
8823 		 * See if we got a SCSI error with sense data
8824 		 */
8825 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8826 			uchar_t				rqlen;
8827 			caddr_t				sense_from;
8828 			child_info_t			*cip;
8829 			timeout_id_t			tid;
8830 			struct scsi_arq_status		*arq;
8831 			struct scsi_extended_sense	*sense_to;
8832 
8833 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8834 			sense_to = &arq->sts_sensedata;
8835 
8836 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8837 			    sizeof (struct scsi_extended_sense));
8838 
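			/*
			 * In the FCP response buffer the sense data follows
			 * the fixed fcp_rsp header and any FCP response info
			 * bytes, hence the two offsets added below.
			 */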
8839 			sense_from = (caddr_t)fpkt->pkt_resp +
8840 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8841 
8842 			if (fcp_validate_fcp_response(rsp, pptr) !=
8843 			    FC_SUCCESS) {
8844 				pkt->pkt_reason = CMD_CMPLT;
8845 				*(pkt->pkt_scbp) = STATUS_CHECK;
8846 
8847 				fcp_log(CE_WARN, pptr->port_dip,
8848 				    "!SCSI command to d_id=0x%x lun=0x%x"
8849 				    " failed, Bad FCP response values:"
8850 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8851 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8852 				    ptgt->tgt_d_id, plun->lun_num,
8853 				    rsp->reserved_0, rsp->reserved_1,
8854 				    rsp->fcp_u.fcp_status.reserved_0,
8855 				    rsp->fcp_u.fcp_status.reserved_1,
8856 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8857 
8858 				return;
8859 			}
8860 
8861 			/*
8862 			 * copy in sense information
8863 			 */
8864 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8865 				FCP_CP_IN(sense_from, sense_to,
8866 				    fpkt->pkt_resp_acc, rqlen);
8867 			} else {
8868 				bcopy(sense_from, sense_to, rqlen);
8869 			}
8870 
8871 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8872 			    (FCP_SENSE_NO_LUN(sense_to))) {
8873 				mutex_enter(&ptgt->tgt_mutex);
8874 				if (ptgt->tgt_tid == NULL) {
8875 					/*
8876 					 * Kick off rediscovery
8877 					 */
8878 					tid = timeout(fcp_reconfigure_luns,
8879 					    (caddr_t)ptgt, drv_usectohz(1));
8880 
8881 					ptgt->tgt_tid = tid;
8882 					ptgt->tgt_state |= FCP_TGT_BUSY;
8883 				}
8884 				mutex_exit(&ptgt->tgt_mutex);
8885 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8886 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8887 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8888 					    "!FCP: Report Lun Has Changed"
8889 					    " target=%x", ptgt->tgt_d_id);
8890 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8891 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8892 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8893 					    "!FCP: LU Not Supported"
8894 					    " target=%x", ptgt->tgt_d_id);
8895 				}
8896 			}
8897 			ASSERT(pkt->pkt_scbp != NULL);
8898 
8899 			pkt->pkt_state |= STATE_ARQ_DONE;
8900 
8901 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
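			/*
			 * Fewer than SENSE_LENGTH sense bytes may have been
			 * returned; report the shortfall as the request-sense
			 * residue so the target driver knows how much arrived.
			 */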
8902 
8903 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8904 			arq->sts_rqpkt_reason = 0;
8905 			arq->sts_rqpkt_statistics = 0;
8906 
8907 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8908 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8909 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8910 			    STATE_XFERRED_DATA;
8911 
8912 			mutex_enter(&plun->lun_mutex);
8913 			cip = plun->lun_cip;
8914 			mutex_exit(&plun->lun_mutex);
8915 
8916 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8917 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8918 			    "SCSI Check condition on cmd=%p target=0x%x"
8919 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8920 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8921 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8922 			    rsp->fcp_u.fcp_status.scsi_status,
8923 			    sense_to->es_key, sense_to->es_add_code,
8924 			    sense_to->es_qual_code);
8925 		}
8926 	} else {
8927 		plun = ADDR2LUN(&pkt->pkt_address);
8928 		ptgt = plun->lun_tgt;
8929 		ASSERT(ptgt != NULL);
8930 
8931 		/*
8932 		 * Work harder to translate errors into target driver
8933 		 * understandable ones. Note with despair that the target
8934 		 * drivers don't decode pkt_state and pkt_reason exhaustively.
8935 		 * They resort to using the big hammer most often, which
8936 		 * may not get fixed in the lifetime of this driver.
8937 		 */
8938 		pkt->pkt_state = 0;
8939 		pkt->pkt_statistics = 0;
8940 
8941 		switch (fpkt->pkt_state) {
8942 		case FC_PKT_TRAN_ERROR:
8943 			switch (fpkt->pkt_reason) {
8944 			case FC_REASON_OVERRUN:
8945 				pkt->pkt_reason = CMD_CMD_OVR;
8946 				pkt->pkt_statistics |= STAT_ABORTED;
8947 				break;
8948 
8949 			case FC_REASON_XCHG_BSY: {
8950 				caddr_t ptr;
8951 
8952 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
8953 
8954 				ptr = (caddr_t)pkt->pkt_scbp;
8955 				if (ptr) {
8956 					*ptr = STATUS_BUSY;
8957 				}
8958 				break;
8959 			}
8960 
8961 			case FC_REASON_ABORTED:
8962 				pkt->pkt_reason = CMD_TRAN_ERR;
8963 				pkt->pkt_statistics |= STAT_ABORTED;
8964 				break;
8965 
8966 			case FC_REASON_ABORT_FAILED:
8967 				pkt->pkt_reason = CMD_ABORT_FAIL;
8968 				break;
8969 
8970 			case FC_REASON_NO_SEQ_INIT:
8971 			case FC_REASON_CRC_ERROR:
8972 				pkt->pkt_reason = CMD_TRAN_ERR;
8973 				pkt->pkt_statistics |= STAT_ABORTED;
8974 				break;
8975 			default:
8976 				pkt->pkt_reason = CMD_TRAN_ERR;
8977 				break;
8978 			}
8979 			break;
8980 
8981 		case FC_PKT_PORT_OFFLINE: {
8982 			dev_info_t	*cdip = NULL;
8983 			caddr_t		ptr;
8984 
8985 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
8986 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8987 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
8988 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
8989 				    ptgt->tgt_d_id);
8990 			}
8991 
8992 			mutex_enter(&plun->lun_mutex);
8993 			if (plun->lun_mpxio == 0) {
8994 				cdip = DIP(plun->lun_cip);
8995 			} else if (plun->lun_cip) {
8996 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8997 			}
8998 
8999 			mutex_exit(&plun->lun_mutex);
9000 
9001 			if (cdip) {
9002 				(void) ndi_event_retrieve_cookie(
9003 				    pptr->port_ndi_event_hdl, cdip,
9004 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9005 				    NDI_EVENT_NOPASS);
9006 				(void) ndi_event_run_callbacks(
9007 				    pptr->port_ndi_event_hdl, cdip,
9008 				    fcp_remove_eid, NULL);
9009 			}
9010 
9011 			/*
9012 			 * If the link goes off-line because of a LIP,
9013 			 * this will cause an error in the st, sg and
9014 			 * sgen drivers. By setting BUSY we give those
9015 			 * drivers the chance to retry before they give
9016 			 * up on the job. st will remember how many
9017 			 * times it has retried.
9018 			 */
9019 
9020 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9021 			    (plun->lun_type == DTYPE_CHANGER)) {
9022 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9023 				ptr = (caddr_t)pkt->pkt_scbp;
9024 				if (ptr) {
9025 					*ptr = STATUS_BUSY;
9026 				}
9027 			} else {
9028 				pkt->pkt_reason = CMD_TRAN_ERR;
9029 				pkt->pkt_statistics |= STAT_BUS_RESET;
9030 			}
9031 			break;
9032 		}
9033 
9034 		case FC_PKT_TRAN_BSY:
9035 			/*
9036 			 * Use the ssd Qfull handling here.
9037 			 */
9038 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9039 			pkt->pkt_state = STATE_GOT_BUS;
9040 			break;
9041 
9042 		case FC_PKT_TIMEOUT:
9043 			pkt->pkt_reason = CMD_TIMEOUT;
9044 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9045 				pkt->pkt_statistics |= STAT_TIMEOUT;
9046 			} else {
9047 				pkt->pkt_statistics |= STAT_ABORTED;
9048 			}
9049 			break;
9050 
9051 		case FC_PKT_LOCAL_RJT:
9052 			switch (fpkt->pkt_reason) {
9053 			case FC_REASON_OFFLINE: {
9054 				dev_info_t	*cdip = NULL;
9055 
9056 				mutex_enter(&plun->lun_mutex);
9057 				if (plun->lun_mpxio == 0) {
9058 					cdip = DIP(plun->lun_cip);
9059 				} else if (plun->lun_cip) {
9060 					cdip = mdi_pi_get_client(
9061 					    PIP(plun->lun_cip));
9062 				}
9063 				mutex_exit(&plun->lun_mutex);
9064 
9065 				if (cdip) {
9066 					(void) ndi_event_retrieve_cookie(
9067 					    pptr->port_ndi_event_hdl, cdip,
9068 					    FCAL_REMOVE_EVENT,
9069 					    &fcp_remove_eid,
9070 					    NDI_EVENT_NOPASS);
9071 					(void) ndi_event_run_callbacks(
9072 					    pptr->port_ndi_event_hdl,
9073 					    cdip, fcp_remove_eid, NULL);
9074 				}
9075 
9076 				pkt->pkt_reason = CMD_TRAN_ERR;
9077 				pkt->pkt_statistics |= STAT_BUS_RESET;
9078 
9079 				break;
9080 			}
9081 
9082 			case FC_REASON_NOMEM:
9083 			case FC_REASON_QFULL: {
9084 				caddr_t ptr;
9085 
9086 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9087 				ptr = (caddr_t)pkt->pkt_scbp;
9088 				if (ptr) {
9089 					*ptr = STATUS_BUSY;
9090 				}
9091 				break;
9092 			}
9093 
9094 			case FC_REASON_DMA_ERROR:
9095 				pkt->pkt_reason = CMD_DMA_DERR;
9096 				pkt->pkt_statistics |= STAT_ABORTED;
9097 				break;
9098 
9099 			case FC_REASON_CRC_ERROR:
9100 			case FC_REASON_UNDERRUN: {
9101 				uchar_t		status;
9102 				/*
9103 				 * Workaround for Bugid: 4240945.
9104 				 * The IB on the A5k doesn't set the underrun
9105 				 * bit in the fcp status when it transfers
9106 				 * less than the requested amount of data.
9107 				 * Work around the ses problem to keep luxadm
9108 				 * happy till the IB firmware is fixed.
9109 				 */
9110 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9111 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9112 					    fpkt->pkt_resp_acc,
9113 					    sizeof (struct fcp_rsp));
9114 				}
9115 				status = rsp->fcp_u.fcp_status.scsi_status;
9116 				if (((plun->lun_type & DTYPE_MASK) ==
9117 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9118 					pkt->pkt_reason = CMD_CMPLT;
9119 					*pkt->pkt_scbp = status;
9120 					pkt->pkt_resid = 0;
9121 				} else {
9122 					pkt->pkt_reason = CMD_TRAN_ERR;
9123 					pkt->pkt_statistics |= STAT_ABORTED;
9124 				}
9125 				break;
9126 			}
9127 
9128 			case FC_REASON_NO_CONNECTION:
9129 			case FC_REASON_UNSUPPORTED:
9130 			case FC_REASON_ILLEGAL_REQ:
9131 			case FC_REASON_BAD_SID:
9132 			case FC_REASON_DIAG_BUSY:
9133 			case FC_REASON_FCAL_OPN_FAIL:
9134 			case FC_REASON_BAD_XID:
9135 			default:
9136 				pkt->pkt_reason = CMD_TRAN_ERR;
9137 				pkt->pkt_statistics |= STAT_ABORTED;
9138 				break;
9139 
9140 			}
9141 			break;
9142 
9143 		case FC_PKT_NPORT_RJT:
9144 		case FC_PKT_FABRIC_RJT:
9145 		case FC_PKT_NPORT_BSY:
9146 		case FC_PKT_FABRIC_BSY:
9147 		default:
9148 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9149 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9150 			    "FC Status 0x%x, reason 0x%x",
9151 			    fpkt->pkt_state, fpkt->pkt_reason);
9152 			pkt->pkt_reason = CMD_TRAN_ERR;
9153 			pkt->pkt_statistics |= STAT_ABORTED;
9154 			break;
9155 		}
9156 
9157 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9158 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9159 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9160 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9161 		    fpkt->pkt_reason);
9162 	}
9163 
9164 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9165 }
9166 
9167 
9168 static int
9169 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9170 {
9171 	if (rsp->reserved_0 || rsp->reserved_1 ||
9172 	    rsp->fcp_u.fcp_status.reserved_0 ||
9173 	    rsp->fcp_u.fcp_status.reserved_1) {
9174 		/*
9175 		 * These reserved fields should ideally be zero. FCP-2 does say
9176 		 * that the recipient need not check for reserved fields to be
9177 		 * zero. If they are not zero, we will not make a fuss about it
9178 		 * - just log it (in debug to both trace buffer and messages
9179 		 * file and to trace buffer only in non-debug) and move on.
9180 		 *
9181 		 * Non-zero reserved fields were seen with minnows.
9182 		 *
9183 		 * qlc takes care of some of this but we cannot assume that all
9184 		 * FCAs will do so.
9185 		 */
9186 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9187 		    FCP_BUF_LEVEL_5, 0,
9188 		    "Got fcp response packet with non-zero reserved fields "
9189 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9190 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9191 		    rsp->reserved_0, rsp->reserved_1,
9192 		    rsp->fcp_u.fcp_status.reserved_0,
9193 		    rsp->fcp_u.fcp_status.reserved_1);
9194 	}
9195 
9196 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9197 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9198 		return (FC_FAILURE);
9199 	}
9200 
9201 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9202 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9203 	    sizeof (struct fcp_rsp))) {
9204 		return (FC_FAILURE);
9205 	}
9206 
9207 	return (FC_SUCCESS);
9208 }
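/*
 * The checks above assume the usual FCP_RSP IU layout: the fixed
 * struct fcp_rsp header, followed by fcp_response_len bytes of response
 * data (when rsp_len_set) and then fcp_sense_len bytes of sense data
 * (when sense_len_set).  Both variable parts must fit within the IU,
 * i.e. roughly:
 *
 *	sizeof (struct fcp_rsp) + fcp_response_len + fcp_sense_len
 *	    <= FCP_MAX_RSP_IU_SIZE
 */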
9209 
9210 
9211 /*
9212  * This is called when there is a change in the device state. The case we're
9213  * handling here is: if the d_id does not match, offline this tgt and online
9214  * a new tgt with the new d_id.  Called from fcp_handle_devices with
9215  * port_mutex held.
9216  */
9217 static int
9218 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9219     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9220 {
9221 	ASSERT(mutex_owned(&pptr->port_mutex));
9222 
9223 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9224 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9225 	    "Starting fcp_device_changed...");
9226 
9227 	/*
9228 	 * The two cases where port_device_changed is called are when the
9229 	 * device changes its d_id or its hard address.
9230 	 */
9231 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9232 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9233 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9234 
9235 		/* offline this target */
9236 		mutex_enter(&ptgt->tgt_mutex);
9237 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9238 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9239 			    0, 1, NDI_DEVI_REMOVE);
9240 		}
9241 		mutex_exit(&ptgt->tgt_mutex);
9242 
9243 		fcp_log(CE_NOTE, pptr->port_dip,
9244 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9245 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9246 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9247 		    map_entry->map_hard_addr.hard_addr);
9248 	}
9249 
9250 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9251 	    link_cnt, tgt_cnt, cause));
9252 }
9253 
9254 /*
9255  *     Function: fcp_alloc_lun
9256  *
9257  *  Description: Creates a new lun structure and adds it to the list
9258  *		 of luns of the target.
9259  *
9260  *     Argument: ptgt		Target the lun will belong to.
9261  *
9262  * Return Value: NULL		Failed
9263  *		 Not NULL	Succeeded
9264  *
9265  *	Context: Kernel context
9266  */
9267 static struct fcp_lun *
9268 fcp_alloc_lun(struct fcp_tgt *ptgt)
9269 {
9270 	struct fcp_lun *plun;
9271 
9272 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9273 	if (plun != NULL) {
9274 		/*
9275 		 * Initialize the mutex before putting in the target list
9276 		 * especially before releasing the target mutex.
9277 		 */
9278 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9279 		plun->lun_tgt = ptgt;
9280 
9281 		mutex_enter(&ptgt->tgt_mutex);
9282 		plun->lun_next = ptgt->tgt_lun;
9283 		ptgt->tgt_lun = plun;
9284 		plun->lun_old_guid = NULL;
9285 		plun->lun_old_guid_size = 0;
9286 		mutex_exit(&ptgt->tgt_mutex);
9287 	}
9288 
9289 	return (plun);
9290 }
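/*
 * A minimal usage sketch (illustrative only, not part of the original
 * source): a discovery path that needs a new LUN on an existing target
 * would typically allocate it and then fill in the LUN attributes,
 * handling the KM_NOSLEEP failure case:
 *
 *	struct fcp_lun	*plun;
 *
 *	if ((plun = fcp_alloc_lun(ptgt)) == NULL) {
 *		// allocation failed; retry or give up on this LUN
 *		return;
 *	}
 *	// plun is now linked into ptgt->tgt_lun and ready to be filled in
 */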
9291 
9292 /*
9293  *     Function: fcp_dealloc_lun
9294  *
9295  *  Description: Frees the LUN structure passed by the caller.
9296  *
9297  *     Argument: plun		LUN structure to free.
9298  *
9299  * Return Value: None
9300  *
9301  *	Context: Kernel context.
9302  */
9303 static void
9304 fcp_dealloc_lun(struct fcp_lun *plun)
9305 {
9306 	mutex_enter(&plun->lun_mutex);
9307 	if (plun->lun_cip) {
9308 		fcp_remove_child(plun);
9309 	}
9310 	mutex_exit(&plun->lun_mutex);
9311 
9312 	mutex_destroy(&plun->lun_mutex);
9313 	if (plun->lun_guid) {
9314 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9315 	}
9316 	if (plun->lun_old_guid) {
9317 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9318 	}
9319 	kmem_free(plun, sizeof (*plun));
9320 }
9321 
9322 /*
9323  *     Function: fcp_alloc_tgt
9324  *
9325  *  Description: Creates a new target structure and adds it to the port
9326  *		 hash list.
9327  *
9328  *     Argument: pptr		fcp port structure
9329  *		 *map_entry	entry describing the target to create
9330  *		 link_cnt	Link state change counter
9331  *
9332  * Return Value: NULL		Failed
9333  *		 Not NULL	Succeeded
9334  *
9335  *	Context: Kernel context.
9336  */
9337 static struct fcp_tgt *
9338 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9339 {
9340 	int			hash;
9341 	uchar_t			*wwn;
9342 	struct fcp_tgt	*ptgt;
9343 
9344 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9345 	if (ptgt != NULL) {
9346 		mutex_enter(&pptr->port_mutex);
9347 		if (link_cnt != pptr->port_link_cnt) {
9348 			/*
9349 			 * oh oh -- another link reset
9350 			 * in progress -- give up
9351 			 */
9352 			mutex_exit(&pptr->port_mutex);
9353 			kmem_free(ptgt, sizeof (*ptgt));
9354 			ptgt = NULL;
9355 		} else {
9356 			/*
9357 			 * initialize the mutex before putting in the port
9358 			 * wwn list, especially before releasing the port
9359 			 * mutex.
9360 			 */
9361 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9362 
9363 			/* add new target entry to the port's hash list */
9364 			wwn = (uchar_t *)&map_entry->map_pwwn;
9365 			hash = FCP_HASH(wwn);
9366 
9367 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9368 			pptr->port_tgt_hash_table[hash] = ptgt;
9369 
9370 			/* save cross-ptr */
9371 			ptgt->tgt_port = pptr;
9372 
9373 			ptgt->tgt_change_cnt = 1;
9374 
9375 			/* initialize the target manual_config_only flag */
9376 			if (fcp_enable_auto_configuration) {
9377 				ptgt->tgt_manual_config_only = 0;
9378 			} else {
9379 				ptgt->tgt_manual_config_only = 1;
9380 			}
9381 
9382 			mutex_exit(&pptr->port_mutex);
9383 		}
9384 	}
9385 
9386 	return (ptgt);
9387 }
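/*
 * Targets are hashed into pptr->port_tgt_hash_table[] by the remote
 * port WWN (FCP_HASH() on map_pwwn), and link_cnt is re-checked under
 * port_mutex so that an allocation racing with a link reset is backed
 * out instead of being linked into a stale link generation.
 */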
9388 
9389 /*
9390  *     Function: fcp_dealloc_tgt
9391  *
9392  *  Description: Frees the target structure passed by the caller.
9393  *
9394  *     Argument: ptgt		Target structure to free.
9395  *
9396  * Return Value: None
9397  *
9398  *	Context: Kernel context.
9399  */
9400 static void
9401 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9402 {
9403 	mutex_destroy(&ptgt->tgt_mutex);
9404 	kmem_free(ptgt, sizeof (*ptgt));
9405 }
9406 
9407 
9408 /*
9409  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9410  *
9411  *	Device discovery commands are not retried forever, as that
9412  *	would have repercussions on other devices that need to be
9413  *	submitted to the hotplug thread. After a quick glance at the
9414  *	SCSI-3 spec, it was found that the spec doesn't mandate a
9415  *	forever retry; rather, it recommends a delayed retry.
9416  *
9417  *	Since Photon IB is single threaded, STATUS_BUSY is common
9418  *	in a 4+ initiator environment. Make sure the total time
9419  *	spent on retries (including command timeout) does not
9420  *	exceed 60 seconds.
9421  */
9422 static void
9423 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9424 {
9425 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9426 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9427 
9428 	mutex_enter(&pptr->port_mutex);
9429 	mutex_enter(&ptgt->tgt_mutex);
9430 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9431 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9432 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9433 		    "fcp_queue_ipkt,1:state change occurred"
9434 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9435 		mutex_exit(&ptgt->tgt_mutex);
9436 		mutex_exit(&pptr->port_mutex);
9437 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9438 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9439 		fcp_icmd_free(pptr, icmd);
9440 		return;
9441 	}
9442 	mutex_exit(&ptgt->tgt_mutex);
9443 
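	/*
	 * Delayed retry: ipkt_restart is expressed in the same units as
	 * fcp_watchdog_time (presumably watchdog ticks), so each successive
	 * retry is pushed one tick further out, giving a slowly growing
	 * back-off rather than an immediate re-issue.
	 */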
9444 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9445 
9446 	if (pptr->port_ipkt_list != NULL) {
9447 		/* add pkt to front of doubly-linked list */
9448 		pptr->port_ipkt_list->ipkt_prev = icmd;
9449 		icmd->ipkt_next = pptr->port_ipkt_list;
9450 		pptr->port_ipkt_list = icmd;
9451 		icmd->ipkt_prev = NULL;
9452 	} else {
9453 		/* this is the first/only pkt on the list */
9454 		pptr->port_ipkt_list = icmd;
9455 		icmd->ipkt_next = NULL;
9456 		icmd->ipkt_prev = NULL;
9457 	}
9458 	mutex_exit(&pptr->port_mutex);
9459 }
9460 
9461 /*
9462  *     Function: fcp_transport
9463  *
9464  *  Description: This function submits the Fibre Channel packet to the transport
9465  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9466  *		 fails the submission, the treatment depends on the value of
9467  *		 the variable internal.
9468  *
9469  *     Argument: port_handle	fp/fctl port handle.
9470  *		 *fpkt		Packet to submit to the transport layer.
9471  *		 internal	Not zero when it's an internal packet.
9472  *
9473  * Return Value: FC_TRAN_BUSY
9474  *		 FC_STATEC_BUSY
9475  *		 FC_OFFLINE
9476  *		 FC_LOGINREQ
9477  *		 FC_DEVICE_BUSY
9478  *		 FC_SUCCESS
9479  */
9480 static int
9481 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9482 {
9483 	int	rval;
9484 
9485 	rval = fc_ulp_transport(port_handle, fpkt);
9486 	if (rval == FC_SUCCESS) {
9487 		return (rval);
9488 	}
9489 
9490 	/*
9491 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9492 	 * a command. If the underlying modules see that there is a state
9493 	 * change, or that a port is OFFLINE, it means that the state change
9494 	 * hasn't reached FCP yet; re-queue the command for deferred
9495 	 * submission.
9496 	 */
9497 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9498 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9499 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9500 		/*
9501 		 * Defer packet re-submission. Internal commands could hang
9502 		 * for good if the port driver keeps returning FC_STATEC_BUSY
9503 		 * forever, but that shouldn't happen in a good environment.
9504 		 * Limiting re-transport for internal commands is probably a
9505 		 * good idea.
9506 		 * A race condition can happen when a port sees a barrage of
9507 		 * link transitions from offline to online. If fctl has
9508 		 * returned FC_STATEC_BUSY or FC_OFFLINE, then none of the
9509 		 * internal commands should be queued to do the discovery.
9510 		 * The race condition is when an online comes and FCP starts
9511 		 * its internal discovery and the link then goes offline. It
9512 		 * is possible that the statec_callback has not reached FCP
9513 		 * and FCP is carrying on with its internal discovery.
9514 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9515 		 * that the link has gone offline. At this point FCP should
9516 		 * drop all the internal commands and wait for the
9517 		 * statec_callback. That is facilitated by incrementing
9518 		 * port_link_cnt.
9519 		 *
9520 		 * For external commands, the (FC)pkt_timeout is decremented
9521 		 * by the queue delay added by our driver. Care is taken to
9522 		 * ensure that it doesn't become zero (zero means no timeout).
9523 		 * If the time expires right inside the driver queue itself,
9524 		 * the watch thread will return it to the original caller
9525 		 * indicating that the command has timed out.
9526 		 */
9527 		if (internal) {
9528 			char			*op;
9529 			struct fcp_ipkt	*icmd;
9530 
9531 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9532 			switch (icmd->ipkt_opcode) {
9533 			case SCMD_REPORT_LUN:
9534 				op = "REPORT LUN";
9535 				break;
9536 
9537 			case SCMD_INQUIRY:
9538 				op = "INQUIRY";
9539 				break;
9540 
9541 			case SCMD_INQUIRY_PAGE83:
9542 				op = "INQUIRY-83";
9543 				break;
9544 
9545 			default:
9546 				op = "Internal SCSI COMMAND";
9547 				break;
9548 			}
9549 
9550 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9551 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9552 				rval = FC_SUCCESS;
9553 			}
9554 		} else {
9555 			struct fcp_pkt *cmd;
9556 			struct fcp_port *pptr;
9557 
9558 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9559 			cmd->cmd_state = FCP_PKT_IDLE;
9560 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9561 
9562 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9563 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9564 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9565 				    "fcp_transport: xport busy for pkt %p",
9566 				    cmd->cmd_pkt);
9567 				rval = FC_TRAN_BUSY;
9568 			} else {
9569 				fcp_queue_pkt(pptr, cmd);
9570 				rval = FC_SUCCESS;
9571 			}
9572 		}
9573 	}
9574 
9575 	return (rval);
9576 }
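/*
 * A caller-side sketch (illustrative only, under the assumptions spelled
 * out in the header above): FC_SUCCESS from fcp_transport() means the
 * packet was either handed to fc_ulp_transport() or queued internally for
 * deferred re-submission; anything else must be handled by the caller.
 *
 *	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
 *	if (rval != FC_SUCCESS) {
 *		// e.g. FC_TRAN_BUSY for a FLAG_NOQUEUE external command;
 *		// typically mapped back to TRAN_BUSY for the target driver
 *	}
 */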
9577 
9578 /*VARARGS3*/
9579 static void
9580 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9581 {
9582 	char		buf[256];
9583 	va_list		ap;
9584 
9585 	if (dip == NULL) {
9586 		dip = fcp_global_dip;
9587 	}
9588 
9589 	va_start(ap, fmt);
9590 	(void) vsprintf(buf, fmt, ap);
9591 	va_end(ap);
9592 
9593 	scsi_log(dip, "fcp", level, buf);
9594 }
9595 
9596 /*
9597  * This function retries the NS registry of the FC4 type.
9598  * It assumes that the port_mutex is held.
9599  * The function does nothing if the topology is not fabric.
9600  * So, the topology has to be set before this function can be called.
9601  */
9602 static void
9603 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9604 {
9605 	int	rval;
9606 
9607 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9608 
9609 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9610 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9611 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9612 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9613 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9614 		}
9615 		return;
9616 	}
9617 	mutex_exit(&pptr->port_mutex);
9618 	rval = fcp_do_ns_registry(pptr, s_id);
9619 	mutex_enter(&pptr->port_mutex);
9620 
9621 	if (rval == 0) {
9622 		/* Registry successful. Reset flag */
9623 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9624 	}
9625 }
9626 
9627 /*
9628  * This function registers the ULP with the switch by calling transport i/f
9629  */
9630 static int
9631 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9632 {
9633 	fc_ns_cmd_t		ns_cmd;
9634 	ns_rfc_type_t		rfc;
9635 	uint32_t		types[8];
9636 
9637 	/*
9638 	 * Prepare the Name server structure to
9639 	 * register with the transport in case of
9640 	 * Fabric configuration.
9641 	 */
9642 	bzero(&rfc, sizeof (rfc));
9643 	bzero(types, sizeof (types));
9644 
9645 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9646 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
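	/*
	 * FC_TYPE_SCSI_FCP is FC-4 type code 0x08; FC4_TYPE_WORD_POS() and
	 * FC4_TYPE_BIT_POS() presumably map a type code to word (type / 32)
	 * and bit (type % 32) of the 256-bit FC-4 Types bitmap carried in
	 * the RFT_ID request, so this marks the port as FCP capable.
	 */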
9647 
9648 	rfc.rfc_port_id.port_id = s_id;
9649 	bcopy(types, rfc.rfc_types, sizeof (types));
9650 
9651 	ns_cmd.ns_flags = 0;
9652 	ns_cmd.ns_cmd = NS_RFT_ID;
9653 	ns_cmd.ns_req_len = sizeof (rfc);
9654 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9655 	ns_cmd.ns_resp_len = 0;
9656 	ns_cmd.ns_resp_payload = NULL;
9657 
9658 	/*
9659 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9660 	 */
9661 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9662 		fcp_log(CE_WARN, pptr->port_dip,
9663 		    "!ns_registry: failed name server registration");
9664 		return (1);
9665 	}
9666 
9667 	return (0);
9668 }
9669 
9670 /*
9671  *     Function: fcp_handle_port_attach
9672  *
9673  *  Description: This function is called from fcp_port_attach() to attach a
9674  *		 new port. This routine does the following:
9675  *
9676  *		1) Allocates an fcp_port structure and initializes it.
9677  *		2) Tries to register the new FC-4 (FCP) capability with the name
9678  *		   server.
9679  *		3) Kicks off the enumeration of the targets/luns visible
9680  *		   through this new port.  That is done by calling
9681  *		   fcp_statec_callback() if the port is online.
9682  *
9683  *     Argument: ulph		fp/fctl port handle.
9684  *		 *pinfo		Port information.
9685  *		 s_id		Port ID.
9686  *		 instance	Device instance number for the local port
9687  *				(returned by ddi_get_instance()).
9688  *
9689  * Return Value: DDI_SUCCESS
9690  *		 DDI_FAILURE
9691  *
9692  *	Context: User and Kernel context.
9693  */
9694 /*ARGSUSED*/
9695 int
9696 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9697     uint32_t s_id, int instance)
9698 {
9699 	int			res = DDI_FAILURE;
9700 	scsi_hba_tran_t		*tran;
9701 	int			mutex_initted = FALSE;
9702 	int			hba_attached = FALSE;
9703 	int			soft_state_linked = FALSE;
9704 	int			event_bind = FALSE;
9705 	struct fcp_port		*pptr;
9706 	fc_portmap_t		*tmp_list = NULL;
9707 	uint32_t		max_cnt, alloc_cnt;
9708 	uchar_t			*boot_wwn = NULL;
9709 	uint_t			nbytes;
9710 	int			manual_cfg;
9711 
9712 	/*
9713 	 * This port instance is attaching for the first time (or after
9714 	 * having been detached before).
9715 	 */
9716 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9717 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9718 
9719 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9720 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed; "
9721 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9722 		    instance);
9723 		return (res);
9724 	}
9725 
9726 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9727 		/* this shouldn't happen */
9728 		ddi_soft_state_free(fcp_softstate, instance);
9729 		cmn_err(CE_WARN, "fcp: bad soft state");
9730 		return (res);
9731 	}
9732 
9733 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9734 
9735 	/*
9736 	 * Make a copy of ulp_port_info as fctl allocates
9737 	 * a temp struct.
9738 	 */
9739 	(void) fcp_cp_pinfo(pptr, pinfo);
9740 
9741 	/*
9742 	 * Check for manual_configuration_only property.
9743 	 * Enable manual configuration if the property is
9744 	 * set to 1, otherwise disable manual configuration.
9745 	 */
9746 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9747 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9748 	    MANUAL_CFG_ONLY,
9749 	    -1)) != -1) {
9750 		if (manual_cfg == 1) {
9751 			char	*pathname;
9752 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9753 			(void) ddi_pathname(pptr->port_dip, pathname);
9754 			cmn_err(CE_NOTE,
9755 			    "%s (%s%d) %s is enabled via %s.conf.",
9756 			    pathname,
9757 			    ddi_driver_name(pptr->port_dip),
9758 			    ddi_get_instance(pptr->port_dip),
9759 			    MANUAL_CFG_ONLY,
9760 			    ddi_driver_name(pptr->port_dip));
9761 			fcp_enable_auto_configuration = 0;
9762 			kmem_free(pathname, MAXPATHLEN);
9763 		}
9764 	}
9765 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9766 	pptr->port_link_cnt = 1;
9767 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt))
9768 	pptr->port_id = s_id;
9769 	pptr->port_instance = instance;
9770 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state))
9771 	pptr->port_state = FCP_STATE_INIT;
9772 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state))
9773 
9774 	pptr->port_dmacookie_sz = (pptr->port_data_dma_attr.dma_attr_sgllen *
9775 	    sizeof (ddi_dma_cookie_t));
9776 
9777 	/*
9778 	 * The two mutexes of fcp_port are initialized.	 The variable
9779 	 * mutex_initted is incremented to remember that fact.	That variable
9780 	 * is checked when the routine fails and the mutexes have to be
9781 	 * destroyed.
9782 	 */
9783 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9784 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9785 	mutex_initted++;
9786 
9787 	/*
9788 	 * The SCSI tran structure is allocated and initialized now.
9789 	 */
9790 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9791 		fcp_log(CE_WARN, pptr->port_dip,
9792 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9793 		goto fail;
9794 	}
9795 
9796 	/* link in the transport structure then fill it in */
9797 	pptr->port_tran = tran;
9798 	tran->tran_hba_private		= pptr;
9799 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9800 	tran->tran_tgt_probe		= NULL;
9801 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9802 	tran->tran_start		= fcp_scsi_start;
9803 	tran->tran_reset		= fcp_scsi_reset;
9804 	tran->tran_abort		= fcp_scsi_abort;
9805 	tran->tran_getcap		= fcp_scsi_getcap;
9806 	tran->tran_setcap		= fcp_scsi_setcap;
9807 	tran->tran_init_pkt		= NULL;
9808 	tran->tran_destroy_pkt		= NULL;
9809 	tran->tran_dmafree		= NULL;
9810 	tran->tran_sync_pkt		= NULL;
9811 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9812 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9813 	tran->tran_get_name		= fcp_scsi_get_name;
9814 	tran->tran_clear_aca		= NULL;
9815 	tran->tran_clear_task_set	= NULL;
9816 	tran->tran_terminate_task	= NULL;
9817 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9818 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9819 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9820 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9821 	tran->tran_quiesce		= NULL;
9822 	tran->tran_unquiesce		= NULL;
9823 	tran->tran_bus_reset		= NULL;
9824 	tran->tran_bus_config		= fcp_scsi_bus_config;
9825 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9826 	tran->tran_bus_power		= NULL;
9827 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9828 
9829 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9830 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9831 	tran->tran_setup_pkt		= fcp_pkt_setup;
9832 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9833 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9834 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9835 
9836 	/*
9837 	 * Allocate an ndi event handle
9838 	 */
9839 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9840 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9841 
9842 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9843 	    sizeof (fcp_ndi_event_defs));
9844 
9845 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9846 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9847 
9848 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9849 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9850 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9851 
9852 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9853 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9854 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9855 		goto fail;
9856 	}
9857 	event_bind++;	/* Checked in fail case */
9858 
9859 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9860 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9861 	    != DDI_SUCCESS) {
9862 		fcp_log(CE_WARN, pptr->port_dip,
9863 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9864 		goto fail;
9865 	}
9866 	hba_attached++;	/* Checked in fail case */
9867 
9868 	pptr->port_mpxio = 0;
9869 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9870 	    MDI_SUCCESS) {
9871 		pptr->port_mpxio++;
9872 	}
9873 
9874 	/*
9875 	 * The following code puts the new port structure in the global
9876 	 * list of ports and, if it is the first port to attach, it starts
9877 	 * the fcp_watchdog_tick.
9878 	 *
9879 	 * Why put this new port in the global list before we are done
9880 	 * attaching it?  We are actually making the structure globally known
9881 	 * before we are done attaching it because of the code that follows.
9882 	 * At this point the resources to handle the port are allocated.
9883 	 * This function is now going to do the following:
9884 	 *
9885 	 *   1) It is going to try to register with the name server,
9886 	 *	advertising the new FCP capability of the port.
9887 	 *   2) It is going to play the role of the fp/fctl layer by building
9888 	 *	a list of worldwide names reachable through this port and call
9889 	 *	itself on fcp_statec_callback().  That requires the port to
9890 	 *	be part of the global list.
9891 	 */
9892 	mutex_enter(&fcp_global_mutex);
9893 	if (fcp_port_head == NULL) {
9894 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9895 	}
9896 	pptr->port_next = fcp_port_head;
9897 	fcp_port_head = pptr;
9898 	soft_state_linked++;
9899 
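	/*
	 * fcp_watchdog_timeout is effectively a number of seconds;
	 * drv_usectohz(1000000) converts one second into clock ticks, so
	 * fcp_watchdog_tick is the watchdog period in ticks.  Only the first
	 * port to attach starts the fcp_watch() callout; later ports just
	 * bump fcp_watchdog_init.
	 */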
9900 	if (fcp_watchdog_init++ == 0) {
9901 		fcp_watchdog_tick = fcp_watchdog_timeout *
9902 		    drv_usectohz(1000000);
9903 		fcp_watchdog_id = timeout(fcp_watch, NULL,
9904 		    fcp_watchdog_tick);
9905 	}
9906 	mutex_exit(&fcp_global_mutex);
9907 
9908 	/*
9909 	 * Here an attempt is made to register the new FCP capability with
9910 	 * the name server.  That is done using an RFT_ID request to the name
9911 	 * server.  It is done synchronously.  The function fcp_do_ns_registry()
9912 	 * doesn't return till the name server has responded.
9913 	 * On failures, just ignore it for now and it will get retried during
9914 	 * state change callbacks. We'll set a flag to record this failure.
9915 	 */
9916 	if (fcp_do_ns_registry(pptr, s_id)) {
9917 		mutex_enter(&pptr->port_mutex);
9918 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
9919 		mutex_exit(&pptr->port_mutex);
9920 	} else {
9921 		mutex_enter(&pptr->port_mutex);
9922 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9923 		mutex_exit(&pptr->port_mutex);
9924 	}
9925 
9926 	/*
9927 	 * Lookup for boot WWN property
9928 	 */
9929 	if (modrootloaded != 1) {
9930 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
9931 		    ddi_get_parent(pinfo->port_dip),
9932 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
9933 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
9934 		    (nbytes == FC_WWN_SIZE)) {
9935 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
9936 		}
9937 		if (boot_wwn) {
9938 			ddi_prop_free(boot_wwn);
9939 		}
9940 	}
9941 
9942 	/*
9943 	 * Handle various topologies and link states.
9944 	 */
9945 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
9946 	case FC_STATE_OFFLINE:
9947 
9948 		/*
9949 		 * we're attaching a port where the link is offline
9950 		 *
9951 		 * Wait for ONLINE, at which time a state
9952 		 * change will cause a statec_callback
9953 		 *
9954 		 * in the mean time, do not do anything
9955 		 */
9956 		res = DDI_SUCCESS;
9957 		pptr->port_state |= FCP_STATE_OFFLINE;
9958 		break;
9959 
9960 	case FC_STATE_ONLINE: {
9961 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
9962 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
9963 			res = DDI_SUCCESS;
9964 			break;
9965 		}
9966 		/*
9967 		 * discover devices and create nodes (a private
9968 		 * loop or point-to-point)
9969 		 */
9970 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
9971 
9972 		/*
9973 		 * At this point we are going to build a list of all the ports
9974 		 * that	can be reached through this local port.	 It looks like
9975 		 * we cannot handle more than FCP_MAX_DEVICES per local port
9976 		 * (128).
9977 		 */
9978 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
9979 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
9980 		    KM_NOSLEEP)) == NULL) {
9981 			fcp_log(CE_WARN, pptr->port_dip,
9982 			    "!fcp%d: failed to allocate portmap",
9983 			    instance);
9984 			goto fail;
9985 		}
9986 
9987 		/*
9988 		 * fc_ulp_getportmap() is going to provide us with the list of
9989 		 * remote ports in the buffer we just allocated.  The way the
9990 		 * list is going to be retrieved depends on the topology.
9991 		 * However, if we are connected to a Fabric, a name server
9992 		 * request may be sent to get the list of FCP capable ports.
9993 		 * It should be noted that in that case the request is
9994 		 * synchronous.	 This means we are stuck here till the name
9995 		 * server replies.  A lot of things can change during that time,
9996 		 * including, possibly, being called on
9997 		 * fcp_statec_callback() for different reasons. I'm not sure
9998 		 * the code can handle that.
9999 		 */
10000 		max_cnt = FCP_MAX_DEVICES;
10001 		alloc_cnt = FCP_MAX_DEVICES;
10002 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10003 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10004 		    FC_SUCCESS) {
10005 			caddr_t msg;
10006 
10007 			(void) fc_ulp_error(res, &msg);
10008 
10009 			/*
10010 			 * This just means the transport is busy,
10011 			 * perhaps building a portmap, so, for now,
10012 			 * succeed this port attach.  When the
10013 			 * transport has a new map, it'll send us a
10014 			 * state change then.
10015 			 */
10016 			fcp_log(CE_WARN, pptr->port_dip,
10017 			    "!failed to get port map : %s", msg);
10018 
10019 			res = DDI_SUCCESS;
10020 			break;	/* go return result */
10021 		}
10022 		if (max_cnt > alloc_cnt) {
10023 			alloc_cnt = max_cnt;
10024 		}
10025 
10026 		/*
10027 		 * We are now going to call fcp_statec_callback() ourselves.
10028 		 * By issuing this call we are trying to kick off the enumera-
10029 		 * tion process.
10030 		 */
10031 		/*
10032 		 * let the state change callback do the SCSI device
10033 		 * discovery and create the devinfos
10034 		 */
10035 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10036 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10037 		    max_cnt, pptr->port_id);
10038 
10039 		res = DDI_SUCCESS;
10040 		break;
10041 	}
10042 
10043 	default:
10044 		/* unknown port state */
10045 		fcp_log(CE_WARN, pptr->port_dip,
10046 		    "!fcp%d: invalid port state at attach=0x%x",
10047 		    instance, pptr->port_phys_state);
10048 
10049 		mutex_enter(&pptr->port_mutex);
10050 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10051 		mutex_exit(&pptr->port_mutex);
10052 
10053 		res = DDI_SUCCESS;
10054 		break;
10055 	}
10056 
10057 	/* free temp list if used */
10058 	if (tmp_list != NULL) {
10059 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10060 	}
10061 
10062 	/* note the attach time */
10063 	pptr->port_attach_time = lbolt64;
10064 
10065 	/* all done */
10066 	return (res);
10067 
10068 	/* a failure we have to clean up after */
10069 fail:
10070 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10071 
10072 	if (soft_state_linked) {
10073 		/* remove this fcp_port from the linked list */
10074 		(void) fcp_soft_state_unlink(pptr);
10075 	}
10076 
10077 	/* unbind and free event set */
10078 	if (pptr->port_ndi_event_hdl) {
10079 		if (event_bind) {
10080 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10081 			    &pptr->port_ndi_events, NDI_SLEEP);
10082 		}
10083 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10084 	}
10085 
10086 	if (pptr->port_ndi_event_defs) {
10087 		(void) kmem_free(pptr->port_ndi_event_defs,
10088 		    sizeof (fcp_ndi_event_defs));
10089 	}
10090 
10091 	/*
10092 	 * Clean up mpxio stuff
10093 	 */
10094 	if (pptr->port_mpxio) {
10095 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10096 		pptr->port_mpxio--;
10097 	}
10098 
10099 	/* undo SCSI HBA setup */
10100 	if (hba_attached) {
10101 		(void) scsi_hba_detach(pptr->port_dip);
10102 	}
10103 	if (pptr->port_tran != NULL) {
10104 		scsi_hba_tran_free(pptr->port_tran);
10105 	}
10106 
10107 	mutex_enter(&fcp_global_mutex);
10108 
10109 	/*
10110 	 * We check soft_state_linked, because it is incremented right before
10111 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10112 	 * soft_state_linked is still FALSE, we do not want to decrement
10113 	 * fcp_watchdog_init or possibly call untimeout.
10114 	 */
10115 
10116 	if (soft_state_linked) {
10117 		if (--fcp_watchdog_init == 0) {
10118 			timeout_id_t	tid = fcp_watchdog_id;
10119 
10120 			mutex_exit(&fcp_global_mutex);
10121 			(void) untimeout(tid);
10122 		} else {
10123 			mutex_exit(&fcp_global_mutex);
10124 		}
10125 	} else {
10126 		mutex_exit(&fcp_global_mutex);
10127 	}
10128 
10129 	if (mutex_initted) {
10130 		mutex_destroy(&pptr->port_mutex);
10131 		mutex_destroy(&pptr->port_pkt_mutex);
10132 	}
10133 
10134 	if (tmp_list != NULL) {
10135 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10136 	}
10137 
10138 	/* this makes pptr invalid */
10139 	ddi_soft_state_free(fcp_softstate, instance);
10140 
10141 	return (DDI_FAILURE);
10142 }
10143 
10144 
10145 static int
10146 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10147 {
10148 	int count = 0;
10149 
10150 	mutex_enter(&pptr->port_mutex);
10151 
10152 	/*
10153 	 * if the port is powered down or suspended, nothing else
10154 	 * to do; just return.
10155 	 */
10156 	if (flag != FCP_STATE_DETACHING) {
10157 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10158 		    FCP_STATE_SUSPENDED)) {
10159 			pptr->port_state |= flag;
10160 			mutex_exit(&pptr->port_mutex);
10161 			return (FC_SUCCESS);
10162 		}
10163 	}
10164 
10165 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10166 		mutex_exit(&pptr->port_mutex);
10167 		return (FC_FAILURE);
10168 	}
10169 
10170 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10171 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10172 	    "fcp_handle_port_detach: port is detaching");
10173 
10174 	pptr->port_state |= flag;
10175 
10176 	/*
10177 	 * Wait for any ongoing reconfig/ipkt to complete, which
10178 	 * ensures that freeing the targets/luns is safe.
10179 	 * No more references to this port should happen from statec/ioctl
10180 	 * after that, as it was removed from the global port list.
10181 	 */
10182 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10183 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10184 		/*
10185 		 * Let's give sufficient time for reconfig/ipkt
10186 		 * to complete.
10187 		 */
10188 		if (count++ >= FCP_ICMD_DEADLINE) {
10189 			break;
10190 		}
10191 		mutex_exit(&pptr->port_mutex);
10192 		delay(drv_usectohz(1000000));
10193 		mutex_enter(&pptr->port_mutex);
10194 	}
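	/*
	 * The loop above polls once per second and gives up after
	 * FCP_ICMD_DEADLINE iterations, so outstanding reconfigurations and
	 * internal packets get on the order of FCP_ICMD_DEADLINE seconds to
	 * drain before the detach/suspend is failed below.
	 */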
10195 
10196 	/*
10197 	 * if the driver is still busy then fail to
10198 	 * suspend/power down.
10199 	 */
10200 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10201 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10202 		pptr->port_state &= ~flag;
10203 		mutex_exit(&pptr->port_mutex);
10204 		return (FC_FAILURE);
10205 	}
10206 
10207 	if (flag == FCP_STATE_DETACHING) {
10208 		pptr = fcp_soft_state_unlink(pptr);
10209 		ASSERT(pptr != NULL);
10210 	}
10211 
10212 	pptr->port_link_cnt++;
10213 	pptr->port_state |= FCP_STATE_OFFLINE;
10214 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10215 
10216 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10217 	    FCP_CAUSE_LINK_DOWN);
10218 	mutex_exit(&pptr->port_mutex);
10219 
10220 	/* kill watch dog timer if we're the last */
10221 	mutex_enter(&fcp_global_mutex);
10222 	if (--fcp_watchdog_init == 0) {
10223 		timeout_id_t	tid = fcp_watchdog_id;
10224 		mutex_exit(&fcp_global_mutex);
10225 		(void) untimeout(tid);
10226 	} else {
10227 		mutex_exit(&fcp_global_mutex);
10228 	}
10229 
10230 	/* clean up the port structures */
10231 	if (flag == FCP_STATE_DETACHING) {
10232 		fcp_cleanup_port(pptr, instance);
10233 	}
10234 
10235 	return (FC_SUCCESS);
10236 }
10237 
10238 
10239 static void
10240 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10241 {
10242 	ASSERT(pptr != NULL);
10243 
10244 	/* unbind and free event set */
10245 	if (pptr->port_ndi_event_hdl) {
10246 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10247 		    &pptr->port_ndi_events, NDI_SLEEP);
10248 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10249 	}
10250 
10251 	if (pptr->port_ndi_event_defs) {
10252 		(void) kmem_free(pptr->port_ndi_event_defs,
10253 		    sizeof (fcp_ndi_event_defs));
10254 	}
10255 
10256 	/* free the lun/target structures and devinfos */
10257 	fcp_free_targets(pptr);
10258 
10259 	/*
10260 	 * Clean up mpxio stuff
10261 	 */
10262 	if (pptr->port_mpxio) {
10263 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10264 		pptr->port_mpxio--;
10265 	}
10266 
10267 	/* clean up SCSA stuff */
10268 	(void) scsi_hba_detach(pptr->port_dip);
10269 	if (pptr->port_tran != NULL) {
10270 		scsi_hba_tran_free(pptr->port_tran);
10271 	}
10272 
10273 #ifdef	KSTATS_CODE
10274 	/* clean up kstats */
10275 	if (pptr->fcp_ksp != NULL) {
10276 		kstat_delete(pptr->fcp_ksp);
10277 	}
10278 #endif
10279 
10280 	/* clean up soft state mutexes/condition variables */
10281 	mutex_destroy(&pptr->port_mutex);
10282 	mutex_destroy(&pptr->port_pkt_mutex);
10283 
10284 	/* all done with soft state */
10285 	ddi_soft_state_free(fcp_softstate, instance);
10286 }
10287 
10288 /*
10289  *     Function: fcp_kmem_cache_constructor
10290  *
10291  *  Description: This function allocates and initializes the resources required
10292  *		 to build a scsi_pkt structure for the target driver.  The result
10293  *		 of the allocation and initialization will be cached in the
10294  *		 memory cache.	As DMA resources may be allocated here, that
10295  *		 means DMA resources will be tied up in the cache manager.
10296  *		 This is a tradeoff that has been made for performance reasons.
10297  *
10298  *     Argument: *buf		Memory to preinitialize.
10299  *		 *arg		FCP port structure (fcp_port).
10300  *		 kmflags	Value passed to kmem_cache_alloc() and
10301  *				propagated to the constructor.
10302  *
10303  * Return Value: 0	Allocation/Initialization was successful.
10304  *		 -1	Allocation or Initialization failed.
10305  *
10306  *
10307  * If the returned value is 0, the buffer is initialized like this:
10308  *
10309  *		    +================================+
10310  *	     +----> |	      struct scsi_pkt	     |
10311  *	     |	    |				     |
10312  *	     | +--- | pkt_ha_private		     |
10313  *	     | |    |				     |
10314  *	     | |    +================================+
10315  *	     | |
10316  *	     | |    +================================+
10317  *	     | +--> |	    struct fcp_pkt	     | <---------+
10318  *	     |	    |				     |		 |
10319  *	     +----- | cmd_pkt			     |		 |
10320  *		    |			  cmd_fp_pkt | ---+	 |
10321  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10322  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10323  *	  |    |    |--------------------------------|	  |	 |
10324  *	  |    |    |	      struct fc_packet	     | <--+	 |
10325  *	  |    |    |				     |		 |
10326  *	  |    |    |		     pkt_ulp_private | ----------+
10327  *	  |    |    |		     pkt_fca_private | -----+
10328  *	  |    |    |		     pkt_data_cookie | ---+ |
10329  *	  |    |    | pkt_cmdlen		     |	  | |
10330  *	  |    |(a) | pkt_rsplen		     |	  | |
10331  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10332  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10333  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10334  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10335  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10336  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10337  *		    +================================+	  | |  |   |   |    |
10338  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10339  *		    |				     |	    |  |   |   |    |
10340  *		    +================================+	    |  |   |   |    |
10341  *		    |	      fca_private	     | <----+  |   |   |    |
10342  *		    |				     |	       |   |   |    |
10343  *		    +================================+	       |   |   |    |
10344  *							       |   |   |    |
10345  *							       |   |   |    |
10346  *		    +================================+	 (d)   |   |   |    |
10347  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10348  *		    |				     |		   |   |    |
10349  *		    +================================+		   |   |    |
10350  *								   |   |    |
10351  *		    +================================+	 (d)	   |   |    |
10352  *		    |		fcp_resp	     | <-----------+   |    |
10353  *		    |	(DMA resources associated)   |		       |    |
10354  *		    +================================+		       |    |
10355  *								       |    |
10356  *								       |    |
10357  *								       |    |
10358  *		    +================================+	 (c)	       |    |
10359  *		    |	     fcp_cmd cookies	     | <---------------+    |
10360  *		    |				     |			    |
10361  *		    +================================+			    |
10362  *									    |
10363  *		    +================================+	 (c)		    |
10364  *		    |		 fcp_cmd	     | <--------------------+
10365  *		    |	(DMA resources associated)   |
10366  *		    +================================+
10367  *
10368  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10369  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10370  * (c) Only if DMA is used for the FCP_CMD buffer.
10371  * (d) Only if DMA is used for the FCP_RESP buffer
10372  */
10373 static int
10374 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10375     int kmflags)
10376 {
10377 	struct fcp_pkt	*cmd;
10378 	struct fcp_port	*pptr;
10379 	fc_packet_t	*fpkt;
10380 
10381 	pptr = (struct fcp_port *)tran->tran_hba_private;
10382 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10383 	bzero(cmd, tran->tran_hba_len);
10384 
10385 	cmd->cmd_pkt = pkt;
10386 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10387 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10388 	cmd->cmd_fp_pkt = fpkt;
10389 
10390 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10391 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10392 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10393 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10394 
10395 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10396 	    sizeof (struct fcp_pkt));
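	/*
	 * Per-command layout of the HBA-private area (tran_hba_len bytes,
	 * as set up in fcp_handle_port_attach()):
	 *
	 *	struct fcp_pkt | DMA cookie array (port_dmacookie_sz) |
	 *	FCA private data (port_priv_pkt_len)
	 *
	 * pkt_data_cookie points at the cookie array and pkt_fca_private
	 * just past it.
	 */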
10397 
10398 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10399 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10400 
10401 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10402 		/*
10403 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10404 		 * fcp_resp.  The transfer of information will be done by
10405 		 * bcopy.
10406 		 * The naming of the flags (that is actually a value) is
10407 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10408 		 * DMA" but instead "NO DMA".
10409 		 */
10410 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10411 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10412 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10413 	} else {
10414 		/*
10415 		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
10416 		 * buffer.  A buffer is allocated for each one using the
10417 		 * ddi_dma_* interfaces.
10418 		 */
10419 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10420 			return (-1);
10421 		}
10422 	}
10423 
10424 	return (0);
10425 }
10426 
10427 /*
10428  *     Function: fcp_kmem_cache_destructor
10429  *
10430  *  Description: Called by the destructor of the cache managed by SCSA.
10431  *		 All the resources pre-allocated in fcp_pkt_constructor
10432  *		 All the resources pre-allocated in fcp_kmem_cache_constructor()
10433  *		 and the data pre-initialized there
10434  *		 are freed and uninitialized here.
10435  *     Argument: *buf		Memory to uninitialize.
10436  *		 *arg		FCP port structure (fcp_port).
10437  *
10438  * Return Value: None
10439  *
10440  *	Context: kernel
10441  */
10442 static void
10443 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10444 {
10445 	struct fcp_pkt	*cmd;
10446 	struct fcp_port	*pptr;
10447 
10448 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10449 	cmd = pkt->pkt_ha_private;
10450 
10451 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10452 		/*
10453 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10454 		 * buffer and DMA resources allocated to do so are released.
10455 		 */
10456 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10457 	}
10458 }
10459 
10460 /*
10461  *     Function: fcp_alloc_cmd_resp
10462  *
10463  *  Description: This function allocates an FCP_CMD and an FCP_RESP buffer that
10464  *		 will be DMAed by the HBA.  The buffer is allocated applying
10465  *		 the DMA requirements for the HBA.  The buffers allocated will
10466  *		 also be bound.	 DMA resources are allocated in the process.
10467  *		 They will be released by fcp_free_cmd_resp().
10468  *
10469  *     Argument: *pptr	FCP port.
10470  *		 *fpkt	fc packet for which the cmd and resp packet should be
10471  *			allocated.
10472  *		 flags	Allocation flags.
10473  *
10474  * Return Value: FC_FAILURE
10475  *		 FC_SUCCESS
10476  *
10477  *	Context: User or Kernel context only if flags == KM_SLEEP.
10478  *		 Interrupt context if the KM_SLEEP is not specified.
10479  */
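/*
 * The allocation below follows the usual DDI DMA pattern for each buffer:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc(), ddi_dma_addr_bind_handle(),
 * then the cookie list is walked with ddi_dma_nextcookie() into a
 * kmem-allocated array.  The FCP_CMD buffer is bound for DDI_DMA_WRITE and
 * the FCP_RESP buffer for DDI_DMA_READ, and every failure path unwinds
 * whatever was allocated before it.
 */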
10480 static int
10481 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10482 {
10483 	int			rval;
10484 	int			cmd_len;
10485 	int			resp_len;
10486 	ulong_t			real_len;
10487 	int			(*cb) (caddr_t);
10488 	ddi_dma_cookie_t	pkt_cookie;
10489 	ddi_dma_cookie_t	*cp;
10490 	uint32_t		cnt;
10491 
10492 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10493 
10494 	cmd_len = fpkt->pkt_cmdlen;
10495 	resp_len = fpkt->pkt_rsplen;
10496 
10497 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10498 
10499 	/* Allocation of a DMA handle used in subsequent calls. */
10500 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10501 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10502 		return (FC_FAILURE);
10503 	}
10504 
10505 	/* A buffer is allocated that satisfies the DMA requirements. */
10506 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10507 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10508 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10509 
10510 	if (rval != DDI_SUCCESS) {
10511 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10512 		return (FC_FAILURE);
10513 	}
10514 
10515 	if (real_len < cmd_len) {
10516 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10517 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10518 		return (FC_FAILURE);
10519 	}
10520 
10521 	/* The buffer allocated is DMA bound. */
10522 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10523 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10524 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10525 
10526 	if (rval != DDI_DMA_MAPPED) {
10527 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10528 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10529 		return (FC_FAILURE);
10530 	}
10531 
10532 	if (fpkt->pkt_cmd_cookie_cnt >
10533 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10534 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10535 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10536 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10537 		return (FC_FAILURE);
10538 	}
10539 
10540 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10541 
10542 	/*
10543 	 * The buffer where the scatter/gather list is going to be built is
10544 	 * allocated.
10545 	 */
10546 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10547 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10548 	    KM_NOSLEEP);
10549 
10550 	if (cp == NULL) {
10551 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10552 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10553 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10554 		return (FC_FAILURE);
10555 	}
10556 
10557 	/*
10558 	 * The scatter/gather list for the buffer we just allocated is built
10559 	 * here.
10560 	 */
10561 	*cp = pkt_cookie;
10562 	cp++;
10563 
10564 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10565 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10566 		    &pkt_cookie);
10567 		*cp = pkt_cookie;
10568 	}
10569 
10570 	ASSERT(fpkt->pkt_resp_dma == NULL);
10571 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10572 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10573 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10574 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10575 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10576 		return (FC_FAILURE);
10577 	}
10578 
10579 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10580 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10581 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10582 	    &fpkt->pkt_resp_acc);
10583 
10584 	if (rval != DDI_SUCCESS) {
10585 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10586 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10587 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10588 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10589 		kmem_free(fpkt->pkt_cmd_cookie,
10590 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10591 		return (FC_FAILURE);
10592 	}
10593 
10594 	if (real_len < resp_len) {
10595 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10596 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10597 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10598 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10599 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10600 		kmem_free(fpkt->pkt_cmd_cookie,
10601 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10602 		return (FC_FAILURE);
10603 	}
10604 
10605 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10606 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10607 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10608 
10609 	if (rval != DDI_DMA_MAPPED) {
10610 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10611 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10612 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10613 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10614 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10615 		kmem_free(fpkt->pkt_cmd_cookie,
10616 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10617 		return (FC_FAILURE);
10618 	}
10619 
10620 	if (fpkt->pkt_resp_cookie_cnt >
10621 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10622 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10623 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10624 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10625 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10626 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10627 		kmem_free(fpkt->pkt_cmd_cookie,
10628 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10629 		return (FC_FAILURE);
10630 	}
10631 
10632 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10633 
10634 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10635 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10636 	    KM_NOSLEEP);
10637 
10638 	if (cp == NULL) {
10639 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10640 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10641 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10642 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10643 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10644 		kmem_free(fpkt->pkt_cmd_cookie,
10645 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10646 		return (FC_FAILURE);
10647 	}
10648 
10649 	*cp = pkt_cookie;
10650 	cp++;
10651 
10652 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10653 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10654 		    &pkt_cookie);
10655 		*cp = pkt_cookie;
10656 	}
10657 
10658 	return (FC_SUCCESS);
10659 }
10660 
10661 /*
10662  *     Function: fcp_free_cmd_resp
10663  *
10664  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10665  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10666  *		 associated with them.	That includes the DMA resources and the
10667  *		 buffer allocated for the cookies of each one of them.
10668  *
10669  *     Argument: *pptr		FCP port context.
10670  *		 *fpkt		fc packet containing the cmd and resp packet
10671  *				to be released.
10672  *
10673  * Return Value: None
10674  *
10675  *	Context: Interrupt, User and Kernel context.
10676  */
10677 /* ARGSUSED */
10678 static void
10679 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10680 {
10681 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10682 
10683 	if (fpkt->pkt_resp_dma) {
10684 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10685 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10686 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10687 	}
10688 
10689 	if (fpkt->pkt_resp_cookie) {
10690 		kmem_free(fpkt->pkt_resp_cookie,
10691 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10692 		fpkt->pkt_resp_cookie = NULL;
10693 	}
10694 
10695 	if (fpkt->pkt_cmd_dma) {
10696 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10697 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10698 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10699 	}
10700 
10701 	if (fpkt->pkt_cmd_cookie) {
10702 		kmem_free(fpkt->pkt_cmd_cookie,
10703 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10704 		fpkt->pkt_cmd_cookie = NULL;
10705 	}
10706 }
10707 
10708 
10709 /*
10710  * called by the transport to do our own target initialization
10711  *
10712  * can acquire and release the global mutex
10713  */
10714 /* ARGSUSED */
10715 static int
10716 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10717     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10718 {
10719 	uchar_t			*bytes;
10720 	uint_t			nbytes;
10721 	uint16_t		lun_num;
10722 	struct fcp_tgt	*ptgt;
10723 	struct fcp_lun	*plun;
10724 	struct fcp_port	*pptr = (struct fcp_port *)
10725 	    hba_tran->tran_hba_private;
10726 
10727 	ASSERT(pptr != NULL);
10728 
10729 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10730 	    FCP_BUF_LEVEL_8, 0,
10731 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10732 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10733 
10734 	/* get our port WWN property */
10735 	bytes = NULL;
10736 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10737 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10738 	    (nbytes != FC_WWN_SIZE)) {
10739 		/* no port WWN property */
10740 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10741 		    FCP_BUF_LEVEL_8, 0,
10742 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10743 		    " for %s (instance %d): bytes=%p nbytes=%x",
10744 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10745 		    nbytes);
10746 
10747 		if (bytes != NULL) {
10748 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10749 		}
10750 
10751 		return (DDI_NOT_WELL_FORMED);
10752 	}
10753 	ASSERT(bytes != NULL);
10754 
10755 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10756 	    LUN_PROP, 0xFFFF);
10757 	if (lun_num == 0xFFFF) {
10758 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10759 		    FCP_BUF_LEVEL_8, 0,
10760 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10761 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10762 		    ddi_get_instance(tgt_dip));
10763 
10764 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10765 		return (DDI_NOT_WELL_FORMED);
10766 	}
10767 
10768 	mutex_enter(&pptr->port_mutex);
10769 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10770 		mutex_exit(&pptr->port_mutex);
10771 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10772 		    FCP_BUF_LEVEL_8, 0,
10773 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10774 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10775 		    ddi_get_instance(tgt_dip));
10776 
10777 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10778 		return (DDI_FAILURE);
10779 	}
10780 
10781 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10782 	    FC_WWN_SIZE) == 0);
10783 	ASSERT(plun->lun_num == lun_num);
10784 
10785 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10786 
10787 	ptgt = plun->lun_tgt;
10788 
10789 	mutex_enter(&ptgt->tgt_mutex);
10790 	plun->lun_tgt_count++;
10791 	scsi_device_hba_private_set(sd, plun);
10792 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10793 	plun->lun_sd = sd;
10794 	mutex_exit(&ptgt->tgt_mutex);
10795 	mutex_exit(&pptr->port_mutex);
10796 
10797 	return (DDI_SUCCESS);
10798 }
10799 
10800 /*ARGSUSED*/
10801 static int
10802 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10803     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10804 {
10805 	uchar_t			*bytes;
10806 	uint_t			nbytes;
10807 	uint16_t		lun_num;
10808 	struct fcp_tgt	*ptgt;
10809 	struct fcp_lun	*plun;
10810 	struct fcp_port	*pptr = (struct fcp_port *)
10811 	    hba_tran->tran_hba_private;
10812 	child_info_t		*cip;
10813 
10814 	ASSERT(pptr != NULL);
10815 
10816 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10817 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10818 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10819 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10820 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10821 
10822 	cip = (child_info_t *)sd->sd_pathinfo;
10823 	if (cip == NULL) {
10824 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10825 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10826 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10827 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10828 		    ddi_get_instance(tgt_dip));
10829 
10830 		return (DDI_NOT_WELL_FORMED);
10831 	}
10832 
10833 	/* get our port WWN property */
10834 	bytes = NULL;
10835 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10836 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10837 	    (nbytes != FC_WWN_SIZE)) {
10838 		if (bytes) {
10839 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10840 		}
10841 		return (DDI_NOT_WELL_FORMED);
10842 	}
10843 
10844 	ASSERT(bytes != NULL);
10845 
10846 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10847 	    LUN_PROP, 0xFFFF);
10848 	if (lun_num == 0xFFFF) {
10849 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10850 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10851 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10852 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10853 		    ddi_get_instance(tgt_dip));
10854 
10855 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10856 		return (DDI_NOT_WELL_FORMED);
10857 	}
10858 
10859 	mutex_enter(&pptr->port_mutex);
10860 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10861 		mutex_exit(&pptr->port_mutex);
10862 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10863 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10864 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10865 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10866 		    ddi_get_instance(tgt_dip));
10867 
10868 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10869 		return (DDI_FAILURE);
10870 	}
10871 
10872 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10873 	    FC_WWN_SIZE) == 0);
10874 	ASSERT(plun->lun_num == lun_num);
10875 
10876 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10877 
10878 	ptgt = plun->lun_tgt;
10879 
10880 	mutex_enter(&ptgt->tgt_mutex);
10881 	plun->lun_tgt_count++;
10882 	scsi_device_hba_private_set(sd, plun);
10883 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10884 	plun->lun_sd = sd;
10885 	mutex_exit(&ptgt->tgt_mutex);
10886 	mutex_exit(&pptr->port_mutex);
10887 
10888 	return (DDI_SUCCESS);
10889 }
10890 
10891 
10892 /*
10893  * called by the transport to do our own target initialization
10894  *
10895  * can acquire and release the global mutex
10896  */
10897 /* ARGSUSED */
10898 static int
10899 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10900     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10901 {
10902 	struct fcp_port	*pptr = (struct fcp_port *)
10903 	    hba_tran->tran_hba_private;
10904 	int			rval;
10905 
10906 	ASSERT(pptr != NULL);
10907 
10908 	/*
10909 	 * Child node is getting initialized.  Look at the mpxio component
10910 	 * type on the child device to see if this device is mpxio managed
10911 	 * or not.
10912 	 */
10913 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
10914 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10915 	} else {
10916 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
10917 	}
10918 
10919 	return (rval);
10920 }
10921 
10922 
10923 /* ARGSUSED */
10924 static void
10925 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10926     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10927 {
10928 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
10929 	struct fcp_tgt	*ptgt;
10930 
10931 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
10932 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10933 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
10934 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
10935 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10936 
10937 	if (plun == NULL) {
10938 		return;
10939 	}
10940 	ptgt = plun->lun_tgt;
10941 
10942 	ASSERT(ptgt != NULL);
10943 
10944 	mutex_enter(&ptgt->tgt_mutex);
10945 	ASSERT(plun->lun_tgt_count > 0);
10946 
10947 	if (--plun->lun_tgt_count == 0) {
10948 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
10949 	}
10950 	plun->lun_sd = NULL;
10951 	mutex_exit(&ptgt->tgt_mutex);
10952 }
10953 
10954 /*
10955  *     Function: fcp_scsi_start
10956  *
10957  *  Description: This function is called by the target driver to request a
10958  *		 command to be sent.
10959  *
10960  *     Argument: *ap		SCSI address of the device.
10961  *		 *pkt		SCSI packet containing the cmd to send.
10962  *
10963  * Return Value: TRAN_ACCEPT
10964  *		 TRAN_BUSY
10965  *		 TRAN_BADPKT
10966  *		 TRAN_FATAL_ERROR
10967  */
10968 static int
10969 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
10970 {
10971 	struct fcp_port	*pptr = ADDR2FCP(ap);
10972 	struct fcp_lun	*plun = ADDR2LUN(ap);
10973 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
10974 	struct fcp_tgt	*ptgt = plun->lun_tgt;
10975 	int			rval;
10976 
10977 	/* ensure command isn't already issued */
10978 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
10979 
10980 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10981 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
10982 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
10983 
10984 	/*
10985 	 * It is strange that we enter the fcp_port mutex and the target
10986 	 * mutex to check the lun state (which has a mutex of its own).
10987 	 */
10988 	mutex_enter(&pptr->port_mutex);
10989 	mutex_enter(&ptgt->tgt_mutex);
10990 
10991 	/*
10992 	 * If the device is offline and is not in the process of coming
10993 	 * online, fail the request.
10994 	 */
10995 
10996 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
10997 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
10998 		mutex_exit(&ptgt->tgt_mutex);
10999 		mutex_exit(&pptr->port_mutex);
11000 
11001 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11002 			pkt->pkt_reason = CMD_DEV_GONE;
11003 		}
11004 
11005 		return (TRAN_FATAL_ERROR);
11006 	}
11007 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11008 
11009 	/*
11010 	 * If we are suspended, the kernel is trying to dump, so don't
11011 	 * block, fail or defer requests - send them down right away.
11012 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11013 	 * assume we have been suspended.  There is hardware such as
11014 	 * the v880 that doesn't do PM.	 Thus, the check for
11015 	 * ddi_in_panic.
11016 	 *
11017 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11018 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11019 	 * either the device will have gone away or changed and we can fail
11020 	 * the request, or we can proceed if the device didn't change.
11021 	 *
11022 	 * If the pd in the target or the packet is NULL it's probably
11023 	 * because the device has gone away; we allow the request to be
11024 	 * put on the internal queue here in case the device comes back within
11025 	 * the offline timeout.  fctl will fix up the pd's if the tgt_pd_handle
11026 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL.
11027 	 * pkt_pd could be NULL because the device was disappearing during or
11028 	 * since packet initialization.
11029 	 */
11030 
11031 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11032 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11033 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11034 	    (ptgt->tgt_pd_handle == NULL) ||
11035 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11036 		/*
11037 		 * If ((LUN is busy AND
11038 		 *	the port is not suspended AND
11039 		 *	The system is not in panic state) OR
11040 		 *	(The port is coming up))
11041 		 *
11042 		 * We check to see if any of the flags FLAG_NOINTR or
11043 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11044 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11045 		 */
11046 		mutex_exit(&ptgt->tgt_mutex);
11047 		mutex_exit(&pptr->port_mutex);
11048 
11049 		/* see if using interrupts is allowed (so queueing will work) */
11050 		if (pkt->pkt_flags & FLAG_NOINTR) {
11051 			pkt->pkt_resid = 0;
11052 			return (TRAN_BUSY);
11053 		}
11054 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11055 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11056 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11057 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11058 			return (TRAN_BUSY);
11059 		}
11060 #ifdef	DEBUG
11061 		mutex_enter(&pptr->port_pkt_mutex);
11062 		pptr->port_npkts++;
11063 		mutex_exit(&pptr->port_pkt_mutex);
11064 #endif /* DEBUG */
11065 
11066 		/* go queue up the pkt for later */
11067 		fcp_queue_pkt(pptr, cmd);
11068 		return (TRAN_ACCEPT);
11069 	}
11070 	cmd->cmd_state = FCP_PKT_ISSUED;
11071 
11072 	mutex_exit(&ptgt->tgt_mutex);
11073 	mutex_exit(&pptr->port_mutex);
11074 
11075 	/*
11076 	 * Now that we released the mutexes, what was protected by them can
11077 	 * change.
11078 	 */
11079 
11080 	/*
11081 	 * If there is a reconfiguration in progress, wait for it to complete.
11082 	 */
11083 	fcp_reconfig_wait(pptr);
11084 
11085 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11086 	    pkt->pkt_time : 0;
11087 
11088 	/* prepare the packet */
11089 
11090 	fcp_prepare_pkt(pptr, cmd, plun);
11091 
11092 	if (cmd->cmd_pkt->pkt_time) {
11093 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11094 	} else {
11095 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11096 	}
11097 
11098 	/*
11099 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11100 	 * have to do polled I/O
11101 	 */
11102 	if (pkt->pkt_flags & FLAG_NOINTR) {
11103 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11104 		return (fcp_dopoll(pptr, cmd));
11105 	}
11106 
11107 #ifdef	DEBUG
11108 	mutex_enter(&pptr->port_pkt_mutex);
11109 	pptr->port_npkts++;
11110 	mutex_exit(&pptr->port_pkt_mutex);
11111 #endif /* DEBUG */
11112 
11113 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11114 	if (rval == FC_SUCCESS) {
11115 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11116 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11117 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11118 		return (TRAN_ACCEPT);
11119 	}
11120 
11121 	cmd->cmd_state = FCP_PKT_IDLE;
11122 
11123 #ifdef	DEBUG
11124 	mutex_enter(&pptr->port_pkt_mutex);
11125 	pptr->port_npkts--;
11126 	mutex_exit(&pptr->port_pkt_mutex);
11127 #endif /* DEBUG */
11128 
11129 	/*
11130 	 * For lack of clearer definitions, choose
11131 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11132 	 */
11133 
11134 	if (rval == FC_TRAN_BUSY) {
11135 		pkt->pkt_resid = 0;
11136 		rval = TRAN_BUSY;
11137 	} else {
11138 		mutex_enter(&ptgt->tgt_mutex);
11139 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11140 			child_info_t	*cip;
11141 
11142 			mutex_enter(&plun->lun_mutex);
11143 			cip = plun->lun_cip;
11144 			mutex_exit(&plun->lun_mutex);
11145 
11146 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11147 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11148 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11149 			    plun->lun_tgt->tgt_d_id, rval, cip);
11150 
11151 			rval = TRAN_FATAL_ERROR;
11152 		} else {
11153 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11154 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11155 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11156 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11157 				    pkt);
11158 				rval = TRAN_BUSY;
11159 			} else {
11160 				rval = TRAN_ACCEPT;
11161 				fcp_queue_pkt(pptr, cmd);
11162 			}
11163 		}
11164 		mutex_exit(&ptgt->tgt_mutex);
11165 	}
11166 
11167 	return (rval);
11168 }
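
/*
 * Illustrative sketch (not part of this driver): how a SCSA target
 * driver layered above fcp might hand a command to fcp_scsi_start()
 * and react to the return codes documented above.  The routine name
 * and the retry policy are hypothetical; scsi_transport() is the
 * standard SCSA entry point that resolves to fcp_scsi_start() for
 * devices enumerated by fcp.
 */
static int
fcp_example_submit_pkt(struct scsi_pkt *pkt)
{
	switch (scsi_transport(pkt)) {
	case TRAN_ACCEPT:
		/* command accepted; the completion callback will fire */
		return (1);
	case TRAN_BUSY:
		/* transient condition; the caller should retry later */
		return (0);
	default:
		/* TRAN_BADPKT or TRAN_FATAL_ERROR: give up on this pkt */
		return (-1);
	}
}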
11169 
11170 /*
11171  * called by the transport to abort a packet
11172  */
11173 /*ARGSUSED*/
11174 static int
11175 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11176 {
11177 	int tgt_cnt;
11178 	struct fcp_port		*pptr = ADDR2FCP(ap);
11179 	struct fcp_lun	*plun = ADDR2LUN(ap);
11180 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11181 
11182 	if (pkt == NULL) {
11183 		if (ptgt) {
11184 			mutex_enter(&ptgt->tgt_mutex);
11185 			tgt_cnt = ptgt->tgt_change_cnt;
11186 			mutex_exit(&ptgt->tgt_mutex);
11187 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11188 			return (TRUE);
11189 		}
11190 	}
11191 	return (FALSE);
11192 }
11193 
11194 
11195 /*
11196  * Perform reset
11197  */
11198 int
11199 fcp_scsi_reset(struct scsi_address *ap, int level)
11200 {
11201 	int			rval = 0;
11202 	struct fcp_port		*pptr = ADDR2FCP(ap);
11203 	struct fcp_lun	*plun = ADDR2LUN(ap);
11204 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11205 
11206 	if (level == RESET_ALL) {
11207 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11208 			rval = 1;
11209 		}
11210 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11211 		/*
11212 		 * If we are in the middle of discovery, return
11213 		 * SUCCESS as this target will be rediscovered
11214 		 * anyway
11215 		 */
11216 		mutex_enter(&ptgt->tgt_mutex);
11217 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11218 			mutex_exit(&ptgt->tgt_mutex);
11219 			return (1);
11220 		}
11221 		mutex_exit(&ptgt->tgt_mutex);
11222 
11223 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11224 			rval = 1;
11225 		}
11226 	}
11227 	return (rval);
11228 }
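
/*
 * Illustrative sketch (not part of this driver): how a target driver
 * might request a reset through the framework, trying the narrowest
 * scope first.  scsi_reset() resolves to fcp_scsi_reset() above; the
 * function name and the fallback policy are hypothetical.
 */
static int
fcp_example_reset_lun(struct scsi_address *ap)
{
	/* prefer a LUN reset; fall back to resetting the whole target */
	if (scsi_reset(ap, RESET_LUN) == 0) {
		return (scsi_reset(ap, RESET_TARGET));
	}
	return (1);
}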
11229 
11230 
11231 /*
11232  * called by the framework to get a SCSI capability
11233  */
11234 static int
11235 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11236 {
11237 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11238 }
11239 
11240 
11241 /*
11242  * called by the framework to set a SCSI capability
11243  */
11244 static int
11245 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11246 {
11247 	return (fcp_commoncap(ap, cap, value, whom, 1));
11248 }
11249 
11250 /*
11251  *     Function: fcp_pkt_setup
11252  *
11253  *  Description: This function sets up the scsi_pkt structure passed by the
11254  *		 caller. This function assumes fcp_pkt_constructor has been
11255  *		 called previously for the packet passed by the caller.	 If
11256  *		 successful this call will have the following results:
11257  *
11258  *		   - The resources needed that will be constant throughout
11259  *		     the whole transaction are allocated.
11260  *		   - The fields that will be constant throughout the whole
11261  *		     transaction are initialized.
11262  *		   - The scsi packet will be linked to the LUN structure
11263  *		     addressed by the transaction.
11264  *
11265  *     Argument:
11266  *		 *pkt		Pointer to a scsi_pkt structure.
11267  *		 callback	SLEEP_FUNC if the caller may wait for resources.
11268  *		 arg		Argument passed to the callback routine.
11269  *
11270  * Return Value: 0	Success
11271  *		 !0	Failure
11272  *
11273  *	Context: Kernel context or interrupt context
11274  */
11275 /* ARGSUSED */
11276 static int
11277 fcp_pkt_setup(struct scsi_pkt *pkt,
11278     int (*callback)(caddr_t arg),
11279     caddr_t arg)
11280 {
11281 	struct fcp_pkt	*cmd;
11282 	struct fcp_port	*pptr;
11283 	struct fcp_lun	*plun;
11284 	struct fcp_tgt	*ptgt;
11285 	int		kf;
11286 	fc_packet_t	*fpkt;
11287 	fc_frame_hdr_t	*hp;
11288 
11289 	pptr = ADDR2FCP(&pkt->pkt_address);
11290 	plun = ADDR2LUN(&pkt->pkt_address);
11291 	ptgt = plun->lun_tgt;
11292 
11293 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11294 	fpkt = cmd->cmd_fp_pkt;
11295 
11296 	/*
11297 	 * this request is for dma allocation only
11298 	 */
11299 	/*
11300 	 * First step of fcp_scsi_init_pkt: pkt allocation
11301 	 * We determine if the caller is willing to wait for the
11302 	 * resources.
11303 	 */
11304 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11305 
11306 	/*
11307 	 * Selective zeroing of the pkt.
11308 	 */
11309 	cmd->cmd_back = NULL;
11310 	cmd->cmd_next = NULL;
11311 
11312 	/*
11313 	 * Zero out fcp command
11314 	 */
11315 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11316 
11317 	cmd->cmd_state = FCP_PKT_IDLE;
11318 
11319 	fpkt = cmd->cmd_fp_pkt;
11320 	fpkt->pkt_data_acc = NULL;
11321 
11322 	/*
11323 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11324 	 * could be destroyed.  We need to fail pkt_setup.
11325 	 */
11326 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11327 		return (-1);
11328 	}
11329 
11330 	mutex_enter(&ptgt->tgt_mutex);
11331 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11332 
11333 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11334 	    != FC_SUCCESS) {
11335 		mutex_exit(&ptgt->tgt_mutex);
11336 		return (-1);
11337 	}
11338 
11339 	mutex_exit(&ptgt->tgt_mutex);
11340 
11341 	/* Fill in the Fibre Channel frame header */
11342 	hp = &fpkt->pkt_cmd_fhdr;
11343 	hp->r_ctl = R_CTL_COMMAND;
11344 	hp->rsvd = 0;
11345 	hp->type = FC_TYPE_SCSI_FCP;
11346 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11347 	hp->seq_id = 0;
11348 	hp->df_ctl  = 0;
11349 	hp->seq_cnt = 0;
11350 	hp->ox_id = 0xffff;
11351 	hp->rx_id = 0xffff;
11352 	hp->ro = 0;
11353 
11354 	/*
11355 	 * A doubly linked list (cmd_forw, cmd_back) is built
11356 	 * out of every allocated packet on a per-lun basis
11357 	 *
11358 	 * The packets are maintained in the list so as to satisfy
11359 	 * scsi_abort() requests. At present (which is unlikely to
11360 	 * change in the future) nobody performs a real scsi_abort
11361 	 * in the SCSI target drivers (as they don't keep the packets
11362 	 * after doing scsi_transport - so they don't know how to
11363 	 * abort a packet other than sending a NULL to abort all
11364 	 * outstanding packets)
11365 	 */
11366 	mutex_enter(&plun->lun_mutex);
11367 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11368 		plun->lun_pkt_head->cmd_back = cmd;
11369 	} else {
11370 		plun->lun_pkt_tail = cmd;
11371 	}
11372 	plun->lun_pkt_head = cmd;
11373 	mutex_exit(&plun->lun_mutex);
11374 	return (0);
11375 }
11376 
11377 /*
11378  *     Function: fcp_pkt_teardown
11379  *
11380  *  Description: This function releases a scsi_pkt structure and all the
11381  *		 resources attached to it.
11382  *
11383  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11384  *
11385  * Return Value: None
11386  *
11387  *	Context: User, Kernel or Interrupt context.
11388  */
11389 static void
11390 fcp_pkt_teardown(struct scsi_pkt *pkt)
11391 {
11392 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11393 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11394 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11395 
11396 	/*
11397 	 * Remove the packet from the per-lun list
11398 	 */
11399 	mutex_enter(&plun->lun_mutex);
11400 	if (cmd->cmd_back) {
11401 		ASSERT(cmd != plun->lun_pkt_head);
11402 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11403 	} else {
11404 		ASSERT(cmd == plun->lun_pkt_head);
11405 		plun->lun_pkt_head = cmd->cmd_forw;
11406 	}
11407 
11408 	if (cmd->cmd_forw) {
11409 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11410 	} else {
11411 		ASSERT(cmd == plun->lun_pkt_tail);
11412 		plun->lun_pkt_tail = cmd->cmd_back;
11413 	}
11414 
11415 	mutex_exit(&plun->lun_mutex);
11416 
11417 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11418 }
11419 
11420 /*
11421  * Routine for reset notification setup, to register or cancel.
11422  * This function is called by SCSA
11423  */
11424 /*ARGSUSED*/
11425 static int
11426 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11427     void (*callback)(caddr_t), caddr_t arg)
11428 {
11429 	struct fcp_port *pptr = ADDR2FCP(ap);
11430 
11431 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11432 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11433 }
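
/*
 * Illustrative sketch (not part of this driver): how a target driver
 * might register for the reset notifications that the routine above
 * implements on behalf of SCSA.  scsi_reset_notify() is the standard
 * framework interface; the callback, its argument and the routine
 * names are hypothetical.
 */
static void
fcp_example_reset_cb(caddr_t arg)
{
	dev_info_t	*dip = (dev_info_t *)arg;

	cmn_err(CE_NOTE, "!%s%d: example reset notification",
	    ddi_driver_name(dip), ddi_get_instance(dip));
}

static int
fcp_example_register_reset_notify(struct scsi_address *ap, dev_info_t *dip)
{
	return (scsi_reset_notify(ap, SCSI_RESET_NOTIFY,
	    fcp_example_reset_cb, (caddr_t)dip));
}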
11434 
11435 
11436 static int
11437 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11438     ddi_eventcookie_t *event_cookiep)
11439 {
11440 	struct fcp_port *pptr = fcp_dip2port(dip);
11441 
11442 	if (pptr == NULL) {
11443 		return (DDI_FAILURE);
11444 	}
11445 
11446 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11447 	    event_cookiep, NDI_EVENT_NOPASS));
11448 }
11449 
11450 
11451 static int
11452 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11453     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11454     ddi_callback_id_t *cb_id)
11455 {
11456 	struct fcp_port *pptr = fcp_dip2port(dip);
11457 
11458 	if (pptr == NULL) {
11459 		return (DDI_FAILURE);
11460 	}
11461 
11462 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11463 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11464 }
11465 
11466 
11467 static int
11468 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11469 {
11470 
11471 	struct fcp_port *pptr = fcp_dip2port(dip);
11472 
11473 	if (pptr == NULL) {
11474 		return (DDI_FAILURE);
11475 	}
11476 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11477 }
11478 
11479 
11480 /*
11481  * called by the transport to post an event
11482  */
11483 static int
11484 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11485     ddi_eventcookie_t eventid, void *impldata)
11486 {
11487 	struct fcp_port *pptr = fcp_dip2port(dip);
11488 
11489 	if (pptr == NULL) {
11490 		return (DDI_FAILURE);
11491 	}
11492 
11493 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11494 	    eventid, impldata));
11495 }
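
/*
 * Illustrative sketch (not part of this driver): how a child driver
 * might consume the NDI events whose bus-level plumbing is provided
 * by the four routines above.  ddi_get_eventcookie() and
 * ddi_add_event_handler() route through
 * fcp_scsi_bus_get_eventcookie() and fcp_scsi_bus_add_eventcall();
 * the event name string and the handler here are hypothetical.
 */
static void
fcp_example_event_handler(dev_info_t *dip, ddi_eventcookie_t cookie,
    void *arg, void *impldata)
{
	/* react to the event, e.g. revalidate the device */
}

static int
fcp_example_register_event(dev_info_t *dip, ddi_callback_id_t *idp)
{
	ddi_eventcookie_t	cookie;

	if (ddi_get_eventcookie(dip, "hypothetical-fcp-event",
	    &cookie) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (ddi_add_event_handler(dip, cookie,
	    fcp_example_event_handler, NULL, idp));
}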
11496 
11497 
11498 /*
11499  * In Fibre Channel a target in many cases has a one-to-one relation
11500  * with a port identifier (which is also known as D_ID, and as AL_PA
11501  * in private loop).  On Fibre Channel-to-SCSI bridge boxes a target
11502  * reset will most likely result in resetting all LUNs (which means a
11503  * reset will occur on all the SCSI devices connected at the other end
11504  * of the bridge).  How best to handle that is a favorite topic for
11505  * debate; one can argue as hotly as one likes and come up with an
11506  * arguably best solution to one's own satisfaction.
11507  *
11508  * To stay on track and not digress much, here are the problems stated
11509  * briefly:
11510  *
11511  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but
11512  *	target drivers use RESET_TARGET even if their instance is on a
11513  *	LUN.  Doesn't that sound a bit broken?
11514  *
11515  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11516  *	control fields of an FCP_CMND structure. It should have been
11517  *	fixed right there, giving flexibility to the initiators to
11518  *	minimize havoc that could be caused by resetting a target.
11519  */
11520 static int
11521 fcp_reset_target(struct scsi_address *ap, int level)
11522 {
11523 	int			rval = FC_FAILURE;
11524 	char			lun_id[25];
11525 	struct fcp_port		*pptr = ADDR2FCP(ap);
11526 	struct fcp_lun	*plun = ADDR2LUN(ap);
11527 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11528 	struct scsi_pkt		*pkt;
11529 	struct fcp_pkt	*cmd;
11530 	struct fcp_rsp		*rsp;
11531 	uint32_t		tgt_cnt;
11532 	struct fcp_rsp_info	*rsp_info;
11533 	struct fcp_reset_elem	*p;
11534 	int			bval;
11535 
11536 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11537 	    KM_NOSLEEP)) == NULL) {
11538 		return (rval);
11539 	}
11540 
11541 	mutex_enter(&ptgt->tgt_mutex);
11542 	if (level == RESET_TARGET) {
11543 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11544 			mutex_exit(&ptgt->tgt_mutex);
11545 			kmem_free(p, sizeof (struct fcp_reset_elem));
11546 			return (rval);
11547 		}
11548 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11549 		(void) strcpy(lun_id, " ");
11550 	} else {
11551 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11552 			mutex_exit(&ptgt->tgt_mutex);
11553 			kmem_free(p, sizeof (struct fcp_reset_elem));
11554 			return (rval);
11555 		}
11556 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11557 
11558 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11559 	}
11560 	tgt_cnt = ptgt->tgt_change_cnt;
11561 
11562 	mutex_exit(&ptgt->tgt_mutex);
11563 
11564 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11565 	    0, 0, NULL, 0)) == NULL) {
11566 		kmem_free(p, sizeof (struct fcp_reset_elem));
11567 		mutex_enter(&ptgt->tgt_mutex);
11568 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11569 		mutex_exit(&ptgt->tgt_mutex);
11570 		return (rval);
11571 	}
11572 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11573 
11574 	/* fill in cmd part of packet */
11575 	cmd = PKT2CMD(pkt);
11576 	if (level == RESET_TARGET) {
11577 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11578 	} else {
11579 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11580 	}
11581 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11582 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11583 
11584 	/* prepare a packet for transport */
11585 	fcp_prepare_pkt(pptr, cmd, plun);
11586 
11587 	if (cmd->cmd_pkt->pkt_time) {
11588 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11589 	} else {
11590 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11591 	}
11592 
11593 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11594 	bval = fcp_dopoll(pptr, cmd);
11595 	fc_ulp_idle_port(pptr->port_fp_handle);
11596 
11597 	/* examine the result of the polled command */
11598 	if (bval == TRAN_ACCEPT) {
11599 		int error = 3;
11600 
11601 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11602 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11603 		    sizeof (struct fcp_rsp));
11604 
11605 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11606 			if (fcp_validate_fcp_response(rsp, pptr) ==
11607 			    FC_SUCCESS) {
11608 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11609 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11610 					    sizeof (struct fcp_rsp), rsp_info,
11611 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11612 					    sizeof (struct fcp_rsp_info));
11613 				}
11614 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11615 					rval = FC_SUCCESS;
11616 					error = 0;
11617 				} else {
11618 					error = 1;
11619 				}
11620 			} else {
11621 				error = 2;
11622 			}
11623 		}
11624 
11625 		switch (error) {
11626 		case 0:
11627 			fcp_log(CE_WARN, pptr->port_dip,
11628 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11629 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11630 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11631 			break;
11632 
11633 		case 1:
11634 			fcp_log(CE_WARN, pptr->port_dip,
11635 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11636 			    " response code=%x",
11637 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11638 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11639 			    rsp_info->rsp_code);
11640 			break;
11641 
11642 		case 2:
11643 			fcp_log(CE_WARN, pptr->port_dip,
11644 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11645 			    " Bad FCP response values: rsvd1=%x,"
11646 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11647 			    " rsplen=%x, senselen=%x",
11648 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11649 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11650 			    rsp->reserved_0, rsp->reserved_1,
11651 			    rsp->fcp_u.fcp_status.reserved_0,
11652 			    rsp->fcp_u.fcp_status.reserved_1,
11653 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11654 			break;
11655 
11656 		default:
11657 			fcp_log(CE_WARN, pptr->port_dip,
11658 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11659 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11660 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11661 			break;
11662 		}
11663 	}
11664 	scsi_destroy_pkt(pkt);
11665 
11666 	if (rval == FC_FAILURE) {
11667 		mutex_enter(&ptgt->tgt_mutex);
11668 		if (level == RESET_TARGET) {
11669 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11670 		} else {
11671 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11672 		}
11673 		mutex_exit(&ptgt->tgt_mutex);
11674 		kmem_free(p, sizeof (struct fcp_reset_elem));
11675 		return (rval);
11676 	}
11677 
11678 	mutex_enter(&pptr->port_mutex);
11679 	if (level == RESET_TARGET) {
11680 		p->tgt = ptgt;
11681 		p->lun = NULL;
11682 	} else {
11683 		p->tgt = NULL;
11684 		p->lun = plun;
11685 	}
11686 	p->tgt = ptgt;
11687 	p->tgt_cnt = tgt_cnt;
11688 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11689 	p->next = pptr->port_reset_list;
11690 	pptr->port_reset_list = p;
11691 
11692 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11693 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11694 	    "Notify ssd of the reset to reinstate the reservations");
11695 
11696 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11697 	    &pptr->port_reset_notify_listf);
11698 
11699 	mutex_exit(&pptr->port_mutex);
11700 
11701 	return (rval);
11702 }
11703 
11704 
11705 /*
11706  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11707  * SCSI capabilities
11708  */
11709 /* ARGSUSED */
11710 static int
11711 fcp_commoncap(struct scsi_address *ap, char *cap,
11712     int val, int tgtonly, int doset)
11713 {
11714 	struct fcp_port		*pptr = ADDR2FCP(ap);
11715 	struct fcp_lun	*plun = ADDR2LUN(ap);
11716 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11717 	int			cidx;
11718 	int			rval = FALSE;
11719 
11720 	if (cap == (char *)0) {
11721 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11722 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11723 		    "fcp_commoncap: invalid arg");
11724 		return (rval);
11725 	}
11726 
11727 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11728 		return (UNDEFINED);
11729 	}
11730 
11731 	/*
11732 	 * Process setcap request.
11733 	 */
11734 	if (doset) {
11735 		/*
11736 		 * At present, we can only set binary (0/1) values
11737 		 */
11738 		switch (cidx) {
11739 		case SCSI_CAP_ARQ:
11740 			if (val == 0) {
11741 				rval = FALSE;
11742 			} else {
11743 				rval = TRUE;
11744 			}
11745 			break;
11746 
11747 		case SCSI_CAP_LUN_RESET:
11748 			if (val) {
11749 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11750 			} else {
11751 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11752 			}
11753 			rval = TRUE;
11754 			break;
11755 
11756 		case SCSI_CAP_SECTOR_SIZE:
11757 			rval = TRUE;
11758 			break;
11759 		default:
11760 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11761 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11762 			    "fcp_setcap: unsupported %d", cidx);
11763 			rval = UNDEFINED;
11764 			break;
11765 		}
11766 
11767 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11768 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11769 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11770 		    "0x%x/0x%x/0x%x/%d",
11771 		    cap, val, tgtonly, doset, rval);
11772 
11773 	} else {
11774 		/*
11775 		 * Process getcap request.
11776 		 */
11777 		switch (cidx) {
11778 		case SCSI_CAP_DMA_MAX:
11779 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11780 
11781 			/*
11782 			 * An adjustment is needed here: qlc reports this as
11783 			 * a 64-bit value while st expects an int, so clamp
11784 			 * it here since nobody wants to touch this.
11785 			 * That still leaves a maximum single block length
11786 			 * of 2 GB, which should last.
11787 			 */
11788 
11789 			if (rval == -1) {
11790 				rval = MAX_INT_DMA;
11791 			}
11792 
11793 			break;
11794 
11795 		case SCSI_CAP_INITIATOR_ID:
11796 			rval = pptr->port_id;
11797 			break;
11798 
11799 		case SCSI_CAP_ARQ:
11800 		case SCSI_CAP_RESET_NOTIFICATION:
11801 		case SCSI_CAP_TAGGED_QING:
11802 			rval = TRUE;
11803 			break;
11804 
11805 		case SCSI_CAP_SCSI_VERSION:
11806 			rval = 3;
11807 			break;
11808 
11809 		case SCSI_CAP_INTERCONNECT_TYPE:
11810 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11811 			    (ptgt->tgt_hard_addr == 0)) {
11812 				rval = INTERCONNECT_FABRIC;
11813 			} else {
11814 				rval = INTERCONNECT_FIBRE;
11815 			}
11816 			break;
11817 
11818 		case SCSI_CAP_LUN_RESET:
11819 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11820 			    TRUE : FALSE;
11821 			break;
11822 
11823 		default:
11824 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11825 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11826 			    "fcp_getcap: unsupported %d", cidx);
11827 			rval = UNDEFINED;
11828 			break;
11829 		}
11830 
11831 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11832 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11833 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11834 		    "0x%x/0x%x/0x%x/%d",
11835 		    cap, val, tgtonly, doset, rval);
11836 	}
11837 
11838 	return (rval);
11839 }
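
/*
 * Illustrative sketch (not part of this driver): how a target driver
 * might query and set the capabilities handled by fcp_commoncap()
 * through the standard scsi_ifgetcap()/scsi_ifsetcap() interfaces.
 * The routine name is hypothetical, and the "lun-reset" capability
 * string is assumed here to correspond to SCSI_CAP_LUN_RESET.
 */
static int
fcp_example_enable_lun_reset(struct scsi_address *ap)
{
	/* only set the capability if it isn't already enabled */
	if (scsi_ifgetcap(ap, "lun-reset", 1) == 1) {
		return (1);
	}
	return (scsi_ifsetcap(ap, "lun-reset", 1, 1));
}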
11840 
11841 /*
11842  * called by the transport to get the port-wwn and lun
11843  * properties of this device, and to create a "name" based on them
11844  *
11845  * these properties don't exist on sun4m
11846  *
11847  * return 1 for success else return 0
11848  */
11849 /* ARGSUSED */
11850 static int
11851 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11852 {
11853 	int			i;
11854 	int			*lun;
11855 	int			numChars;
11856 	uint_t			nlun;
11857 	uint_t			count;
11858 	uint_t			nbytes;
11859 	uchar_t			*bytes;
11860 	uint16_t		lun_num;
11861 	uint32_t		tgt_id;
11862 	char			**conf_wwn;
11863 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11864 	uchar_t			barray[FC_WWN_SIZE];
11865 	dev_info_t		*tgt_dip;
11866 	struct fcp_tgt	*ptgt;
11867 	struct fcp_port	*pptr;
11868 	struct fcp_lun	*plun;
11869 
11870 	ASSERT(sd != NULL);
11871 	ASSERT(name != NULL);
11872 
11873 	tgt_dip = sd->sd_dev;
11874 	pptr = ddi_get_soft_state(fcp_softstate,
11875 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11876 	if (pptr == NULL) {
11877 		return (0);
11878 	}
11879 
11880 	ASSERT(tgt_dip != NULL);
11881 
11882 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11883 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11884 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11885 		name[0] = '\0';
11886 		return (0);
11887 	}
11888 
11889 	if (nlun == 0) {
11890 		ddi_prop_free(lun);
11891 		return (0);
11892 	}
11893 
11894 	lun_num = lun[0];
11895 	ddi_prop_free(lun);
11896 
11897 	/*
11898 	 * Lookup for .conf WWN property
11899 	 */
11900 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11901 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11902 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11903 		ASSERT(count >= 1);
11904 
11905 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
11906 		ddi_prop_free(conf_wwn);
11907 		mutex_enter(&pptr->port_mutex);
11908 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
11909 			mutex_exit(&pptr->port_mutex);
11910 			return (0);
11911 		}
11912 		ptgt = plun->lun_tgt;
11913 		mutex_exit(&pptr->port_mutex);
11914 
11915 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
11916 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
11917 
11918 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
11919 		    ptgt->tgt_hard_addr != 0) {
11920 			tgt_id = (uint32_t)fcp_alpa_to_switch[
11921 			    ptgt->tgt_hard_addr];
11922 		} else {
11923 			tgt_id = ptgt->tgt_d_id;
11924 		}
11925 
11926 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
11927 		    TARGET_PROP, tgt_id);
11928 	}
11929 
11930 	/* get our port-wwn property */
11931 	bytes = NULL;
11932 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
11933 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
11934 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
11935 		if (bytes != NULL) {
11936 			ddi_prop_free(bytes);
11937 		}
11938 		return (0);
11939 	}
11940 
11941 	for (i = 0; i < FC_WWN_SIZE; i++) {
11942 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
11943 	}
11944 
11945 	/* Stick in the address of the form "wWWN,LUN" */
11946 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
11947 
11948 	ASSERT(numChars < len);
11949 	if (numChars >= len) {
11950 		fcp_log(CE_WARN, pptr->port_dip,
11951 		    "!fcp_scsi_get_name: "
11952 		    "name parameter length too small, it needs to be %d",
11953 		    numChars+1);
11954 	}
11955 
11956 	ddi_prop_free(bytes);
11957 
11958 	return (1);
11959 }
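
/*
 * Illustrative sketch (not part of this driver): the unit address
 * produced above has the form "w<port-wwn>,<lun>", e.g.
 * "w21000020370cb8a2,0" for LUN 0 behind a hypothetical port WWN of
 * 21000020370cb8a2.  A minimal formatter under that assumption
 * (routine name hypothetical):
 */
static int
fcp_example_format_name(uchar_t wwn[FC_WWN_SIZE], uint16_t lun,
    char *buf, int len)
{
	char	tbuf[(FC_WWN_SIZE << 1) + 1];
	int	i;

	for (i = 0; i < FC_WWN_SIZE; i++) {
		(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
	}
	return (snprintf(buf, len, "w%s,%x", tbuf, lun));
}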
11960 
11961 
11962 /*
11963  * called by the transport to get the SCSI target id value, returning
11964  * it in "name"
11965  *
11966  * this isn't needed/used on sun4m
11967  *
11968  * return 1 for success else return 0
11969  */
11970 /* ARGSUSED */
11971 static int
11972 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
11973 {
11974 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
11975 	struct fcp_tgt	*ptgt;
11976 	int    numChars;
11977 
11978 	if (plun == NULL) {
11979 		return (0);
11980 	}
11981 
11982 	if ((ptgt = plun->lun_tgt) == NULL) {
11983 		return (0);
11984 	}
11985 
11986 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
11987 
11988 	ASSERT(numChars < len);
11989 	if (numChars >= len) {
11990 		fcp_log(CE_WARN, NULL,
11991 		    "!fcp_scsi_get_bus_addr: "
11992 		    "name parameter length too small, it needs to be %d",
11993 		    numChars+1);
11994 	}
11995 
11996 	return (1);
11997 }
11998 
11999 
12000 /*
12001  * called internally to reset the link where the specified port lives
12002  */
12003 static int
12004 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12005 {
12006 	la_wwn_t		wwn;
12007 	struct fcp_lun	*plun;
12008 	struct fcp_tgt	*ptgt;
12009 
12010 	/* disable restart of lip if we're suspended */
12011 	mutex_enter(&pptr->port_mutex);
12012 
12013 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12014 	    FCP_STATE_POWER_DOWN)) {
12015 		mutex_exit(&pptr->port_mutex);
12016 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12017 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12018 		    "fcp_linkreset, fcp%d: link reset "
12019 		    "disabled due to DDI_SUSPEND",
12020 		    ddi_get_instance(pptr->port_dip));
12021 		return (FC_FAILURE);
12022 	}
12023 
12024 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12025 		mutex_exit(&pptr->port_mutex);
12026 		return (FC_SUCCESS);
12027 	}
12028 
12029 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12030 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12031 
12032 	/*
12033 	 * If ap == NULL assume local link reset.
12034 	 */
12035 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12036 		plun = ADDR2LUN(ap);
12037 		ptgt = plun->lun_tgt;
12038 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12039 	} else {
12040 		bzero((caddr_t)&wwn, sizeof (wwn));
12041 	}
12042 	mutex_exit(&pptr->port_mutex);
12043 
12044 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12045 }
12046 
12047 
12048 /*
12049  * called from fcp_port_attach() to resume a port
12050  * return DDI_* success/failure status
12051  * acquires and releases the global mutex
12052  * acquires and releases the port mutex
12053  */
12054 /*ARGSUSED*/
12055 
12056 static int
12057 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12058     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12059 {
12060 	int			res = DDI_FAILURE; /* default result */
12061 	struct fcp_port	*pptr;		/* port state ptr */
12062 	uint32_t		alloc_cnt;
12063 	uint32_t		max_cnt;
12064 	fc_portmap_t		*tmp_list = NULL;
12065 
12066 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12067 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12068 	    instance);
12069 
12070 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12071 		cmn_err(CE_WARN, "fcp: bad soft state");
12072 		return (res);
12073 	}
12074 
12075 	mutex_enter(&pptr->port_mutex);
12076 	switch (cmd) {
12077 	case FC_CMD_RESUME:
12078 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12079 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12080 		break;
12081 
12082 	case FC_CMD_POWER_UP:
12083 		/*
12084 		 * If the port is DDI_SUSPENded, defer rediscovery
12085 		 * until DDI_RESUME occurs
12086 		 */
12087 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12088 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12089 			mutex_exit(&pptr->port_mutex);
12090 			return (DDI_SUCCESS);
12091 		}
12092 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12093 	}
12094 	pptr->port_id = s_id;
12095 	pptr->port_state = FCP_STATE_INIT;
12096 	mutex_exit(&pptr->port_mutex);
12097 
12098 	/*
12099 	 * Make a copy of ulp_port_info as fctl allocates
12100 	 * a temp struct.
12101 	 */
12102 	(void) fcp_cp_pinfo(pptr, pinfo);
12103 
12104 	mutex_enter(&fcp_global_mutex);
12105 	if (fcp_watchdog_init++ == 0) {
12106 		fcp_watchdog_tick = fcp_watchdog_timeout *
12107 		    drv_usectohz(1000000);
12108 		fcp_watchdog_id = timeout(fcp_watch,
12109 		    NULL, fcp_watchdog_tick);
12110 	}
12111 	mutex_exit(&fcp_global_mutex);
12112 
12113 	/*
12114 	 * Handle various topologies and link states.
12115 	 */
12116 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12117 	case FC_STATE_OFFLINE:
12118 		/*
12119 		 * Wait for ONLINE, at which time a state
12120 		 * change will cause a statec_callback
12121 		 */
12122 		res = DDI_SUCCESS;
12123 		break;
12124 
12125 	case FC_STATE_ONLINE:
12126 
12127 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12128 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12129 			res = DDI_SUCCESS;
12130 			break;
12131 		}
12132 
12133 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12134 		    !fcp_enable_auto_configuration) {
12135 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12136 			if (tmp_list == NULL) {
12137 				if (!alloc_cnt) {
12138 					res = DDI_SUCCESS;
12139 				}
12140 				break;
12141 			}
12142 			max_cnt = alloc_cnt;
12143 		} else {
12144 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12145 
12146 			alloc_cnt = FCP_MAX_DEVICES;
12147 
12148 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12149 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12150 			    KM_NOSLEEP)) == NULL) {
12151 				fcp_log(CE_WARN, pptr->port_dip,
12152 				    "!fcp%d: failed to allocate portmap",
12153 				    instance);
12154 				break;
12155 			}
12156 
12157 			max_cnt = alloc_cnt;
12158 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12159 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12160 			    FC_SUCCESS) {
12161 				caddr_t msg;
12162 
12163 				(void) fc_ulp_error(res, &msg);
12164 
12165 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12166 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12167 				    "resume failed getportmap: reason=0x%x",
12168 				    res);
12169 
12170 				fcp_log(CE_WARN, pptr->port_dip,
12171 				    "!failed to get port map : %s", msg);
12172 				break;
12173 			}
12174 			if (max_cnt > alloc_cnt) {
12175 				alloc_cnt = max_cnt;
12176 			}
12177 		}
12178 
12179 		/*
12180 		 * do the SCSI device discovery and create
12181 		 * the devinfos
12182 		 */
12183 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12184 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12185 		    max_cnt, pptr->port_id);
12186 
12187 		res = DDI_SUCCESS;
12188 		break;
12189 
12190 	default:
12191 		fcp_log(CE_WARN, pptr->port_dip,
12192 		    "!fcp%d: invalid port state at attach=0x%x",
12193 		    instance, pptr->port_phys_state);
12194 
12195 		mutex_enter(&pptr->port_mutex);
12196 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12197 		mutex_exit(&pptr->port_mutex);
12198 		res = DDI_SUCCESS;
12199 
12200 		break;
12201 	}
12202 
12203 	if (tmp_list != NULL) {
12204 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12205 	}
12206 
12207 	return (res);
12208 }
12209 
12210 
12211 static void
12212 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12213 {
12214 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12215 	pptr->port_dip = pinfo->port_dip;
12216 	pptr->port_fp_handle = pinfo->port_handle;
12217 	pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12218 	pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12219 	pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12220 	pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12221 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12222 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12223 	pptr->port_phys_state = pinfo->port_state;
12224 	pptr->port_topology = pinfo->port_flags;
12225 	pptr->port_reset_action = pinfo->port_reset_action;
12226 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12227 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12228 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12229 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12230 }
12231 
12232 /*
12233  * If the element's wait field is set to 1 then
12234  * another thread is waiting for the operation to complete.  Once
12235  * it is complete, the waiting thread is signaled and the element is
12236  * freed by the waiting thread.  If the element's wait field is set to 0
12237  * the element is freed here.
12238  */
12239 static void
12240 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12241 {
12242 	ASSERT(elem != NULL);
12243 	mutex_enter(&elem->mutex);
12244 	elem->result = result;
12245 	if (elem->wait) {
12246 		elem->wait = 0;
12247 		cv_signal(&elem->cv);
12248 		mutex_exit(&elem->mutex);
12249 	} else {
12250 		mutex_exit(&elem->mutex);
12251 		cv_destroy(&elem->cv);
12252 		mutex_destroy(&elem->mutex);
12253 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12254 	}
12255 }
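
/*
 * Illustrative sketch (not part of this driver): the waiting side of
 * the protocol described above.  A caller that queued an element with
 * elem->wait set to 1 blocks on elem->cv until fcp_process_elem()
 * clears wait and signals, then frees the element itself.  The
 * routine name is hypothetical.
 */
static int
fcp_example_wait_for_elem(struct fcp_hp_elem *elem)
{
	int	result;

	mutex_enter(&elem->mutex);
	while (elem->wait) {
		cv_wait(&elem->cv, &elem->mutex);
	}
	result = elem->result;
	mutex_exit(&elem->mutex);

	cv_destroy(&elem->cv);
	mutex_destroy(&elem->mutex);
	kmem_free(elem, sizeof (struct fcp_hp_elem));

	return (result);
}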
12256 
12257 /*
12258  * This function is invoked from the taskq thread to allocate
12259  * devinfo nodes and to online/offline them.
12260  */
12261 static void
12262 fcp_hp_task(void *arg)
12263 {
12264 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12265 	struct fcp_lun	*plun = elem->lun;
12266 	struct fcp_port		*pptr = elem->port;
12267 	int			result;
12268 
12269 	ASSERT(elem->what == FCP_ONLINE ||
12270 	    elem->what == FCP_OFFLINE ||
12271 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12272 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12273 
12274 	mutex_enter(&pptr->port_mutex);
12275 	mutex_enter(&plun->lun_mutex);
12276 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12277 	    plun->lun_event_count != elem->event_cnt) ||
12278 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12279 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12280 		mutex_exit(&plun->lun_mutex);
12281 		mutex_exit(&pptr->port_mutex);
12282 		fcp_process_elem(elem, NDI_FAILURE);
12283 		return;
12284 	}
12285 	mutex_exit(&plun->lun_mutex);
12286 	mutex_exit(&pptr->port_mutex);
12287 
12288 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12289 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12290 	fcp_process_elem(elem, result);
12291 }
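
/*
 * Illustrative sketch (not part of this driver): how an fcp_hp_elem
 * might be handed to a task queue so that fcp_hp_task() runs in taskq
 * context as described above.  The taskq handle, the use of
 * taskq_dispatch() (which requires <sys/taskq.h>) and the routine
 * name are assumptions for illustration only.
 */
static int
fcp_example_dispatch_hp(taskq_t *tq, struct fcp_hp_elem *elem)
{
	if (taskq_dispatch(tq, fcp_hp_task, elem, TQ_NOSLEEP) == 0) {
		/* out of resources; caller must handle the element itself */
		return (0);
	}
	return (1);
}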
12292 
12293 
12294 static child_info_t *
12295 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12296     int tcount)
12297 {
12298 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12299 
12300 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12301 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12302 
12303 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12304 		/*
12305 		 * Child has not been created yet. Create the child device
12306 		 * based on the per-Lun flags.
12307 		 */
12308 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12309 			plun->lun_cip =
12310 			    CIP(fcp_create_dip(plun, lcount, tcount));
12311 			plun->lun_mpxio = 0;
12312 		} else {
12313 			plun->lun_cip =
12314 			    CIP(fcp_create_pip(plun, lcount, tcount));
12315 			plun->lun_mpxio = 1;
12316 		}
12317 	} else {
12318 		plun->lun_cip = cip;
12319 	}
12320 
12321 	return (plun->lun_cip);
12322 }
12323 
12324 
12325 static int
12326 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12327 {
12328 	int		rval = FC_FAILURE;
12329 	dev_info_t	*pdip;
12330 	struct dev_info	*dip;
12331 	int		circular;
12332 
12333 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12334 
12335 	pdip = plun->lun_tgt->tgt_port->port_dip;
12336 
12337 	if (plun->lun_cip == NULL) {
12338 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12339 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12340 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12341 		    "plun: %p lun state: %x num: %d target state: %x",
12342 		    plun, plun->lun_state, plun->lun_num,
12343 		    plun->lun_tgt->tgt_port->port_state);
12344 		return (rval);
12345 	}
12346 	ndi_devi_enter(pdip, &circular);
12347 	dip = DEVI(pdip)->devi_child;
12348 	while (dip) {
12349 		if (dip == DEVI(cdip)) {
12350 			rval = FC_SUCCESS;
12351 			break;
12352 		}
12353 		dip = dip->devi_sibling;
12354 	}
12355 	ndi_devi_exit(pdip, circular);
12356 	return (rval);
12357 }
12358 
12359 static int
12360 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12361 {
12362 	int		rval = FC_FAILURE;
12363 
12364 	ASSERT(plun != NULL);
12365 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12366 
12367 	if (plun->lun_mpxio == 0) {
12368 		rval = fcp_is_dip_present(plun, DIP(cip));
12369 	} else {
12370 		rval = fcp_is_pip_present(plun, PIP(cip));
12371 	}
12372 
12373 	return (rval);
12374 }
12375 
12376 /*
12377  *     Function: fcp_create_dip
12378  *
12379  *  Description: Creates a dev_info_t structure for the LUN specified by the
12380  *		 caller.
12381  *
12382  *     Argument: plun		Lun structure
12383  *		 link_cnt	Link state count.
12384  *		 tgt_cnt	Target state change count.
12385  *
12386  * Return Value: NULL if it failed
12387  *		 dev_info_t structure address if it succeeded
12388  *
12389  *	Context: Kernel context
12390  */
12391 static dev_info_t *
12392 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12393 {
12394 	int			failure = 0;
12395 	uint32_t		tgt_id;
12396 	uint64_t		sam_lun;
12397 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12398 	struct fcp_port	*pptr = ptgt->tgt_port;
12399 	dev_info_t		*pdip = pptr->port_dip;
12400 	dev_info_t		*cdip = NULL;
12401 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12402 	char			*nname = NULL;
12403 	char			**compatible = NULL;
12404 	int			ncompatible;
12405 	char			*scsi_binding_set;
12406 	char			t_pwwn[17];
12407 
12408 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12409 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12410 
12411 	/* get the 'scsi-binding-set' property */
12412 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12413 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12414 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12415 		scsi_binding_set = NULL;
12416 	}
12417 
12418 	/* determine the node name and compatible */
12419 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12420 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12421 	if (scsi_binding_set) {
12422 		ddi_prop_free(scsi_binding_set);
12423 	}
12424 
12425 	if (nname == NULL) {
12426 #ifdef	DEBUG
12427 		cmn_err(CE_WARN, "%s%d: no driver for "
12428 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12429 		    "	 compatible: %s",
12430 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12431 		    ptgt->tgt_port_wwn.raw_wwn[0],
12432 		    ptgt->tgt_port_wwn.raw_wwn[1],
12433 		    ptgt->tgt_port_wwn.raw_wwn[2],
12434 		    ptgt->tgt_port_wwn.raw_wwn[3],
12435 		    ptgt->tgt_port_wwn.raw_wwn[4],
12436 		    ptgt->tgt_port_wwn.raw_wwn[5],
12437 		    ptgt->tgt_port_wwn.raw_wwn[6],
12438 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12439 		    *compatible);
12440 #endif	/* DEBUG */
12441 		failure++;
12442 		goto end_of_fcp_create_dip;
12443 	}
12444 
12445 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12446 
12447 	/*
12448 	 * If the old_dip does not match the cdip, that means there is
12449 	 * some property change.  Since we'll be using the cdip, we need
12450 	 * to offline the old_dip.  If the state contains FCP_LUN_CHANGED
12451 	 * then the dtype for the device has been updated.  Offline the
12452 	 * old device and create a new device with the new device type.
12453 	 * Refer to bug: 4764752
12454 	 */
12455 	if (old_dip && (cdip != old_dip ||
12456 	    plun->lun_state & FCP_LUN_CHANGED)) {
12457 		plun->lun_state &= ~(FCP_LUN_INIT);
12458 		mutex_exit(&plun->lun_mutex);
12459 		mutex_exit(&pptr->port_mutex);
12460 
12461 		mutex_enter(&ptgt->tgt_mutex);
12462 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12463 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12464 		mutex_exit(&ptgt->tgt_mutex);
12465 
12466 #ifdef DEBUG
12467 		if (cdip != NULL) {
12468 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12469 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12470 			    "Old dip=%p; New dip=%p don't match", old_dip,
12471 			    cdip);
12472 		} else {
12473 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12474 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12475 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12476 		}
12477 #endif
12478 
12479 		mutex_enter(&pptr->port_mutex);
12480 		mutex_enter(&plun->lun_mutex);
12481 	}
12482 
12483 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12484 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12485 		if (ndi_devi_alloc(pptr->port_dip, nname,
12486 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12487 			failure++;
12488 			goto end_of_fcp_create_dip;
12489 		}
12490 	}
12491 
12492 	/*
12493 	 * Previously all the properties for the devinfo were destroyed here
12494 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12495 	 * the devid property (and other properties established by the target
12496 	 * driver or framework) which the code does not always recreate, this
12497 	 * call was removed.
12498 	 * This opens a theoretical possibility that we may return with a
12499 	 * stale devid on the node if the scsi entity behind the fibre channel
12500 	 * lun has changed.
12501 	 */
12502 
12503 	/* decorate the node with compatible */
12504 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12505 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12506 		failure++;
12507 		goto end_of_fcp_create_dip;
12508 	}
12509 
12510 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12511 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12512 		failure++;
12513 		goto end_of_fcp_create_dip;
12514 	}
12515 
12516 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12517 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12518 		failure++;
12519 		goto end_of_fcp_create_dip;
12520 	}
12521 
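	/*
	 * Render the 8-byte port WWN as a 16-character ASCII hex string
	 * and publish it through the TGT_PORT_PROP string property.
	 */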
12522 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12523 	t_pwwn[16] = '\0';
12524 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12525 	    != DDI_PROP_SUCCESS) {
12526 		failure++;
12527 		goto end_of_fcp_create_dip;
12528 	}
12529 
12530 	/*
12531 	 * If there is no hard address, we may have to fall back to
12532 	 * the WWN.  Either way it is important to recognize this
12533 	 * early so ssd can be informed of the right interconnect
12534 	 * type.
12535 	 */
12536 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12537 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12538 	} else {
12539 		tgt_id = ptgt->tgt_d_id;
12540 	}
12541 
12542 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12543 	    tgt_id) != DDI_PROP_SUCCESS) {
12544 		failure++;
12545 		goto end_of_fcp_create_dip;
12546 	}
12547 
12548 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12549 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12550 		failure++;
12551 		goto end_of_fcp_create_dip;
12552 	}
12553 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12554 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12555 	    sam_lun) != DDI_PROP_SUCCESS) {
12556 		failure++;
12557 		goto end_of_fcp_create_dip;
12558 	}
12559 
12560 end_of_fcp_create_dip:
12561 	scsi_hba_nodename_compatible_free(nname, compatible);
12562 
12563 	if (cdip != NULL && failure) {
12564 		(void) ndi_prop_remove_all(cdip);
12565 		(void) ndi_devi_free(cdip);
12566 		cdip = NULL;
12567 	}
12568 
12569 	return (cdip);
12570 }
12571 
12572 /*
12573  *     Function: fcp_create_pip
12574  *
12575  *  Description: Creates a path info node for the LUN specified by the caller.
12576  *
12577  *     Argument: plun		Lun structure
12578  *		 lcount		Link state count.
12579  *		 tcount		Target state change count.
12580  *
12581  * Return Value: NULL if it failed
12582  *		 mdi_pathinfo_t structure address if it succeeded
12583  *
12584  *	Context: Kernel context
12585  */
12586 static mdi_pathinfo_t *
12587 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12588 {
12589 	int			i;
12590 	char			buf[MAXNAMELEN];
12591 	char			uaddr[MAXNAMELEN];
12592 	int			failure = 0;
12593 	uint32_t		tgt_id;
12594 	uint64_t		sam_lun;
12595 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12596 	struct fcp_port	*pptr = ptgt->tgt_port;
12597 	dev_info_t		*pdip = pptr->port_dip;
12598 	mdi_pathinfo_t		*pip = NULL;
12599 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12600 	char			*nname = NULL;
12601 	char			**compatible = NULL;
12602 	int			ncompatible;
12603 	char			*scsi_binding_set;
12604 	char			t_pwwn[17];
12605 
12606 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12607 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12608 
12609 	scsi_binding_set = "vhci";
12610 
12611 	/* determine the node name and compatible */
12612 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12613 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12614 
12615 	if (nname == NULL) {
12616 #ifdef	DEBUG
12617 		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12618 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12619 		    "	 compatible: %s",
12620 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12621 		    ptgt->tgt_port_wwn.raw_wwn[0],
12622 		    ptgt->tgt_port_wwn.raw_wwn[1],
12623 		    ptgt->tgt_port_wwn.raw_wwn[2],
12624 		    ptgt->tgt_port_wwn.raw_wwn[3],
12625 		    ptgt->tgt_port_wwn.raw_wwn[4],
12626 		    ptgt->tgt_port_wwn.raw_wwn[5],
12627 		    ptgt->tgt_port_wwn.raw_wwn[6],
12628 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12629 		    *compatible);
12630 #endif	/* DEBUG */
12631 		failure++;
12632 		goto end_of_fcp_create_pip;
12633 	}
12634 
12635 	pip = fcp_find_existing_pip(plun, pdip);
12636 
12637 	/*
12638 	 * If the old_pip does not match the pip, some property has
12639 	 * changed.  Since we will be using the pip, we need to offline
12640 	 * the old_pip.  If the state contains FCP_LUN_CHANGED, the dtype
12641 	 * for the device has been updated; offline the old device and
12642 	 * create a new device with the new device type.
12643 	 * Refer to bug: 4764752
12644 	 */
12645 	if (old_pip && (pip != old_pip ||
12646 	    plun->lun_state & FCP_LUN_CHANGED)) {
12647 		plun->lun_state &= ~(FCP_LUN_INIT);
12648 		mutex_exit(&plun->lun_mutex);
12649 		mutex_exit(&pptr->port_mutex);
12650 
12651 		mutex_enter(&ptgt->tgt_mutex);
12652 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12653 		    FCP_OFFLINE, lcount, tcount,
12654 		    NDI_DEVI_REMOVE, 0);
12655 		mutex_exit(&ptgt->tgt_mutex);
12656 
12657 		if (pip != NULL) {
12658 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12659 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12660 			    "Old pip=%p; New pip=%p don't match",
12661 			    old_pip, pip);
12662 		} else {
12663 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12664 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12665 			    "Old pip=%p; New pip=NULL don't match",
12666 			    old_pip);
12667 		}
12668 
12669 		mutex_enter(&pptr->port_mutex);
12670 		mutex_enter(&plun->lun_mutex);
12671 	}
12672 
12673 	/*
12674 	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size, does
12675 	 * not depend on the target, the same truncation should not
12676 	 * happen here unless the standards change the FC_WWN_SIZE
12677 	 * value to something larger than MAXNAMELEN (currently
12678 	 * 255 bytes).
12679 	 */
12680 
12681 	for (i = 0; i < FC_WWN_SIZE; i++) {
12682 		(void) sprintf(&buf[i << 1], "%02x",
12683 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12684 	}
12685 
12686 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12687 	    buf, plun->lun_num);
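	/*
	 * uaddr now holds the MPxIO unit address for this LUN, of the
	 * form "w<16 hex digit port WWN>,<lun number in hex>", for
	 * example (hypothetically) "w21000020370b6d57,0".
	 */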
12688 
12689 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12690 		/*
12691 		 * Release the locks before calling into
12692 		 * mdi_pi_alloc_compatible() since this can result in a
12693 		 * callback into fcp which can result in a deadlock
12694 		 * (see bug # 4870272).
12695 		 *
12696 		 * Basically, what we are trying to avoid is the scenario where
12697 		 * one thread does ndi_devi_enter() and tries to grab
12698 		 * fcp_mutex and another does it the other way round.
12699 		 *
12700 		 * But before we do that, make sure that nobody releases the
12701 		 * port in the meantime. We can do this by setting a flag.
12702 		 */
12703 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12704 		pptr->port_state |= FCP_STATE_IN_MDI;
12705 		mutex_exit(&plun->lun_mutex);
12706 		mutex_exit(&pptr->port_mutex);
12707 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12708 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12709 			fcp_log(CE_WARN, pptr->port_dip,
12710 			    "!path alloc failed: 0x%p", plun);
12711 			mutex_enter(&pptr->port_mutex);
12712 			mutex_enter(&plun->lun_mutex);
12713 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12714 			failure++;
12715 			goto end_of_fcp_create_pip;
12716 		}
12717 		mutex_enter(&pptr->port_mutex);
12718 		mutex_enter(&plun->lun_mutex);
12719 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12720 	} else {
12721 		(void) mdi_prop_remove(pip, NULL);
12722 	}
12723 
12724 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12725 
12726 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12727 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12728 	    != DDI_PROP_SUCCESS) {
12729 		failure++;
12730 		goto end_of_fcp_create_pip;
12731 	}
12732 
12733 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12734 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12735 	    != DDI_PROP_SUCCESS) {
12736 		failure++;
12737 		goto end_of_fcp_create_pip;
12738 	}
12739 
12740 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12741 	t_pwwn[16] = '\0';
12742 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12743 	    != DDI_PROP_SUCCESS) {
12744 		failure++;
12745 		goto end_of_fcp_create_pip;
12746 	}
12747 
12748 	/*
12749 	 * If there is no hard address, we may have to fall back to
12750 	 * the WWN.  Either way it is important to recognize this
12751 	 * early so ssd can be informed of the right interconnect
12752 	 * type.
12753 	 */
12754 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12755 	    ptgt->tgt_hard_addr != 0) {
12756 		tgt_id = (uint32_t)
12757 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12758 	} else {
12759 		tgt_id = ptgt->tgt_d_id;
12760 	}
12761 
12762 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12763 	    != DDI_PROP_SUCCESS) {
12764 		failure++;
12765 		goto end_of_fcp_create_pip;
12766 	}
12767 
12768 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12769 	    != DDI_PROP_SUCCESS) {
12770 		failure++;
12771 		goto end_of_fcp_create_pip;
12772 	}
12773 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12774 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12775 	    != DDI_PROP_SUCCESS) {
12776 		failure++;
12777 		goto end_of_fcp_create_pip;
12778 	}
12779 
12780 end_of_fcp_create_pip:
12781 	scsi_hba_nodename_compatible_free(nname, compatible);
12782 
12783 	if (pip != NULL && failure) {
12784 		(void) mdi_prop_remove(pip, NULL);
12785 		mutex_exit(&plun->lun_mutex);
12786 		mutex_exit(&pptr->port_mutex);
12787 		(void) mdi_pi_free(pip, 0);
12788 		mutex_enter(&pptr->port_mutex);
12789 		mutex_enter(&plun->lun_mutex);
12790 		pip = NULL;
12791 	}
12792 
12793 	return (pip);
12794 }
12795 
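/*
 * walk the children of pdip looking for a devinfo node with the given
 * node name whose NODE_WWN_PROP, PORT_WWN_PROP, TARGET_PROP and
 * LUN_PROP properties all match the LUN described by plun
 *
 * returns the matching dev_info_t, or NULL if none is found
 */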
12796 static dev_info_t *
12797 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12798 {
12799 	uint_t			nbytes;
12800 	uchar_t			*bytes;
12801 	uint_t			nwords;
12802 	uint32_t		tgt_id;
12803 	int			*words;
12804 	dev_info_t		*cdip;
12805 	dev_info_t		*ndip;
12806 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12807 	struct fcp_port	*pptr = ptgt->tgt_port;
12808 	int			circular;
12809 
12810 	ndi_devi_enter(pdip, &circular);
12811 
12812 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12813 	while ((cdip = ndip) != NULL) {
12814 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12815 
12816 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12817 			continue;
12818 		}
12819 
12820 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12821 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12822 		    &nbytes) != DDI_PROP_SUCCESS) {
12823 			continue;
12824 		}
12825 
12826 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12827 			if (bytes != NULL) {
12828 				ddi_prop_free(bytes);
12829 			}
12830 			continue;
12831 		}
12832 		ASSERT(bytes != NULL);
12833 
12834 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12835 			ddi_prop_free(bytes);
12836 			continue;
12837 		}
12838 
12839 		ddi_prop_free(bytes);
12840 
12841 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12842 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12843 		    &nbytes) != DDI_PROP_SUCCESS) {
12844 			continue;
12845 		}
12846 
12847 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12848 			if (bytes != NULL) {
12849 				ddi_prop_free(bytes);
12850 			}
12851 			continue;
12852 		}
12853 		ASSERT(bytes != NULL);
12854 
12855 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12856 			ddi_prop_free(bytes);
12857 			continue;
12858 		}
12859 
12860 		ddi_prop_free(bytes);
12861 
12862 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12863 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12864 		    &nwords) != DDI_PROP_SUCCESS) {
12865 			continue;
12866 		}
12867 
12868 		if (nwords != 1 || words == NULL) {
12869 			if (words != NULL) {
12870 				ddi_prop_free(words);
12871 			}
12872 			continue;
12873 		}
12874 		ASSERT(words != NULL);
12875 
12876 		/*
12877 		 * If there is no hard address, we may have to fall back to
12878 		 * the WWN.  Either way it is important to recognize this
12879 		 * early so ssd can be informed of the right interconnect
12880 		 * type.
12881 		 */
12882 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12883 		    ptgt->tgt_hard_addr != 0) {
12884 			tgt_id =
12885 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12886 		} else {
12887 			tgt_id = ptgt->tgt_d_id;
12888 		}
12889 
12890 		if (tgt_id != (uint32_t)*words) {
12891 			ddi_prop_free(words);
12892 			continue;
12893 		}
12894 		ddi_prop_free(words);
12895 
12896 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12897 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
12898 		    &nwords) != DDI_PROP_SUCCESS) {
12899 			continue;
12900 		}
12901 
12902 		if (nwords != 1 || words == NULL) {
12903 			if (words != NULL) {
12904 				ddi_prop_free(words);
12905 			}
12906 			continue;
12907 		}
12908 		ASSERT(words != NULL);
12909 
12910 		if (plun->lun_num == (uint16_t)*words) {
12911 			ddi_prop_free(words);
12912 			break;
12913 		}
12914 		ddi_prop_free(words);
12915 	}
12916 	ndi_devi_exit(pdip, circular);
12917 
12918 	return (cdip);
12919 }
12920 
12921 
12922 static int
12923 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
12924 {
12925 	dev_info_t	*pdip;
12926 	char		buf[MAXNAMELEN];
12927 	char		uaddr[MAXNAMELEN];
12928 	int		rval = FC_FAILURE;
12929 
12930 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12931 
12932 	pdip = plun->lun_tgt->tgt_port->port_dip;
12933 
12934 	/*
12935 	 * Check whether pip (and not plun->lun_cip) is NULL.  plun->lun_cip
12936 	 * can be non-NULL even when the LUN is no longer there, as when a
12937 	 * LUN is configured and then deleted on the device end (the T3/T4
12938 	 * case).  In such cases, pip will be NULL.
12939 	 *
12940 	 * If the device generates an RSCN, the LUN gets offlined when it
12941 	 * disappears and a new LUN gets created when it is rediscovered on
12942 	 * the device.  If we checked lun_cip here, the LUN would not end
12943 	 * up getting onlined since this function would return FC_SUCCESS.
12944 	 *
12945 	 * The behavior is different on other devices.  For instance, on an
12946 	 * HDS, no RSCN was generated by the device but the next I/O
12947 	 * generated a check condition and rediscovery got triggered that
12948 	 * way.  So, in such cases, this path will not be
12949 	 * exercised.
12950 	 */
12951 	if (pip == NULL) {
12952 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12953 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
12954 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
12955 		    "plun: %p lun state: %x num: %d target state: %x",
12956 		    plun, plun->lun_state, plun->lun_num,
12957 		    plun->lun_tgt->tgt_port->port_state);
12958 		return (rval);
12959 	}
12960 
12961 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
12962 
12963 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12964 
12965 	if (plun->lun_old_guid) {
12966 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
12967 			rval = FC_SUCCESS;
12968 		}
12969 	} else {
12970 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
12971 			rval = FC_SUCCESS;
12972 		}
12973 	}
12974 	return (rval);
12975 }
12976 
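/*
 * look up the mdi_pathinfo node for this LUN under pdip, using the
 * LUN guid and the unit address built from the target port WWN and
 * LUN number
 */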
12977 static mdi_pathinfo_t *
12978 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
12979 {
12980 	char			buf[MAXNAMELEN];
12981 	char			uaddr[MAXNAMELEN];
12982 	mdi_pathinfo_t		*pip;
12983 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12984 	struct fcp_port	*pptr = ptgt->tgt_port;
12985 
12986 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12987 
12988 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
12989 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
12990 
12991 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
12992 
12993 	return (pip);
12994 }
12995 
12996 
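/*
 * online the child node (devinfo node or MPxIO path) of the given LUN
 *
 * for non-MPxIO LUNs the devinfo node is onlined (or only bound to a
 * driver if the parent is not attached yet); for MPxIO LUNs the path
 * is onlined and, if MPxIO does not support the device, the LUN falls
 * back to a legacy devinfo node
 *
 * called with the port and LUN mutexes held; they are dropped and
 * reacquired around the framework calls
 *
 * returns NDI_SUCCESS or NDI_FAILURE
 */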
12997 static int
12998 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
12999     int tcount, int flags, int *circ)
13000 {
13001 	int			rval;
13002 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13003 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13004 	dev_info_t		*cdip = NULL;
13005 
13006 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13007 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13008 
13009 	if (plun->lun_cip == NULL) {
13010 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13011 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13012 		    "fcp_online_child: plun->lun_cip is NULL: "
13013 		    "plun: %p state: %x num: %d target state: %x",
13014 		    plun, plun->lun_state, plun->lun_num,
13015 		    plun->lun_tgt->tgt_port->port_state);
13016 		return (NDI_FAILURE);
13017 	}
13018 again:
13019 	if (plun->lun_mpxio == 0) {
13020 		cdip = DIP(cip);
13021 		mutex_exit(&plun->lun_mutex);
13022 		mutex_exit(&pptr->port_mutex);
13023 
13024 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13025 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13026 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13027 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13028 
13029 		/*
13030 		 * We could check for FCP_LUN_INIT here, but the chances
13031 		 * of getting here when it's already in FCP_LUN_INIT
13032 		 * are rare, and a duplicate ndi_devi_online wouldn't
13033 		 * hurt either (the node would already have been
13034 		 * in CF2).
13035 		 */
13036 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13037 			rval = ndi_devi_bind_driver(cdip, flags);
13038 		} else {
13039 			rval = ndi_devi_online(cdip, flags);
13040 		}
13041 		/*
13042 		 * We log the message into the trace buffer if the device
13043 		 * is "ses" and into syslog for any other device type.
13044 		 * This suppresses the ndi_devi_online failure message
13045 		 * that would otherwise appear for V880/A5K ses devices.
13046 		 */
13047 		if (rval == NDI_SUCCESS) {
13048 			mutex_enter(&ptgt->tgt_mutex);
13049 			plun->lun_state |= FCP_LUN_INIT;
13050 			mutex_exit(&ptgt->tgt_mutex);
13051 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13052 			fcp_log(CE_NOTE, pptr->port_dip,
13053 			    "!ndi_devi_online:"
13054 			    " failed for %s: target=%x lun=%x %x",
13055 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13056 			    plun->lun_num, rval);
13057 		} else {
13058 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13059 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13060 			    " !ndi_devi_online:"
13061 			    " failed for %s: target=%x lun=%x %x",
13062 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13063 			    plun->lun_num, rval);
13064 		}
13065 	} else {
13066 		cdip = mdi_pi_get_client(PIP(cip));
13067 		mutex_exit(&plun->lun_mutex);
13068 		mutex_exit(&pptr->port_mutex);
13069 
13070 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13071 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13072 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13073 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13074 
13075 		/*
13076 		 * Hold path and exit phci to avoid deadlock with power
13077 		 * management code during mdi_pi_online.
13078 		 */
13079 		mdi_hold_path(PIP(cip));
13080 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13081 
13082 		rval = mdi_pi_online(PIP(cip), flags);
13083 
13084 		mdi_devi_enter_phci(pptr->port_dip, circ);
13085 		mdi_rele_path(PIP(cip));
13086 
13087 		if (rval == MDI_SUCCESS) {
13088 			mutex_enter(&ptgt->tgt_mutex);
13089 			plun->lun_state |= FCP_LUN_INIT;
13090 			mutex_exit(&ptgt->tgt_mutex);
13091 
13092 			/*
13093 			 * Clear MPxIO path permanent disable in case
13094 			 * fcp hotplug dropped the offline event.
13095 			 */
13096 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13097 
13098 		} else if (rval == MDI_NOT_SUPPORTED) {
13099 			child_info_t	*old_cip = cip;
13100 
13101 			/*
13102 			 * MPxIO does not support this device yet.
13103 			 * Enumerate in legacy mode.
13104 			 */
13105 			mutex_enter(&pptr->port_mutex);
13106 			mutex_enter(&plun->lun_mutex);
13107 			plun->lun_mpxio = 0;
13108 			plun->lun_cip = NULL;
13109 			cdip = fcp_create_dip(plun, lcount, tcount);
13110 			plun->lun_cip = cip = CIP(cdip);
13111 			if (cip == NULL) {
13112 				fcp_log(CE_WARN, pptr->port_dip,
13113 				    "!fcp_online_child: "
13114 				    "Create devinfo failed for LU=%p", plun);
13115 				mutex_exit(&plun->lun_mutex);
13116 
13117 				mutex_enter(&ptgt->tgt_mutex);
13118 				plun->lun_state |= FCP_LUN_OFFLINE;
13119 				mutex_exit(&ptgt->tgt_mutex);
13120 
13121 				mutex_exit(&pptr->port_mutex);
13122 
13123 				/*
13124 				 * free the mdi_pathinfo node
13125 				 */
13126 				(void) mdi_pi_free(PIP(old_cip), 0);
13127 			} else {
13128 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13129 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13130 				    "fcp_online_child: creating devinfo "
13131 				    "node 0x%p for plun 0x%p",
13132 				    cip, plun);
13133 				mutex_exit(&plun->lun_mutex);
13134 				mutex_exit(&pptr->port_mutex);
13135 				/*
13136 				 * free the mdi_pathinfo node
13137 				 */
13138 				(void) mdi_pi_free(PIP(old_cip), 0);
13139 				mutex_enter(&pptr->port_mutex);
13140 				mutex_enter(&plun->lun_mutex);
13141 				goto again;
13142 			}
13143 		} else {
13144 			if (cdip) {
13145 				fcp_log(CE_NOTE, pptr->port_dip,
13146 				    "!fcp_online_child: mdi_pi_online:"
13147 				    " failed for %s: target=%x lun=%x %x",
13148 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13149 				    plun->lun_num, rval);
13150 			}
13151 		}
13152 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13153 	}
13154 
13155 	if (rval == NDI_SUCCESS) {
13156 		if (cdip) {
13157 			(void) ndi_event_retrieve_cookie(
13158 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13159 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13160 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13161 			    cdip, fcp_insert_eid, NULL);
13162 		}
13163 	}
13164 	mutex_enter(&pptr->port_mutex);
13165 	mutex_enter(&plun->lun_mutex);
13166 	return (rval);
13167 }
13168 
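/*
 * offline the child node (devinfo node or MPxIO path) of the given LUN
 *
 * called with the port and LUN mutexes held; they are dropped and
 * reacquired around the framework calls
 *
 * returns NDI_SUCCESS or NDI_FAILURE
 */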
13169 /* ARGSUSED */
13170 static int
13171 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13172     int tcount, int flags, int *circ)
13173 {
13174 	int rval;
13175 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13176 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13177 	dev_info_t		*cdip;
13178 
13179 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13180 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13181 
13182 	if (plun->lun_cip == NULL) {
13183 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13184 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13185 		    "fcp_offline_child: plun->lun_cip is NULL: "
13186 		    "plun: %p lun state: %x num: %d target state: %x",
13187 		    plun, plun->lun_state, plun->lun_num,
13188 		    plun->lun_tgt->tgt_port->port_state);
13189 		return (NDI_FAILURE);
13190 	}
13191 
13192 	if (plun->lun_mpxio == 0) {
13193 		cdip = DIP(cip);
13194 		mutex_exit(&plun->lun_mutex);
13195 		mutex_exit(&pptr->port_mutex);
13196 		rval = ndi_devi_offline(DIP(cip), flags);
13197 		if (rval != NDI_SUCCESS) {
13198 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13199 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13200 			    "fcp_offline_child: ndi_devi_offline failed "
13201 			    "rval=%x cip=%p", rval, cip);
13202 		}
13203 	} else {
13204 		cdip = mdi_pi_get_client(PIP(cip));
13205 		mutex_exit(&plun->lun_mutex);
13206 		mutex_exit(&pptr->port_mutex);
13207 
13208 		/*
13209 		 * Exit phci to avoid deadlock with power management code
13210 		 * during mdi_pi_offline
13211 		 */
13212 		mdi_hold_path(PIP(cip));
13213 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13214 
13215 		rval = mdi_pi_offline(PIP(cip), flags);
13216 
13217 		mdi_devi_enter_phci(pptr->port_dip, circ);
13218 		mdi_rele_path(PIP(cip));
13219 
13220 		if (rval == MDI_SUCCESS) {
13221 			/*
13222 			 * Clear MPxIO path permanent disable as the path is
13223 			 * already offlined.
13224 			 */
13225 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13226 
13227 			if (flags & NDI_DEVI_REMOVE) {
13228 				(void) mdi_pi_free(PIP(cip), 0);
13229 			}
13230 		} else {
13231 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13232 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13233 			    "fcp_offline_child: mdi_pi_offline failed "
13234 			    "rval=%x cip=%p", rval, cip);
13235 		}
13236 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13237 	}
13238 
13239 	mutex_enter(&ptgt->tgt_mutex);
13240 	plun->lun_state &= ~FCP_LUN_INIT;
13241 	mutex_exit(&ptgt->tgt_mutex);
13242 
13243 	mutex_enter(&pptr->port_mutex);
13244 	mutex_enter(&plun->lun_mutex);
13245 
13246 	if (rval == NDI_SUCCESS) {
13247 		cdip = NULL;
13248 		if (flags & NDI_DEVI_REMOVE) {
13249 			/*
13250 			 * If the guid of the LUN changes, lun_cip will not
13251 			 * equal cip; after offlining the LUN with the old
13252 			 * guid we should keep lun_cip, since it is the cip
13253 			 * of the LUN with the new guid.
13254 			 * Otherwise remove our reference to the child node.
13255 			 */
13256 			if (plun->lun_cip == cip) {
13257 				plun->lun_cip = NULL;
13258 			}
13259 			if (plun->lun_old_guid) {
13260 				kmem_free(plun->lun_old_guid,
13261 				    plun->lun_old_guid_size);
13262 				plun->lun_old_guid = NULL;
13263 				plun->lun_old_guid_size = 0;
13264 			}
13265 		}
13266 	}
13267 
13268 	if (cdip) {
13269 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13270 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13271 		    " target=%x lun=%x", "ndi_offline",
13272 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13273 	}
13274 
13275 	return (rval);
13276 }
13277 
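/*
 * free the child node (devinfo node or MPxIO path) still attached to
 * the LUN, if any, and clear lun_cip
 *
 * called with the port, target and LUN mutexes held; the MPxIO case
 * drops and reacquires them around mdi_pi_free()
 */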
13278 static void
13279 fcp_remove_child(struct fcp_lun *plun)
13280 {
13281 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13282 
13283 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13284 		if (plun->lun_mpxio == 0) {
13285 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13286 			(void) ndi_devi_free(DIP(plun->lun_cip));
13287 		} else {
13288 			mutex_exit(&plun->lun_mutex);
13289 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13290 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13291 			FCP_TRACE(fcp_logq,
13292 			    plun->lun_tgt->tgt_port->port_instbuf,
13293 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13294 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13295 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13296 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13297 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13298 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13299 			mutex_enter(&plun->lun_mutex);
13300 		}
13301 	}
13302 
13303 	plun->lun_cip = NULL;
13304 }
13305 
13306 /*
13307  * called when a timeout occurs
13308  *
13309  * can be scheduled during an attach or resume (if not already running)
13310  *
13311  * one timeout is set up for all ports
13312  *
13313  * acquires and releases the global mutex
13314  */
13315 /*ARGSUSED*/
13316 static void
13317 fcp_watch(void *arg)
13318 {
13319 	struct fcp_port	*pptr;
13320 	struct fcp_ipkt	*icmd;
13321 	struct fcp_ipkt	*nicmd;
13322 	struct fcp_pkt	*cmd;
13323 	struct fcp_pkt	*ncmd;
13324 	struct fcp_pkt	*tail;
13325 	struct fcp_pkt	*pcmd;
13326 	struct fcp_pkt	*save_head;
13327 	struct fcp_port	*save_port;
13328 
13329 	/* increment global watchdog time */
13330 	fcp_watchdog_time += fcp_watchdog_timeout;
13331 
13332 	mutex_enter(&fcp_global_mutex);
13333 
13334 	/* scan each port in our list */
13335 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
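		/*
		 * Remember the current list head; if it changes while the
		 * global mutex is dropped, bail out at end_of_watchdog and
		 * revisit the remaining ports on the next tick.
		 */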
13336 		save_port = fcp_port_head;
13337 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13338 		mutex_exit(&fcp_global_mutex);
13339 
13340 		mutex_enter(&pptr->port_mutex);
13341 		if (pptr->port_ipkt_list == NULL &&
13342 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13343 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13344 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13345 			mutex_exit(&pptr->port_mutex);
13346 			mutex_enter(&fcp_global_mutex);
13347 			goto end_of_watchdog;
13348 		}
13349 
13350 		/*
13351 		 * Check whether any targets need to be offlined.
13352 		 */
13353 		if (pptr->port_offline_tgts) {
13354 			fcp_scan_offline_tgts(pptr);
13355 		}
13356 
13357 		/*
13358 		 * Check whether any LUNs need to be offlined.
13359 		 */
13360 		if (pptr->port_offline_luns) {
13361 			fcp_scan_offline_luns(pptr);
13362 		}
13363 
13364 		/*
13365 		 * Check whether any targets or LUNs need to be reset.
13366 		 */
13367 		if (pptr->port_reset_list) {
13368 			fcp_check_reset_delay(pptr);
13369 		}
13370 
13371 		mutex_exit(&pptr->port_mutex);
13372 
13373 		/*
13374 		 * This is where the pending commands (pkt) are checked for
13375 		 * timeout.
13376 		 */
13377 		mutex_enter(&pptr->port_pkt_mutex);
13378 		tail = pptr->port_pkt_tail;
13379 
13380 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13381 		    cmd != NULL; cmd = ncmd) {
13382 			ncmd = cmd->cmd_next;
13383 			/*
13384 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13385 			 * must be set.
13386 			 */
13387 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13388 			/*
13389 			 * FCP_INVALID_TIMEOUT will be set for those
13390 			 * commands that need to be failed, mostly those
13391 			 * that could not be queued down within their
13392 			 * "timeout" value.  cmd->cmd_timeout is used
13393 			 * to try and requeue the command regularly.
13394 			 */
13395 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13396 				/*
13397 				 * This command hasn't timed out yet.  Let's
13398 				 * go to the next one.
13399 				 */
13400 				pcmd = cmd;
13401 				goto end_of_loop;
13402 			}
13403 
13404 			if (cmd == pptr->port_pkt_head) {
13405 				ASSERT(pcmd == NULL);
13406 				pptr->port_pkt_head = cmd->cmd_next;
13407 			} else {
13408 				ASSERT(pcmd != NULL);
13409 				pcmd->cmd_next = cmd->cmd_next;
13410 			}
13411 
13412 			if (cmd == pptr->port_pkt_tail) {
13413 				ASSERT(cmd->cmd_next == NULL);
13414 				pptr->port_pkt_tail = pcmd;
13415 				if (pcmd) {
13416 					pcmd->cmd_next = NULL;
13417 				}
13418 			}
13419 			cmd->cmd_next = NULL;
13420 
13421 			/*
13422 			 * Save the current head before dropping the
13423 			 * mutex.  If the head doesn't remain the
13424 			 * same after reacquiring the mutex, just
13425 			 * bail out and revisit on the next tick.
13426 			 *
13427 			 * PS: The tail pointer can change as commands
13428 			 * get requeued after failing to retransport.
13429 			 */
13430 			save_head = pptr->port_pkt_head;
13431 			mutex_exit(&pptr->port_pkt_mutex);
13432 
13433 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13434 			    FCP_INVALID_TIMEOUT) {
13435 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13436 				struct fcp_lun	*plun;
13437 				struct fcp_tgt	*ptgt;
13438 
13439 				plun = ADDR2LUN(&pkt->pkt_address);
13440 				ptgt = plun->lun_tgt;
13441 
13442 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13443 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13444 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13445 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13446 
13447 				cmd->cmd_state == FCP_PKT_ABORTING ?
13448 				    fcp_fail_cmd(cmd, CMD_RESET,
13449 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13450 				    CMD_TIMEOUT, STAT_ABORTED);
13451 			} else {
13452 				fcp_retransport_cmd(pptr, cmd);
13453 			}
13454 			mutex_enter(&pptr->port_pkt_mutex);
13455 			if (save_head && save_head != pptr->port_pkt_head) {
13456 				/*
13457 				 * Looks like the linked list got changed (this
13458 				 * mostly happens when the OFFLINE LUN code starts
13459 				 * returning overflow queue commands in
13460 				 * parallel).  So bail out and revisit on the
13461 				 * next tick.
13462 				 */
13463 				break;
13464 			}
13465 		end_of_loop:
13466 			/*
13467 			 * Scan only up to the previously known tail pointer
13468 			 * to avoid excessive processing; lots of new packets
13469 			 * could have been added to the tail or the old ones
13470 			 * requeued.
13471 			 */
13472 			if (cmd == tail) {
13473 				break;
13474 			}
13475 		}
13476 		mutex_exit(&pptr->port_pkt_mutex);
13477 
13478 		mutex_enter(&pptr->port_mutex);
13479 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13480 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13481 
13482 			nicmd = icmd->ipkt_next;
13483 			if ((icmd->ipkt_restart != 0) &&
13484 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13485 				/* packet has not timed out */
13486 				continue;
13487 			}
13488 
13489 			/* time for packet re-transport */
13490 			if (icmd == pptr->port_ipkt_list) {
13491 				pptr->port_ipkt_list = icmd->ipkt_next;
13492 				if (pptr->port_ipkt_list) {
13493 					pptr->port_ipkt_list->ipkt_prev =
13494 					    NULL;
13495 				}
13496 			} else {
13497 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13498 				if (icmd->ipkt_next) {
13499 					icmd->ipkt_next->ipkt_prev =
13500 					    icmd->ipkt_prev;
13501 				}
13502 			}
13503 			icmd->ipkt_next = NULL;
13504 			icmd->ipkt_prev = NULL;
13505 			mutex_exit(&pptr->port_mutex);
13506 
13507 			if (fcp_is_retryable(icmd)) {
13508 				fc_ulp_rscn_info_t *rscnp =
13509 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13510 				    pkt_ulp_rscn_infop;
13511 
13512 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13513 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13514 				    "%x to D_ID=%x Retrying..",
13515 				    icmd->ipkt_opcode,
13516 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13517 
13518 				/*
13519 				 * Update the RSCN count in the packet
13520 				 * before resending.
13521 				 */
13522 
13523 				if (rscnp != NULL) {
13524 					rscnp->ulp_rscn_count =
13525 					    fc_ulp_get_rscn_count(pptr->
13526 					    port_fp_handle);
13527 				}
13528 
13529 				mutex_enter(&pptr->port_mutex);
13530 				mutex_enter(&ptgt->tgt_mutex);
13531 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13532 					mutex_exit(&ptgt->tgt_mutex);
13533 					mutex_exit(&pptr->port_mutex);
13534 					switch (icmd->ipkt_opcode) {
13535 						int rval;
13536 					case LA_ELS_PLOGI:
13537 						if ((rval = fc_ulp_login(
13538 						    pptr->port_fp_handle,
13539 						    &icmd->ipkt_fpkt, 1)) ==
13540 						    FC_SUCCESS) {
13541 							mutex_enter(
13542 							    &pptr->port_mutex);
13543 							continue;
13544 						}
13545 						if (fcp_handle_ipkt_errors(
13546 						    pptr, ptgt, icmd, rval,
13547 						    "PLOGI") == DDI_SUCCESS) {
13548 							mutex_enter(
13549 							    &pptr->port_mutex);
13550 							continue;
13551 						}
13552 						break;
13553 
13554 					case LA_ELS_PRLI:
13555 						if ((rval = fc_ulp_issue_els(
13556 						    pptr->port_fp_handle,
13557 						    icmd->ipkt_fpkt)) ==
13558 						    FC_SUCCESS) {
13559 							mutex_enter(
13560 							    &pptr->port_mutex);
13561 							continue;
13562 						}
13563 						if (fcp_handle_ipkt_errors(
13564 						    pptr, ptgt, icmd, rval,
13565 						    "PRLI") == DDI_SUCCESS) {
13566 							mutex_enter(
13567 							    &pptr->port_mutex);
13568 							continue;
13569 						}
13570 						break;
13571 
13572 					default:
13573 						if ((rval = fcp_transport(
13574 						    pptr->port_fp_handle,
13575 						    icmd->ipkt_fpkt, 1)) ==
13576 						    FC_SUCCESS) {
13577 							mutex_enter(
13578 							    &pptr->port_mutex);
13579 							continue;
13580 						}
13581 						if (fcp_handle_ipkt_errors(
13582 						    pptr, ptgt, icmd, rval,
13583 						    "PRLI") == DDI_SUCCESS) {
13584 							mutex_enter(
13585 							    &pptr->port_mutex);
13586 							continue;
13587 						}
13588 						break;
13589 					}
13590 				} else {
13591 					mutex_exit(&ptgt->tgt_mutex);
13592 					mutex_exit(&pptr->port_mutex);
13593 				}
13594 			} else {
13595 				fcp_print_error(icmd->ipkt_fpkt);
13596 			}
13597 
13598 			(void) fcp_call_finish_init(pptr, ptgt,
13599 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13600 			    icmd->ipkt_cause);
13601 			fcp_icmd_free(pptr, icmd);
13602 			mutex_enter(&pptr->port_mutex);
13603 		}
13604 
13605 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13606 		mutex_exit(&pptr->port_mutex);
13607 		mutex_enter(&fcp_global_mutex);
13608 
13609 	end_of_watchdog:
13610 		/*
13611 		 * Bail out early before getting into trouble
13612 		 */
13613 		if (save_port != fcp_port_head) {
13614 			break;
13615 		}
13616 	}
13617 
13618 	if (fcp_watchdog_init > 0) {
13619 		/* reschedule timeout to go again */
13620 		fcp_watchdog_id =
13621 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13622 	}
13623 	mutex_exit(&fcp_global_mutex);
13624 }
13625 
13626 
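/*
 * called by the watchdog (fcp_watch) to process the port's list of
 * pending reset delay elements
 *
 * elements whose delay bookkeeping is done are unlinked and freed; if
 * the target change count still matches, the FCP_LUN_BUSY state set
 * when the reset was issued is cleared and any commands still
 * outstanding for the target or LUN are aborted
 *
 * called with the port mutex held
 */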
13627 static void
13628 fcp_check_reset_delay(struct fcp_port *pptr)
13629 {
13630 	uint32_t		tgt_cnt;
13631 	int			level;
13632 	struct fcp_tgt	*ptgt;
13633 	struct fcp_lun	*plun;
13634 	struct fcp_reset_elem *cur = NULL;
13635 	struct fcp_reset_elem *next = NULL;
13636 	struct fcp_reset_elem *prev = NULL;
13637 
13638 	ASSERT(mutex_owned(&pptr->port_mutex));
13639 
13640 	next = pptr->port_reset_list;
13641 	while ((cur = next) != NULL) {
13642 		next = cur->next;
13643 
13644 		if (cur->timeout < fcp_watchdog_time) {
13645 			prev = cur;
13646 			continue;
13647 		}
13648 
13649 		ptgt = cur->tgt;
13650 		plun = cur->lun;
13651 		tgt_cnt = cur->tgt_cnt;
13652 
13653 		if (ptgt) {
13654 			level = RESET_TARGET;
13655 		} else {
13656 			ASSERT(plun != NULL);
13657 			level = RESET_LUN;
13658 			ptgt = plun->lun_tgt;
13659 		}
13660 		if (prev) {
13661 			prev->next = next;
13662 		} else {
13663 			/*
13664 			 * Because we drop the port mutex while doing aborts
13665 			 * for packets, we can't rely on reset_list pointing
13666 			 * to our head.
13667 			 */
13668 			if (cur == pptr->port_reset_list) {
13669 				pptr->port_reset_list = next;
13670 			} else {
13671 				struct fcp_reset_elem *which;
13672 
13673 				which = pptr->port_reset_list;
13674 				while (which && which->next != cur) {
13675 					which = which->next;
13676 				}
13677 				ASSERT(which != NULL);
13678 
13679 				which->next = next;
13680 				prev = which;
13681 			}
13682 		}
13683 
13684 		kmem_free(cur, sizeof (*cur));
13685 
13686 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13687 			mutex_enter(&ptgt->tgt_mutex);
13688 			if (level == RESET_TARGET) {
13689 				fcp_update_tgt_state(ptgt,
13690 				    FCP_RESET, FCP_LUN_BUSY);
13691 			} else {
13692 				fcp_update_lun_state(plun,
13693 				    FCP_RESET, FCP_LUN_BUSY);
13694 			}
13695 			mutex_exit(&ptgt->tgt_mutex);
13696 
13697 			mutex_exit(&pptr->port_mutex);
13698 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13699 			mutex_enter(&pptr->port_mutex);
13700 		}
13701 	}
13702 }
13703 
13704 
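/*
 * abort everything outstanding for the given target (or, for a LUN
 * level reset, the given LUN)
 *
 * commands still sitting on the port overflow queue are completed with
 * CMD_RESET/STAT_DEV_RESET; commands already issued to the FCA are
 * aborted through fc_ulp_abort(), and those whose abort fails are put
 * back on the overflow queue to be timed out later by fcp_watch
 *
 * if the FCA is configured to return all commands on reset
 * (FC_RESET_RETURN_ALL), only the overflow queue is drained here
 */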
13705 static void
13706 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13707     struct fcp_lun *rlun, int tgt_cnt)
13708 {
13709 	int			rval;
13710 	struct fcp_lun	*tlun, *nlun;
13711 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13712 	    *cmd = NULL, *head = NULL,
13713 	    *tail = NULL;
13714 
13715 	mutex_enter(&pptr->port_pkt_mutex);
13716 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13717 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13718 		struct fcp_tgt *ptgt = plun->lun_tgt;
13719 
13720 		ncmd = cmd->cmd_next;
13721 
13722 		if (ptgt != ttgt && plun != rlun) {
13723 			pcmd = cmd;
13724 			continue;
13725 		}
13726 
13727 		if (pcmd != NULL) {
13728 			ASSERT(pptr->port_pkt_head != cmd);
13729 			pcmd->cmd_next = ncmd;
13730 		} else {
13731 			ASSERT(cmd == pptr->port_pkt_head);
13732 			pptr->port_pkt_head = ncmd;
13733 		}
13734 		if (pptr->port_pkt_tail == cmd) {
13735 			ASSERT(cmd->cmd_next == NULL);
13736 			pptr->port_pkt_tail = pcmd;
13737 			if (pcmd != NULL) {
13738 				pcmd->cmd_next = NULL;
13739 			}
13740 		}
13741 
13742 		if (head == NULL) {
13743 			head = tail = cmd;
13744 		} else {
13745 			ASSERT(tail != NULL);
13746 			tail->cmd_next = cmd;
13747 			tail = cmd;
13748 		}
13749 		cmd->cmd_next = NULL;
13750 	}
13751 	mutex_exit(&pptr->port_pkt_mutex);
13752 
13753 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13754 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13755 
13756 		ncmd = cmd->cmd_next;
13757 		ASSERT(pkt != NULL);
13758 
13759 		mutex_enter(&pptr->port_mutex);
13760 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13761 			mutex_exit(&pptr->port_mutex);
13762 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13763 			pkt->pkt_reason = CMD_RESET;
13764 			pkt->pkt_statistics |= STAT_DEV_RESET;
13765 			cmd->cmd_state = FCP_PKT_IDLE;
13766 			fcp_post_callback(cmd);
13767 		} else {
13768 			mutex_exit(&pptr->port_mutex);
13769 		}
13770 	}
13771 
13772 	/*
13773 	 * If the FCA will return all the commands in its queue then our
13774 	 * work is easy, just return.
13775 	 */
13776 
13777 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13778 		return;
13779 	}
13780 
13781 	/*
13782 	 * For RESET_LUN get hold of target pointer
13783 	 */
13784 	if (ttgt == NULL) {
13785 		ASSERT(rlun != NULL);
13786 
13787 		ttgt = rlun->lun_tgt;
13788 
13789 		ASSERT(ttgt != NULL);
13790 	}
13791 
13792 	/*
13793 	 * There are some severe race conditions here.
13794 	 * While we are trying to abort the pkt, it might be completing
13795 	 * so mark it aborted and if the abort does not succeed then
13796 	 * handle it in the watch thread.
13797 	 */
13798 	mutex_enter(&ttgt->tgt_mutex);
13799 	nlun = ttgt->tgt_lun;
13800 	mutex_exit(&ttgt->tgt_mutex);
13801 	while ((tlun = nlun) != NULL) {
13802 		int restart = 0;
13803 		if (rlun && rlun != tlun) {
13804 			mutex_enter(&ttgt->tgt_mutex);
13805 			nlun = tlun->lun_next;
13806 			mutex_exit(&ttgt->tgt_mutex);
13807 			continue;
13808 		}
13809 		mutex_enter(&tlun->lun_mutex);
13810 		cmd = tlun->lun_pkt_head;
13811 		while (cmd != NULL) {
13812 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13813 				struct scsi_pkt *pkt;
13814 
13815 				restart = 1;
13816 				cmd->cmd_state = FCP_PKT_ABORTING;
13817 				mutex_exit(&tlun->lun_mutex);
13818 				rval = fc_ulp_abort(pptr->port_fp_handle,
13819 				    cmd->cmd_fp_pkt, KM_SLEEP);
13820 				if (rval == FC_SUCCESS) {
13821 					pkt = cmd->cmd_pkt;
13822 					pkt->pkt_reason = CMD_RESET;
13823 					pkt->pkt_statistics |= STAT_DEV_RESET;
13824 					cmd->cmd_state = FCP_PKT_IDLE;
13825 					fcp_post_callback(cmd);
13826 				} else {
13827 					caddr_t msg;
13828 
13829 					(void) fc_ulp_error(rval, &msg);
13830 
13831 					/*
13832 					 * This part is tricky. The abort
13833 					 * failed and now the command could
13834 					 * be completing.  The cmd_state ==
13835 					 * FCP_PKT_ABORTING should save
13836 					 * us in fcp_cmd_callback: if we
13837 					 * are already aborting, the
13838 					 * command is ignored there.
13839 					 * Here we leave this packet for 20
13840 					 * sec to be aborted in the
13841 					 * fcp_watch thread.
13842 					 */
13843 					fcp_log(CE_WARN, pptr->port_dip,
13844 					    "!Abort failed after reset %s",
13845 					    msg);
13846 
13847 					cmd->cmd_timeout =
13848 					    fcp_watchdog_time +
13849 					    cmd->cmd_pkt->pkt_time +
13850 					    FCP_FAILED_DELAY;
13851 
13852 					cmd->cmd_fp_pkt->pkt_timeout =
13853 					    FCP_INVALID_TIMEOUT;
13854 					/*
13855 					 * This is a hack: cmd is put in the
13856 					 * overflow queue so that it can
13857 					 * eventually be timed out.
13858 					 */
13859 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
13860 
13861 					mutex_enter(&pptr->port_pkt_mutex);
13862 					if (pptr->port_pkt_head) {
13863 						ASSERT(pptr->port_pkt_tail
13864 						    != NULL);
13865 						pptr->port_pkt_tail->cmd_next
13866 						    = cmd;
13867 						pptr->port_pkt_tail = cmd;
13868 					} else {
13869 						ASSERT(pptr->port_pkt_tail
13870 						    == NULL);
13871 						pptr->port_pkt_head =
13872 						    pptr->port_pkt_tail
13873 						    = cmd;
13874 					}
13875 					cmd->cmd_next = NULL;
13876 					mutex_exit(&pptr->port_pkt_mutex);
13877 				}
13878 				mutex_enter(&tlun->lun_mutex);
13879 				cmd = tlun->lun_pkt_head;
13880 			} else {
13881 				cmd = cmd->cmd_forw;
13882 			}
13883 		}
13884 		mutex_exit(&tlun->lun_mutex);
13885 
13886 		mutex_enter(&ttgt->tgt_mutex);
13887 		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
13888 		mutex_exit(&ttgt->tgt_mutex);
13889 
13890 		mutex_enter(&pptr->port_mutex);
13891 		if (tgt_cnt != ttgt->tgt_change_cnt) {
13892 			mutex_exit(&pptr->port_mutex);
13893 			return;
13894 		} else {
13895 			mutex_exit(&pptr->port_mutex);
13896 		}
13897 	}
13898 }
13899 
13900 
13901 /*
13902  * unlink the soft state, returning the soft state found (if any)
13903  *
13904  * acquires and releases the global mutex
13905  */
13906 struct fcp_port *
13907 fcp_soft_state_unlink(struct fcp_port *pptr)
13908 {
13909 	struct fcp_port	*hptr;		/* ptr index */
13910 	struct fcp_port	*tptr;		/* prev hptr */
13911 
13912 	mutex_enter(&fcp_global_mutex);
13913 	for (hptr = fcp_port_head, tptr = NULL;
13914 	    hptr != NULL;
13915 	    tptr = hptr, hptr = hptr->port_next) {
13916 		if (hptr == pptr) {
13917 			/* we found a match -- remove this item */
13918 			if (tptr == NULL) {
13919 				/* we're at the head of the list */
13920 				fcp_port_head = hptr->port_next;
13921 			} else {
13922 				tptr->port_next = hptr->port_next;
13923 			}
13924 			break;			/* success */
13925 		}
13926 	}
13927 	if (fcp_port_head == NULL) {
13928 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
13929 	}
13930 	mutex_exit(&fcp_global_mutex);
13931 	return (hptr);
13932 }
13933 
13934 
13935 /*
13936  * called by fcp_scsi_hba_tgt_init to find a LUN given a
13937  * WWN and a LUN number
13938  */
13939 /* ARGSUSED */
13940 static struct fcp_lun *
13941 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
13942 {
13943 	int hash;
13944 	struct fcp_tgt *ptgt;
13945 	struct fcp_lun *plun;
13946 
13947 	ASSERT(mutex_owned(&pptr->port_mutex));
13948 
13949 	hash = FCP_HASH(wwn);
13950 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
13951 	    ptgt = ptgt->tgt_next) {
13952 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
13953 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
13954 			mutex_enter(&ptgt->tgt_mutex);
13955 			for (plun = ptgt->tgt_lun;
13956 			    plun != NULL;
13957 			    plun = plun->lun_next) {
13958 				if (plun->lun_num == lun) {
13959 					mutex_exit(&ptgt->tgt_mutex);
13960 					return (plun);
13961 				}
13962 			}
13963 			mutex_exit(&ptgt->tgt_mutex);
13964 			return (NULL);
13965 		}
13966 	}
13967 	return (NULL);
13968 }
13969 
13970 /*
13971  *     Function: fcp_prepare_pkt
13972  *
13973  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
13974  *		 for fcp_start(). It binds the data or partially maps it.
13975  *		 Builds the FCP header and starts the initialization of the
13976  *		 Fibre Channel header.
13977  *
13978  *     Argument: *pptr		FCP port.
13979  *		 *cmd		FCP packet.
13980  *		 *plun		LUN the command will be sent to.
13981  *
13982  *	Context: User, Kernel and Interrupt context.
13983  */
13984 static void
13985 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
13986     struct fcp_lun *plun)
13987 {
13988 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
13989 	struct fcp_tgt		*ptgt = plun->lun_tgt;
13990 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
13991 
13992 	ASSERT(cmd->cmd_pkt->pkt_comp ||
13993 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
13994 
13995 	if (cmd->cmd_pkt->pkt_numcookies) {
13996 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
13997 			fcmd->fcp_cntl.cntl_read_data = 1;
13998 			fcmd->fcp_cntl.cntl_write_data = 0;
13999 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14000 		} else {
14001 			fcmd->fcp_cntl.cntl_read_data = 0;
14002 			fcmd->fcp_cntl.cntl_write_data = 1;
14003 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14004 		}
14005 
14006 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14007 
14008 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14009 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14010 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14011 
14012 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14013 
14014 		/* FCA needs pkt_datalen to be set */
14015 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14016 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14017 	} else {
14018 		fcmd->fcp_cntl.cntl_read_data = 0;
14019 		fcmd->fcp_cntl.cntl_write_data = 0;
14020 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14021 		fpkt->pkt_datalen = 0;
14022 		fcmd->fcp_data_len = 0;
14023 	}
14024 
14025 	/* set up the Tagged Queuing type */
14026 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14027 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14028 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14029 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14030 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14031 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14032 	} else {
14033 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14034 	}
14035 
14036 	fcmd->fcp_ent_addr = plun->lun_addr;
14037 
14038 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14039 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14040 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14041 	} else {
14042 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14043 	}
14044 
14045 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14046 	cmd->cmd_pkt->pkt_state = 0;
14047 	cmd->cmd_pkt->pkt_statistics = 0;
14048 	cmd->cmd_pkt->pkt_resid = 0;
14049 
14050 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14051 
14052 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14053 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14054 		fpkt->pkt_comp = NULL;
14055 	} else {
14056 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14057 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14058 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14059 		}
14060 		fpkt->pkt_comp = fcp_cmd_callback;
14061 	}
14062 
14063 	mutex_enter(&pptr->port_mutex);
14064 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14065 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14066 	}
14067 	mutex_exit(&pptr->port_mutex);
14068 
14069 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14070 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14071 
14072 	/*
14073 	 * Save a few kernel cycles here
14074 	 */
14075 #ifndef	__lock_lint
14076 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14077 #endif /* __lock_lint */
14078 }
14079 
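/*
 * hand the completed packet back to the target driver through the
 * SCSA framework
 */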
14080 static void
14081 fcp_post_callback(struct fcp_pkt *cmd)
14082 {
14083 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14084 }
14085 
14086 
14087 /*
14088  * called to do polled I/O by fcp_start()
14089  *
14090  * return a transport status value, i.e. TRAN_ACCEPT for success
14091  */
14092 static int
14093 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14094 {
14095 	int	rval;
14096 
14097 #ifdef	DEBUG
14098 	mutex_enter(&pptr->port_pkt_mutex);
14099 	pptr->port_npkts++;
14100 	mutex_exit(&pptr->port_pkt_mutex);
14101 #endif /* DEBUG */
14102 
14103 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14104 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14105 	} else {
14106 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14107 	}
14108 
14109 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14110 
14111 	cmd->cmd_state = FCP_PKT_ISSUED;
14112 
14113 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14114 
14115 #ifdef	DEBUG
14116 	mutex_enter(&pptr->port_pkt_mutex);
14117 	pptr->port_npkts--;
14118 	mutex_exit(&pptr->port_pkt_mutex);
14119 #endif /* DEBUG */
14120 
14121 	cmd->cmd_state = FCP_PKT_IDLE;
14122 
14123 	switch (rval) {
14124 	case FC_SUCCESS:
14125 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14126 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14127 			rval = TRAN_ACCEPT;
14128 		} else {
14129 			rval = TRAN_FATAL_ERROR;
14130 		}
14131 		break;
14132 
14133 	case FC_TRAN_BUSY:
14134 		rval = TRAN_BUSY;
14135 		cmd->cmd_pkt->pkt_resid = 0;
14136 		break;
14137 
14138 	case FC_BADPACKET:
14139 		rval = TRAN_BADPKT;
14140 		break;
14141 
14142 	default:
14143 		rval = TRAN_FATAL_ERROR;
14144 		break;
14145 	}
14146 
14147 	return (rval);
14148 }
14149 
14150 
14151 /*
14152  * called by some of the following transport-called routines to convert
14153  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14154  */
14155 static struct fcp_port *
14156 fcp_dip2port(dev_info_t *dip)
14157 {
14158 	int	instance;
14159 
14160 	instance = ddi_get_instance(dip);
14161 	return (ddi_get_soft_state(fcp_softstate, instance));
14162 }
14163 
14164 
14165 /*
14166  * called internally to return a LUN given a dip
14167  */
14168 struct fcp_lun *
14169 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14170 {
14171 	struct fcp_tgt *ptgt;
14172 	struct fcp_lun *plun;
14173 	int i;
14174 
14175 
14176 	ASSERT(mutex_owned(&pptr->port_mutex));
14177 
14178 	for (i = 0; i < FCP_NUM_HASH; i++) {
14179 		for (ptgt = pptr->port_tgt_hash_table[i];
14180 		    ptgt != NULL;
14181 		    ptgt = ptgt->tgt_next) {
14182 			mutex_enter(&ptgt->tgt_mutex);
14183 			for (plun = ptgt->tgt_lun; plun != NULL;
14184 			    plun = plun->lun_next) {
14185 				mutex_enter(&plun->lun_mutex);
14186 				if (plun->lun_cip == cip) {
14187 					mutex_exit(&plun->lun_mutex);
14188 					mutex_exit(&ptgt->tgt_mutex);
14189 					return (plun); /* match found */
14190 				}
14191 				mutex_exit(&plun->lun_mutex);
14192 			}
14193 			mutex_exit(&ptgt->tgt_mutex);
14194 		}
14195 	}
14196 	return (NULL);				/* no LUN found */
14197 }
14198 
14199 /*
14200  * pass an element to the hotplug list, kick the hotplug thread
14201  * and wait for the element to get processed by the hotplug thread.
14202  * On return the element is freed.
14203  *
14204  * return zero on success and non-zero on failure
14205  *
14206  * acquires/releases the target mutex
14207  *
14208  */
14209 static int
14210 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14211     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14212 {
14213 	struct fcp_hp_elem	*elem;
14214 	int			rval;
14215 
14216 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14217 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14218 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14219 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14220 		fcp_log(CE_CONT, pptr->port_dip,
14221 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14222 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14223 		return (NDI_FAILURE);
14224 	}
14225 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14226 	mutex_enter(&elem->mutex);
14227 	if (elem->wait) {
14228 		while (elem->wait) {
14229 			cv_wait(&elem->cv, &elem->mutex);
14230 		}
14231 	}
14232 	rval = (elem->result);
14233 	mutex_exit(&elem->mutex);
14234 	mutex_destroy(&elem->mutex);
14235 	cv_destroy(&elem->cv);
14236 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14237 	return (rval);
14238 }
14239 
14240 /*
14241  * pass an element to the hotplug list, and then
14242  * kick the hotplug thread
14243  *
14244  * return a pointer to the hotplug element on success, else NULL on error
14245  *
14246  * acquires/releases the hotplug mutex
14247  *
14248  * called with the target mutex owned
14249  *
14250  * memory acquired in NOSLEEP mode
14251  * NOTE: if wait is set to 1 then the caller is responsible for waiting
14252  *	 for the hp daemon to process the request and is responsible for
14253  *	 freeing the element
14254  */
14255 static struct fcp_hp_elem *
14256 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14257     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14258 {
14259 	struct fcp_hp_elem	*elem;
14260 	dev_info_t *pdip;
14261 
14262 	ASSERT(pptr != NULL);
14263 	ASSERT(plun != NULL);
14264 	ASSERT(plun->lun_tgt != NULL);
14265 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14266 
14267 	/* create space for a hotplug element */
14268 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14269 	    == NULL) {
14270 		fcp_log(CE_WARN, NULL,
14271 		    "!can't allocate memory for hotplug element");
14272 		return (NULL);
14273 	}
14274 
14275 	/* fill in hotplug element */
14276 	elem->port = pptr;
14277 	elem->lun = plun;
14278 	elem->cip = cip;
14279 	elem->old_lun_mpxio = plun->lun_mpxio;
14280 	elem->what = what;
14281 	elem->flags = flags;
14282 	elem->link_cnt = link_cnt;
14283 	elem->tgt_cnt = tgt_cnt;
14284 	elem->wait = wait;
14285 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14286 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14287 
14288 	/* schedule the hotplug task */
14289 	pdip = pptr->port_dip;
14290 	mutex_enter(&plun->lun_mutex);
14291 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14292 		plun->lun_event_count++;
14293 		elem->event_cnt = plun->lun_event_count;
14294 	}
14295 	mutex_exit(&plun->lun_mutex);
14296 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14297 	    (void *)elem, KM_NOSLEEP) == NULL) {
14298 		mutex_enter(&plun->lun_mutex);
14299 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14300 			plun->lun_event_count--;
14301 		}
14302 		mutex_exit(&plun->lun_mutex);
14303 		kmem_free(elem, sizeof (*elem));
14304 		return (NULL);
14305 	}
14306 
14307 	return (elem);
14308 }
14309 
14310 
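/*
 *     Function: fcp_retransport_cmd
 *
 *  Description: Attempts to transport again a packet that was previously
 *		 queued.  If the LUN and the port are in a state that allows
 *		 it, the packet is handed back to the transport; otherwise
 *		 it is put back on the port packet queue.
 *
 *     Argument: *pptr		FCP port.
 *		 *cmd		FCP packet to retransport.
 *
 * Return Value: None
 */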
14311 static void
14312 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14313 {
14314 	int			rval;
14315 	struct scsi_address	*ap;
14316 	struct fcp_lun	*plun;
14317 	struct fcp_tgt	*ptgt;
14318 	fc_packet_t	*fpkt;
14319 
14320 	ap = &cmd->cmd_pkt->pkt_address;
14321 	plun = ADDR2LUN(ap);
14322 	ptgt = plun->lun_tgt;
14323 
14324 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14325 
14326 	cmd->cmd_state = FCP_PKT_IDLE;
14327 
14328 	mutex_enter(&pptr->port_mutex);
14329 	mutex_enter(&ptgt->tgt_mutex);
14330 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14331 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14332 		fc_ulp_rscn_info_t *rscnp;
14333 
14334 		cmd->cmd_state = FCP_PKT_ISSUED;
14335 
14336 		/*
14337 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14338 		 * originally NULL, hence we try to set it to the pd pointed
14339 		 * to by the SCSI device we're trying to get to.
14340 		 */
14341 
14342 		fpkt = cmd->cmd_fp_pkt;
14343 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14344 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14345 			/*
14346 			 * We need to notify the transport that we now have a
14347 			 * reference to the remote port handle.
14348 			 */
14349 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14350 		}
14351 
14352 		mutex_exit(&ptgt->tgt_mutex);
14353 		mutex_exit(&pptr->port_mutex);
14354 
14355 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14356 
14357 		/* prepare the packet */
14358 
14359 		fcp_prepare_pkt(pptr, cmd, plun);
14360 
14361 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14362 		    pkt_ulp_rscn_infop;
14363 
14364 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14365 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14366 
14367 		if (rscnp != NULL) {
14368 			rscnp->ulp_rscn_count =
14369 			    fc_ulp_get_rscn_count(pptr->
14370 			    port_fp_handle);
14371 		}
14372 
14373 		rval = fcp_transport(pptr->port_fp_handle,
14374 		    cmd->cmd_fp_pkt, 0);
14375 
14376 		if (rval == FC_SUCCESS) {
14377 			return;
14378 		}
14379 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14380 	} else {
14381 		mutex_exit(&ptgt->tgt_mutex);
14382 		mutex_exit(&pptr->port_mutex);
14383 	}
14384 
14385 	fcp_queue_pkt(pptr, cmd);
14386 }
14387 
14388 
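/*
 *     Function: fcp_fail_cmd
 *
 *  Description: Fails a queued packet by clearing its queued state, filling
 *		 in the supplied reason and statistics in the SCSI packet and
 *		 completing it through fcp_post_callback().
 *
 *     Argument: *cmd		FCP packet to fail.
 *		 reason		Value stored in pkt_reason.
 *		 statistics	Value stored in pkt_statistics.
 *
 * Return Value: None
 */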
14389 static void
14390 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14391 {
14392 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14393 
14394 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14395 	cmd->cmd_state = FCP_PKT_IDLE;
14396 
14397 	cmd->cmd_pkt->pkt_reason = reason;
14398 	cmd->cmd_pkt->pkt_state = 0;
14399 	cmd->cmd_pkt->pkt_statistics = statistics;
14400 
14401 	fcp_post_callback(cmd);
14402 }
14403 
14404 /*
14405  *     Function: fcp_queue_pkt
14406  *
14407  *  Description: This function queues the packet passed by the caller into
14408  *		 the list of packets of the FCP port.
14409  *
14410  *     Argument: *pptr		FCP port.
14411  *		 *cmd		FCP packet to queue.
14412  *
14413  * Return Value: None
14414  *
14415  *	Context: User, Kernel and Interrupt context.
14416  */
14417 static void
14418 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14419 {
14420 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14421 
14422 	mutex_enter(&pptr->port_pkt_mutex);
14423 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14424 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14425 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14426 
14427 	/*
14428 	 * zero pkt_time means hang around forever
14429 	 */
14430 	if (cmd->cmd_pkt->pkt_time) {
14431 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14432 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14433 		} else {
14434 			/*
14435 			 * Tell the watch thread to fail the command
14436 			 * by setting pkt_timeout to FCP_INVALID_TIMEOUT
14437 			 */
14438 			cmd->cmd_timeout = fcp_watchdog_time;
14439 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14440 		}
14441 	}
14442 
14443 	if (pptr->port_pkt_head) {
14444 		ASSERT(pptr->port_pkt_tail != NULL);
14445 
14446 		pptr->port_pkt_tail->cmd_next = cmd;
14447 		pptr->port_pkt_tail = cmd;
14448 	} else {
14449 		ASSERT(pptr->port_pkt_tail == NULL);
14450 
14451 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14452 	}
14453 	cmd->cmd_next = NULL;
14454 	mutex_exit(&pptr->port_pkt_mutex);
14455 }
14456 
14457 /*
14458  *     Function: fcp_update_targets
14459  *
14460  *  Description: This function applies the specified change of state to all
14461  *		 the targets listed.  The operation applied is 'set'.
14462  *
14463  *     Argument: *pptr		FCP port.
14464  *		 *dev_list	Array of fc_portmap_t structures.
14465  *		 count		Length of dev_list.
14466  *		 state		State bits to update.
14467  *		 cause		Reason for the update.
14468  *
14469  * Return Value: None
14470  *
14471  *	Context: User, Kernel and Interrupt context.
14472  *		 The mutex pptr->port_mutex must be held.
14473  */
14474 static void
14475 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14476     uint32_t count, uint32_t state, int cause)
14477 {
14478 	fc_portmap_t		*map_entry;
14479 	struct fcp_tgt	*ptgt;
14480 
14481 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14482 
14483 	while (count--) {
14484 		map_entry = &(dev_list[count]);
14485 		ptgt = fcp_lookup_target(pptr,
14486 		    (uchar_t *)&(map_entry->map_pwwn));
14487 		if (ptgt == NULL) {
14488 			continue;
14489 		}
14490 
14491 		mutex_enter(&ptgt->tgt_mutex);
14492 		ptgt->tgt_trace = 0;
14493 		ptgt->tgt_change_cnt++;
14494 		ptgt->tgt_statec_cause = cause;
14495 		ptgt->tgt_tmp_cnt = 1;
14496 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14497 		mutex_exit(&ptgt->tgt_mutex);
14498 	}
14499 }
14500 
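
/*
 *     Function: fcp_call_finish_init
 *
 *  Description: Wrapper around fcp_call_finish_init_held() that acquires
 *		 and releases pptr->port_mutex around the call.
 *
 * Return Value: See fcp_call_finish_init_held().
 */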
14501 static int
14502 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14503     int lcount, int tcount, int cause)
14504 {
14505 	int rval;
14506 
14507 	mutex_enter(&pptr->port_mutex);
14508 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14509 	mutex_exit(&pptr->port_mutex);
14510 
14511 	return (rval);
14512 }
14513 
14514 
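/*
 *     Function: fcp_call_finish_init_held
 *
 *  Description: Bookkeeping done when the discovery of one target completes.
 *		 The temporary counters of the target and of the port are
 *		 decremented and, when they drop to zero for the current link
 *		 generation, fcp_finish_tgt() and/or fcp_finish_init() are
 *		 called.
 *
 *     Argument: *pptr		FCP port.
 *		 *ptgt		Target just completed (may be NULL).
 *		 lcount		Link state change counter.
 *		 tcount		Target state change counter.
 *		 cause		Reason for the call.
 *
 * Return Value: FCP_NO_CHANGE or FCP_DEV_CHANGE.
 *
 *	Context: The mutex pptr->port_mutex must be held.
 */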
14515 static int
14516 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14517     int lcount, int tcount, int cause)
14518 {
14519 	int	finish_init = 0;
14520 	int	finish_tgt = 0;
14521 	int	do_finish_init = 0;
14522 	int	rval = FCP_NO_CHANGE;
14523 
14524 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14525 	    cause == FCP_CAUSE_LINK_DOWN) {
14526 		do_finish_init = 1;
14527 	}
14528 
14529 	if (ptgt != NULL) {
14530 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14531 		    FCP_BUF_LEVEL_2, 0,
14532 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14533 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14534 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14535 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14536 		    ptgt->tgt_d_id, ptgt->tgt_done);
14537 
14538 		mutex_enter(&ptgt->tgt_mutex);
14539 
14540 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14541 			rval = FCP_DEV_CHANGE;
14542 			if (do_finish_init && ptgt->tgt_done == 0) {
14543 				ptgt->tgt_done++;
14544 				finish_init = 1;
14545 			}
14546 		} else {
14547 			if (--ptgt->tgt_tmp_cnt <= 0) {
14548 				ptgt->tgt_tmp_cnt = 0;
14549 				finish_tgt = 1;
14550 
14551 				if (do_finish_init) {
14552 					finish_init = 1;
14553 				}
14554 			}
14555 		}
14556 		mutex_exit(&ptgt->tgt_mutex);
14557 	} else {
14558 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14559 		    FCP_BUF_LEVEL_2, 0,
14560 		    "Call Finish Init for NO target");
14561 
14562 		if (do_finish_init) {
14563 			finish_init = 1;
14564 		}
14565 	}
14566 
14567 	if (finish_tgt) {
14568 		ASSERT(ptgt != NULL);
14569 
14570 		mutex_enter(&ptgt->tgt_mutex);
14571 #ifdef	DEBUG
14572 		bzero(ptgt->tgt_tmp_cnt_stack,
14573 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14574 
14575 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14576 		    FCP_STACK_DEPTH);
14577 #endif /* DEBUG */
14578 		mutex_exit(&ptgt->tgt_mutex);
14579 
14580 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14581 	}
14582 
14583 	if (finish_init && lcount == pptr->port_link_cnt) {
14584 		ASSERT(pptr->port_tmp_cnt > 0);
14585 		if (--pptr->port_tmp_cnt == 0) {
14586 			fcp_finish_init(pptr);
14587 		}
14588 	} else if (lcount != pptr->port_link_cnt) {
14589 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14590 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14591 		    "fcp_call_finish_init_held,1: state change occurred"
14592 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14593 	}
14594 
14595 	return (rval);
14596 }
14597 
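/*
 *     Function: fcp_reconfigure_luns
 *
 *  Description: Timeout callback (scheduled through tgt_tid) that builds a
 *		 one entry portmap of type PORT_DEVICE_REPORTLUN_CHANGED for
 *		 the target and feeds it to fcp_statec_callback() as a
 *		 FC_STATE_DEVICE_CHANGE, triggering a new REPORT LUN
 *		 discovery of that target.
 *
 *     Argument: tgt_handle	Pointer to the fcp_tgt to reconfigure.
 *
 * Return Value: None
 */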
14598 static void
14599 fcp_reconfigure_luns(void * tgt_handle)
14600 {
14601 	uint32_t		dev_cnt;
14602 	fc_portmap_t		*devlist;
14603 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14604 	struct fcp_port		*pptr = ptgt->tgt_port;
14605 
14606 	/*
14607 	 * If the timer that fires this off got canceled too late, the
14608 	 * target could have been destroyed.
14609 	 */
14610 
14611 	if (ptgt->tgt_tid == NULL) {
14612 		return;
14613 	}
14614 
14615 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14616 	if (devlist == NULL) {
14617 		fcp_log(CE_WARN, pptr->port_dip,
14618 		    "!fcp%d: failed to allocate for portmap",
14619 		    pptr->port_instance);
14620 		return;
14621 	}
14622 
14623 	dev_cnt = 1;
14624 	devlist->map_pd = ptgt->tgt_pd_handle;
14625 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14626 	devlist->map_did.port_id = ptgt->tgt_d_id;
14627 
14628 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14629 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14630 
14631 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14632 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14633 	devlist->map_flags = 0;
14634 
14635 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14636 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14637 
14638 	/*
14639 	 * Clear the tgt_tid after no more references to
14640 	 * the fcp_tgt
14641 	 */
14642 	mutex_enter(&ptgt->tgt_mutex);
14643 	ptgt->tgt_tid = NULL;
14644 	mutex_exit(&ptgt->tgt_mutex);
14645 
14646 	kmem_free(devlist, sizeof (*devlist));
14647 }
14648 
14649 
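/*
 *     Function: fcp_free_targets
 *
 *  Description: Walks the target hash table of the port and frees every
 *		 target (and its LUNs) through fcp_free_target().
 *
 *     Argument: *pptr		FCP port.
 *
 * Return Value: None
 */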
14650 static void
14651 fcp_free_targets(struct fcp_port *pptr)
14652 {
14653 	int			i;
14654 	struct fcp_tgt	*ptgt;
14655 
14656 	mutex_enter(&pptr->port_mutex);
14657 	for (i = 0; i < FCP_NUM_HASH; i++) {
14658 		ptgt = pptr->port_tgt_hash_table[i];
14659 		while (ptgt != NULL) {
14660 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14661 
14662 			fcp_free_target(ptgt);
14663 			ptgt = next_tgt;
14664 		}
14665 	}
14666 	mutex_exit(&pptr->port_mutex);
14667 }
14668 
14669 
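/*
 *     Function: fcp_free_target
 *
 *  Description: Cancels any pending reconfiguration timeout, deallocates
 *		 all the LUNs hanging off the target and then deallocates
 *		 the target itself.
 *
 *     Argument: *ptgt		Target to free.
 *
 * Return Value: None
 */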
14670 static void
14671 fcp_free_target(struct fcp_tgt *ptgt)
14672 {
14673 	struct fcp_lun	*plun;
14674 	timeout_id_t		tid;
14675 
14676 	mutex_enter(&ptgt->tgt_mutex);
14677 	tid = ptgt->tgt_tid;
14678 
14679 	/*
14680 	 * Cancel any pending timeouts for this target.
14681 	 */
14682 
14683 	if (tid != NULL) {
14684 		/*
14685 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14686 		 * If tgt_tid is NULL, the callback will simply return.
14687 		 */
14688 		ptgt->tgt_tid = NULL;
14689 		mutex_exit(&ptgt->tgt_mutex);
14690 		(void) untimeout(tid);
14691 		mutex_enter(&ptgt->tgt_mutex);
14692 	}
14693 
14694 	plun = ptgt->tgt_lun;
14695 	while (plun != NULL) {
14696 		struct fcp_lun *next_lun = plun->lun_next;
14697 
14698 		fcp_dealloc_lun(plun);
14699 		plun = next_lun;
14700 	}
14701 
14702 	mutex_exit(&ptgt->tgt_mutex);
14703 	fcp_dealloc_tgt(ptgt);
14704 }
14705 
14706 /*
14707  *     Function: fcp_is_retryable
14708  *
14709  *  Description: Indicates if the internal packet is retryable.
14710  *
14711  *     Argument: *icmd		FCP internal packet.
14712  *
14713  * Return Value: 0	Not retryable
14714  *		 1	Retryable
14715  *
14716  *	Context: User, Kernel and Interrupt context
14717  */
14718 static int
14719 fcp_is_retryable(struct fcp_ipkt *icmd)
14720 {
14721 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14722 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14723 		return (0);
14724 	}
14725 
14726 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14727 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14728 }
14729 
14730 /*
14731  *     Function: fcp_create_on_demand
14732  *
14733  *     Argument: *pptr		FCP port.
14734  *		 *pwwn		Port WWN.
14735  *
14736  * Return Value: 0	Success
14737  *		 EIO
14738  *		 ENOMEM
14739  *		 EBUSY
14740  *		 EINVAL
14741  *
14742  *	Context: User and Kernel context
14743  */
14744 static int
14745 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14746 {
14747 	int			wait_ms;
14748 	int			tcount;
14749 	int			lcount;
14750 	int			ret;
14751 	int			error;
14752 	int			rval = EIO;
14753 	int			ntries;
14754 	fc_portmap_t		*devlist;
14755 	opaque_t		pd;
14756 	struct fcp_lun		*plun;
14757 	struct fcp_tgt		*ptgt;
14758 	int			old_manual = 0;
14759 
14760 	/* Allocates the fc_portmap_t structure. */
14761 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14762 
14763 	/*
14764 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14765 	 * in the commented statement below:
14766 	 *
14767 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14768 	 *
14769 	 * Below, the deadline for the discovery process is set.
14770 	 */
14771 	mutex_enter(&pptr->port_mutex);
14772 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14773 	mutex_exit(&pptr->port_mutex);
14774 
14775 	/*
14776 	 * We try to find the remote port based on the WWN provided by the
14777 	 * caller.  We actually ask fp/fctl if it has it.
14778 	 */
14779 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14780 	    (la_wwn_t *)pwwn, &error, 1);
14781 
14782 	if (pd == NULL) {
14783 		kmem_free(devlist, sizeof (*devlist));
14784 		return (rval);
14785 	}
14786 
14787 	/*
14788 	 * The remote port was found.  We ask fp/fctl to update our
14789 	 * fc_portmap_t structure.
14790 	 */
14791 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14792 	    (la_wwn_t *)pwwn, devlist);
14793 	if (ret != FC_SUCCESS) {
14794 		kmem_free(devlist, sizeof (*devlist));
14795 		return (rval);
14796 	}
14797 
14798 	/*
14799 	 * The map type field is set to indicate that the creation is being
14800 	 * done at the user's request (ioctl, probably from luxadm or cfgadm).
14801 	 */
14802 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14803 
14804 	mutex_enter(&pptr->port_mutex);
14805 
14806 	/*
14807 	 * We check to see if fcp already has a target that describes the
14808 	 * device being created.  If not, one is created.
14809 	 */
14810 	ptgt = fcp_lookup_target(pptr, pwwn);
14811 	if (ptgt == NULL) {
14812 		lcount = pptr->port_link_cnt;
14813 		mutex_exit(&pptr->port_mutex);
14814 
14815 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14816 		if (ptgt == NULL) {
14817 			fcp_log(CE_WARN, pptr->port_dip,
14818 			    "!FC target allocation failed");
14819 			return (ENOMEM);
14820 		}
14821 
14822 		mutex_enter(&pptr->port_mutex);
14823 	}
14824 
14825 	mutex_enter(&ptgt->tgt_mutex);
14826 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14827 	ptgt->tgt_tmp_cnt = 1;
14828 	ptgt->tgt_device_created = 0;
14829 	/*
14830 	 * If this is a fabric topology with auto configuration enabled, but
14831 	 * the target was manually unconfigured, then reset manual_config_only
14832 	 * to 0 so the device will get configured.
14833 	 */
14834 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14835 	    fcp_enable_auto_configuration &&
14836 	    ptgt->tgt_manual_config_only == 1) {
14837 		old_manual = 1;
14838 		ptgt->tgt_manual_config_only = 0;
14839 	}
14840 	mutex_exit(&ptgt->tgt_mutex);
14841 
14842 	fcp_update_targets(pptr, devlist, 1,
14843 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14844 
14845 	lcount = pptr->port_link_cnt;
14846 	tcount = ptgt->tgt_change_cnt;
14847 
14848 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14849 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14850 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14851 		    fcp_enable_auto_configuration && old_manual) {
14852 			mutex_enter(&ptgt->tgt_mutex);
14853 			ptgt->tgt_manual_config_only = 1;
14854 			mutex_exit(&ptgt->tgt_mutex);
14855 		}
14856 
14857 		if (pptr->port_link_cnt != lcount ||
14858 		    ptgt->tgt_change_cnt != tcount) {
14859 			rval = EBUSY;
14860 		}
14861 		mutex_exit(&pptr->port_mutex);
14862 
14863 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14864 		    FCP_BUF_LEVEL_3, 0,
14865 		    "fcp_create_on_demand: mapflags ptgt=%x, "
14866 		    "lcount=%x::port_link_cnt=%x, "
14867 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
14868 		    ptgt, lcount, pptr->port_link_cnt,
14869 		    tcount, ptgt->tgt_change_cnt, rval);
14870 		return (rval);
14871 	}
14872 
14873 	/*
14874 	 * Due to lack of synchronization mechanisms, we perform
14875 	 * periodic monitoring of our request.  Because requests
14876 	 * get dropped when another one supersedes them (either because
14877 	 * of a link change or a target change), it is difficult to
14878 	 * provide a clean synchronization mechanism (such as a
14879 	 * semaphore or a condition variable) without exhaustively
14880 	 * rewriting the mainline discovery code of this driver.
14881 	 */
14882 	wait_ms = 500;
14883 
14884 	ntries = fcp_max_target_retries;
14885 
14886 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14887 	    FCP_BUF_LEVEL_3, 0,
14888 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
14889 	    "lcount=%x::port_link_cnt=%x, "
14890 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14891 	    "tgt_tmp_cnt =%x",
14892 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14893 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14894 	    ptgt->tgt_tmp_cnt);
14895 
14896 	mutex_enter(&ptgt->tgt_mutex);
14897 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
14898 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
14899 		mutex_exit(&ptgt->tgt_mutex);
14900 		mutex_exit(&pptr->port_mutex);
14901 
14902 		delay(drv_usectohz(wait_ms * 1000));
14903 
14904 		mutex_enter(&pptr->port_mutex);
14905 		mutex_enter(&ptgt->tgt_mutex);
14906 	}
14907 
14908 
14909 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
14910 		rval = EBUSY;
14911 	} else {
14912 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
14913 		    FCP_TGT_NODE_PRESENT) {
14914 			rval = 0;
14915 		}
14916 	}
14917 
14918 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14919 	    FCP_BUF_LEVEL_3, 0,
14920 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
14921 	    "lcount=%x::port_link_cnt=%x, "
14922 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
14923 	    "tgt_tmp_cnt =%x",
14924 	    ntries, ptgt, lcount, pptr->port_link_cnt,
14925 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
14926 	    ptgt->tgt_tmp_cnt);
14927 
14928 	if (rval) {
14929 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14930 		    fcp_enable_auto_configuration && old_manual) {
14931 			ptgt->tgt_manual_config_only = 1;
14932 		}
14933 		mutex_exit(&ptgt->tgt_mutex);
14934 		mutex_exit(&pptr->port_mutex);
14935 		kmem_free(devlist, sizeof (*devlist));
14936 
14937 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14938 		    FCP_BUF_LEVEL_3, 0,
14939 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
14940 		    "lcount=%x::port_link_cnt=%x, "
14941 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
14942 		    "tgt_device_created=%x, tgt D_ID=%x",
14943 		    ntries, ptgt, lcount, pptr->port_link_cnt,
14944 		    tcount, ptgt->tgt_change_cnt, rval,
14945 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
14946 		return (rval);
14947 	}
14948 
14949 	if ((plun = ptgt->tgt_lun) != NULL) {
14950 		tcount = plun->lun_tgt->tgt_change_cnt;
14951 	} else {
14952 		rval = EINVAL;
14953 	}
14954 	lcount = pptr->port_link_cnt;
14955 
14956 	/*
14957 	 * Configuring the target with no LUNs will fail. We
14958 	 * should reset the node state so that it is not
14959 	 * automatically configured when the LUNs are added
14960 	 * to this target.
14961 	 */
14962 	if (ptgt->tgt_lun_cnt == 0) {
14963 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
14964 	}
14965 	mutex_exit(&ptgt->tgt_mutex);
14966 	mutex_exit(&pptr->port_mutex);
14967 
14968 	while (plun) {
14969 		child_info_t	*cip;
14970 
14971 		mutex_enter(&plun->lun_mutex);
14972 		cip = plun->lun_cip;
14973 		mutex_exit(&plun->lun_mutex);
14974 
14975 		mutex_enter(&ptgt->tgt_mutex);
14976 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
14977 			mutex_exit(&ptgt->tgt_mutex);
14978 
14979 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
14980 			    FCP_ONLINE, lcount, tcount,
14981 			    NDI_ONLINE_ATTACH);
14982 			if (rval != NDI_SUCCESS) {
14983 				FCP_TRACE(fcp_logq,
14984 				    pptr->port_instbuf, fcp_trace,
14985 				    FCP_BUF_LEVEL_3, 0,
14986 				    "fcp_create_on_demand: "
14987 				    "pass_to_hp_and_wait failed "
14988 				    "rval=%x", rval);
14989 				rval = EIO;
14990 			} else {
14991 				mutex_enter(&LUN_TGT->tgt_mutex);
14992 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
14993 				    FCP_LUN_BUSY);
14994 				mutex_exit(&LUN_TGT->tgt_mutex);
14995 			}
14996 			mutex_enter(&ptgt->tgt_mutex);
14997 		}
14998 
14999 		plun = plun->lun_next;
15000 		mutex_exit(&ptgt->tgt_mutex);
15001 	}
15002 
15003 	kmem_free(devlist, sizeof (*devlist));
15004 
15005 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15006 	    fcp_enable_auto_configuration && old_manual) {
15007 		mutex_enter(&ptgt->tgt_mutex);
15008 		/* if successful then set manual to 0 */
15009 		if (rval == 0) {
15010 			ptgt->tgt_manual_config_only = 0;
15011 		} else {
15012 			/* reset to 1 so the user has to do the config */
15013 			ptgt->tgt_manual_config_only = 1;
15014 		}
15015 		mutex_exit(&ptgt->tgt_mutex);
15016 	}
15017 
15018 	return (rval);
15019 }
15020 
15021 
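/*
 * convert an ASCII hexadecimal string into a byte array supplied by the
 * caller; at most byte_len bytes are filled in
 */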
15022 static void
15023 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15024 {
15025 	int		count;
15026 	uchar_t		byte;
15027 
15028 	count = 0;
15029 	while (*string) {
15030 		byte = FCP_ATOB(*string); string++;
15031 		byte = byte << 4 | FCP_ATOB(*string); string++;
15032 		bytes[count++] = byte;
15033 
15034 		if (count >= byte_len) {
15035 			break;
15036 		}
15037 	}
15038 }
15039 
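/*
 * convert a WWN of FC_WWN_SIZE bytes into its ASCII hexadecimal
 * representation; the caller supplies the destination string
 */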
15040 static void
15041 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15042 {
15043 	int		i;
15044 
15045 	for (i = 0; i < FC_WWN_SIZE; i++) {
15046 		(void) sprintf(string + (i * 2),
15047 		    "%02x", wwn[i]);
15048 	}
15049 
15050 }
15051 
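/*
 *     Function: fcp_print_error
 *
 *  Description: Logs a human readable message for a failed internal command
 *		 (REPORT LUN, INQUIRY, INQUIRY-83, PLOGI or PRLI), decoding
 *		 the FCP response and the SCSI sense data when available.
 *
 *     Argument: *fpkt		fc packet that failed.
 *
 * Return Value: None
 */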
15052 static void
15053 fcp_print_error(fc_packet_t *fpkt)
15054 {
15055 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15056 	    fpkt->pkt_ulp_private;
15057 	struct fcp_port	*pptr;
15058 	struct fcp_tgt	*ptgt;
15059 	struct fcp_lun	*plun;
15060 	caddr_t			buf;
15061 	int			scsi_cmd = 0;
15062 
15063 	ptgt = icmd->ipkt_tgt;
15064 	plun = icmd->ipkt_lun;
15065 	pptr = ptgt->tgt_port;
15066 
15067 	buf = kmem_zalloc(256, KM_NOSLEEP);
15068 	if (buf == NULL) {
15069 		return;
15070 	}
15071 
15072 	switch (icmd->ipkt_opcode) {
15073 	case SCMD_REPORT_LUN:
15074 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15075 		    " lun=0x%%x failed");
15076 		scsi_cmd++;
15077 		break;
15078 
15079 	case SCMD_INQUIRY_PAGE83:
15080 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15081 		    " lun=0x%%x failed");
15082 		scsi_cmd++;
15083 		break;
15084 
15085 	case SCMD_INQUIRY:
15086 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15087 		    " lun=0x%%x failed");
15088 		scsi_cmd++;
15089 		break;
15090 
15091 	case LA_ELS_PLOGI:
15092 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15093 		break;
15094 
15095 	case LA_ELS_PRLI:
15096 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15097 		break;
15098 	}
15099 
15100 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15101 		struct fcp_rsp		response, *rsp;
15102 		uchar_t			asc, ascq;
15103 		caddr_t			sense_key = NULL;
15104 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15105 
15106 		if (icmd->ipkt_nodma) {
15107 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15108 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15109 			    sizeof (struct fcp_rsp));
15110 		} else {
15111 			rsp = &response;
15112 			bep = &fcp_rsp_err;
15113 
15114 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15115 			    sizeof (struct fcp_rsp));
15116 
15117 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15118 			    bep, fpkt->pkt_resp_acc,
15119 			    sizeof (struct fcp_rsp_info));
15120 		}
15121 
15122 
15123 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15124 			(void) sprintf(buf + strlen(buf),
15125 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15126 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15127 			    " senselen=%%x. Giving up");
15128 
15129 			fcp_log(CE_WARN, pptr->port_dip, buf,
15130 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15131 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15132 			    rsp->fcp_u.fcp_status.reserved_1,
15133 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15134 
15135 			kmem_free(buf, 256);
15136 			return;
15137 		}
15138 
15139 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15140 		    bep->rsp_code != FCP_NO_FAILURE) {
15141 			(void) sprintf(buf + strlen(buf),
15142 			    " FCP Response code = 0x%x", bep->rsp_code);
15143 		}
15144 
15145 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15146 			struct scsi_extended_sense sense_info, *sense_ptr;
15147 
15148 			if (icmd->ipkt_nodma) {
15149 				sense_ptr = (struct scsi_extended_sense *)
15150 				    ((caddr_t)fpkt->pkt_resp +
15151 				    sizeof (struct fcp_rsp) +
15152 				    rsp->fcp_response_len);
15153 			} else {
15154 				sense_ptr = &sense_info;
15155 
15156 				FCP_CP_IN(fpkt->pkt_resp +
15157 				    sizeof (struct fcp_rsp) +
15158 				    rsp->fcp_response_len, &sense_info,
15159 				    fpkt->pkt_resp_acc,
15160 				    sizeof (struct scsi_extended_sense));
15161 			}
15162 
15163 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15164 			    NUM_IMPL_SENSE_KEYS) {
15165 				sense_key = sense_keys[sense_ptr->es_key];
15166 			} else {
15167 				sense_key = "Undefined";
15168 			}
15169 
15170 			asc = sense_ptr->es_add_code;
15171 			ascq = sense_ptr->es_qual_code;
15172 
15173 			(void) sprintf(buf + strlen(buf),
15174 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15175 			    " Giving up");
15176 
15177 			fcp_log(CE_WARN, pptr->port_dip, buf,
15178 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15179 			    asc, ascq);
15180 		} else {
15181 			(void) sprintf(buf + strlen(buf),
15182 			    " : SCSI status=%%x. Giving up");
15183 
15184 			fcp_log(CE_WARN, pptr->port_dip, buf,
15185 			    ptgt->tgt_d_id, plun->lun_num,
15186 			    rsp->fcp_u.fcp_status.scsi_status);
15187 		}
15188 	} else {
15189 		caddr_t state, reason, action, expln;
15190 
15191 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15192 		    &action, &expln);
15193 
15194 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15195 		    " Reason:%%s. Giving up");
15196 
15197 		if (scsi_cmd) {
15198 			fcp_log(CE_WARN, pptr->port_dip, buf,
15199 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15200 		} else {
15201 			fcp_log(CE_WARN, pptr->port_dip, buf,
15202 			    ptgt->tgt_d_id, state, reason);
15203 		}
15204 	}
15205 
15206 	kmem_free(buf, 256);
15207 }
15208 
15209 
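/*
 *     Function: fcp_handle_ipkt_errors
 *
 *  Description: Decides what to do with an internal packet that failed with
 *		 the given transport error: requeue it for retry (busy and
 *		 new RSCN conditions), resend a failed PRLI as a PLOGI when
 *		 FC_LOGINREQ is returned, or log the failure and give up.
 *
 *     Argument: *pptr		FCP port.
 *		 *ptgt		Target the packet was directed to.
 *		 *icmd		Internal packet that failed.
 *		 rval		Transport error code.
 *		 op		Name of the operation, for logging.
 *
 * Return Value: DDI_SUCCESS	The packet was requeued or reissued.
 *		 DDI_FAILURE	The packet was not retried.
 */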
15210 static int
15211 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15212     struct fcp_ipkt *icmd, int rval, caddr_t op)
15213 {
15214 	int	ret = DDI_FAILURE;
15215 	char	*error;
15216 
15217 	switch (rval) {
15218 	case FC_DEVICE_BUSY_NEW_RSCN:
15219 		/*
15220 		 * This means that there was a new RSCN that the transport
15221 		 * knows about (which the ULP *may* know about too) but the
15222 		 * pkt that was sent down was related to an older RSCN. So, we
15223 		 * are just going to reset the retry count and deadline and
15224 		 * continue to retry. The idea is that transport is currently
15225 		 * working on the new RSCN and will soon let the ULPs know
15226 		 * about it and when it does the existing logic will kick in
15227 		 * where it will change the tcount to indicate that something
15228 		 * changed on the target. So, rediscovery will start and there
15229 		 * will not be an infinite retry.
15230 		 *
15231 		 * For a full flow of how the RSCN info is transferred back and
15232 		 * forth, see fp.c
15233 		 */
15234 		icmd->ipkt_retries = 0;
15235 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15236 		    FCP_ICMD_DEADLINE;
15237 
15238 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15239 		    FCP_BUF_LEVEL_3, 0,
15240 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15241 		    rval, ptgt->tgt_d_id);
15242 		/* FALLTHROUGH */
15243 
15244 	case FC_STATEC_BUSY:
15245 	case FC_DEVICE_BUSY:
15246 	case FC_PBUSY:
15247 	case FC_FBUSY:
15248 	case FC_TRAN_BUSY:
15249 	case FC_OFFLINE:
15250 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15251 		    FCP_BUF_LEVEL_3, 0,
15252 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15253 		    rval, ptgt->tgt_d_id);
15254 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15255 		    fcp_is_retryable(icmd)) {
15256 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15257 			ret = DDI_SUCCESS;
15258 		}
15259 		break;
15260 
15261 	case FC_LOGINREQ:
15262 		/*
15263 		 * FC_LOGINREQ used to be handled just like all the cases
15264 		 * above. It has been changed to handle a PRLI that fails
15265 		 * with FC_LOGINREQ differently from other ipkts that fail
15266 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15267 		 * a simple matter to turn it into a PLOGI instead, so that's
15268 		 * exactly what we do here.
15269 		 */
15270 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15271 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15272 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15273 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15274 		} else {
15275 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15276 			    FCP_BUF_LEVEL_3, 0,
15277 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15278 			    rval, ptgt->tgt_d_id);
15279 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15280 			    fcp_is_retryable(icmd)) {
15281 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15282 				ret = DDI_SUCCESS;
15283 			}
15284 		}
15285 		break;
15286 
15287 	default:
15288 		mutex_enter(&pptr->port_mutex);
15289 		mutex_enter(&ptgt->tgt_mutex);
15290 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15291 			mutex_exit(&ptgt->tgt_mutex);
15292 			mutex_exit(&pptr->port_mutex);
15293 
15294 			(void) fc_ulp_error(rval, &error);
15295 			fcp_log(CE_WARN, pptr->port_dip,
15296 			    "!Failed to send %s to D_ID=%x error=%s",
15297 			    op, ptgt->tgt_d_id, error);
15298 		} else {
15299 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15300 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15301 			    "fcp_handle_ipkt_errors,1: state change occurred"
15302 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15303 			mutex_exit(&ptgt->tgt_mutex);
15304 			mutex_exit(&pptr->port_mutex);
15305 		}
15306 		break;
15307 	}
15308 
15309 	return (ret);
15310 }
15311 
15312 
15313 /*
15314  * Check for outstanding commands on any LUN for this target
15315  */
15316 static int
15317 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15318 {
15319 	struct	fcp_lun	*plun;
15320 	struct	fcp_pkt	*cmd;
15321 
15322 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15323 		mutex_enter(&plun->lun_mutex);
15324 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15325 		    cmd = cmd->cmd_forw) {
15326 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15327 				mutex_exit(&plun->lun_mutex);
15328 				return (FC_SUCCESS);
15329 			}
15330 		}
15331 		mutex_exit(&plun->lun_mutex);
15332 	}
15333 
15334 	return (FC_FAILURE);
15335 }
15336 
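/*
 *     Function: fcp_construct_map
 *
 *  Description: Builds a portmap array describing every non orphan target
 *		 known to the port.  Entries that fp/fctl cannot resolve are
 *		 filled in as PORT_DEVICE_INVALID/PORT_DEVICE_OLD.  The
 *		 caller is responsible for freeing the returned array.
 *
 *     Argument: *pptr		FCP port.
 *		 *dev_cnt	Set to the number of entries in the map.
 *
 * Return Value: Pointer to the portmap array, or NULL if the allocation
 *		 failed.
 */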
15337 static fc_portmap_t *
15338 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15339 {
15340 	int			i;
15341 	fc_portmap_t		*devlist;
15342 	fc_portmap_t		*devptr = NULL;
15343 	struct fcp_tgt	*ptgt;
15344 
15345 	mutex_enter(&pptr->port_mutex);
15346 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15347 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15348 		    ptgt = ptgt->tgt_next) {
15349 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15350 				++*dev_cnt;
15351 			}
15352 		}
15353 	}
15354 
15355 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15356 	    KM_NOSLEEP);
15357 	if (devlist == NULL) {
15358 		mutex_exit(&pptr->port_mutex);
15359 		fcp_log(CE_WARN, pptr->port_dip,
15360 		    "!fcp%d: failed to allocate for portmap for construct map",
15361 		    pptr->port_instance);
15362 		return (devptr);
15363 	}
15364 
15365 	for (i = 0; i < FCP_NUM_HASH; i++) {
15366 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15367 		    ptgt = ptgt->tgt_next) {
15368 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15369 				int ret;
15370 
15371 				ret = fc_ulp_pwwn_to_portmap(
15372 				    pptr->port_fp_handle,
15373 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15374 				    devlist);
15375 
15376 				if (ret == FC_SUCCESS) {
15377 					devlist++;
15378 					continue;
15379 				}
15380 
15381 				devlist->map_pd = NULL;
15382 				devlist->map_did.port_id = ptgt->tgt_d_id;
15383 				devlist->map_hard_addr.hard_addr =
15384 				    ptgt->tgt_hard_addr;
15385 
15386 				devlist->map_state = PORT_DEVICE_INVALID;
15387 				devlist->map_type = PORT_DEVICE_OLD;
15388 
15389 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15390 				    &devlist->map_nwwn, FC_WWN_SIZE);
15391 
15392 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15393 				    &devlist->map_pwwn, FC_WWN_SIZE);
15394 
15395 				devlist++;
15396 			}
15397 		}
15398 	}
15399 
15400 	mutex_exit(&pptr->port_mutex);
15401 
15402 	return (devptr);
15403 }
15404 /*
15405  * Inform MPxIO that the lun is busy and cannot accept regular IO
15406  */
15407 static void
15408 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15409 {
15410 	int i;
15411 	struct fcp_tgt	*ptgt;
15412 	struct fcp_lun	*plun;
15413 
15414 	for (i = 0; i < FCP_NUM_HASH; i++) {
15415 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15416 		    ptgt = ptgt->tgt_next) {
15417 			mutex_enter(&ptgt->tgt_mutex);
15418 			for (plun = ptgt->tgt_lun; plun != NULL;
15419 			    plun = plun->lun_next) {
15420 				if (plun->lun_mpxio &&
15421 				    plun->lun_state & FCP_LUN_BUSY) {
15422 					if (!fcp_pass_to_hp(pptr, plun,
15423 					    plun->lun_cip,
15424 					    FCP_MPXIO_PATH_SET_BUSY,
15425 					    pptr->port_link_cnt,
15426 					    ptgt->tgt_change_cnt, 0, 0)) {
15427 						FCP_TRACE(fcp_logq,
15428 						    pptr->port_instbuf,
15429 						    fcp_trace,
15430 						    FCP_BUF_LEVEL_2, 0,
15431 						    "path_verifybusy: "
15432 						    "disable lun %p failed!",
15433 						    plun);
15434 					}
15435 				}
15436 			}
15437 			mutex_exit(&ptgt->tgt_mutex);
15438 		}
15439 	}
15440 }
15441 
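/*
 * enable or disable the MPxIO path of the LUN, depending on whether 'what'
 * is FCP_MPXIO_PATH_CLEAR_BUSY or FCP_MPXIO_PATH_SET_BUSY
 */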
15442 static int
15443 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15444 {
15445 	dev_info_t		*cdip = NULL;
15446 	dev_info_t		*pdip = NULL;
15447 
15448 	ASSERT(plun);
15449 
15450 	mutex_enter(&plun->lun_mutex);
15451 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15452 		mutex_exit(&plun->lun_mutex);
15453 		return (NDI_FAILURE);
15454 	}
15455 	mutex_exit(&plun->lun_mutex);
15456 	cdip = mdi_pi_get_client(PIP(cip));
15457 	pdip = mdi_pi_get_phci(PIP(cip));
15458 
15459 	ASSERT(cdip != NULL);
15460 	ASSERT(pdip != NULL);
15461 
15462 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15463 		/* LUN ready for IO */
15464 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15465 	} else {
15466 		/* LUN busy to accept IO */
15467 		/* LUN is busy and cannot accept IO */
15468 	}
15469 	return (NDI_SUCCESS);
15470 }
15471 
15472 /*
15473  * Caller must free the returned string of MAXPATHLEN length.
15474  * If the device is offline (-1 instance number) NULL
15475  * will be returned.
15476  */
15477 static char *
15478 fcp_get_lun_path(struct fcp_lun *plun) {
15479 	dev_info_t	*dip = NULL;
15480 	char	*path = NULL;
15481 	if (plun == NULL) {
15482 		return (NULL);
15483 	}
15484 	if (plun->lun_mpxio == 0) {
15485 		dip = DIP(plun->lun_cip);
15486 	} else {
15487 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15488 	}
15489 	if (dip == NULL) {
15490 		return (NULL);
15491 	}
15492 	if (ddi_get_instance(dip) < 0) {
15493 		return (NULL);
15494 	}
15495 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15496 	if (path == NULL) {
15497 		return (NULL);
15498 	}
15499 
15500 	(void) ddi_pathname(dip, path);
15501 	/*
15502 	 * In reality, the user wants a fully valid path (one they can open)
15503 	 * but this string is lacking the mount point, and the minor node.
15504 	 * It would be nice if we could "figure these out" somehow
15505 	 * and fill them in.  Otherwise, the userland code has to understand
15506 	 * driver specific details of which minor node is the "best" or
15507 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15508 	 * which tape doesn't rewind)
15509 	 */
15510 	return (path);
15511 }
15512 
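/*
 *     Function: fcp_scsi_bus_config
 *
 *  Description: bus_config entry point.  For BUS_CONFIG_ONE the operation
 *		 is retried so that fabric devices needed for root have a
 *		 chance to show up; for BUS_CONFIG_DRIVER and BUS_CONFIG_ALL
 *		 the routine waits until all devices have reported in (or
 *		 FCP_INIT_WAIT_TIMEOUT expires) before handing off to the
 *		 NDI framework.
 *
 * Return Value: The value returned by ndi_busop_bus_config(), or
 *		 NDI_FAILURE for an unsupported operation.
 */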
15513 static int
15514 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15515     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15516 {
15517 	int64_t reset_delay;
15518 	int rval, retry = 0;
15519 	struct fcp_port *pptr = fcp_dip2port(parent);
15520 
15521 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15522 	    (lbolt64 - pptr->port_attach_time);
15523 	if (reset_delay < 0) {
15524 		reset_delay = 0;
15525 	}
15526 
15527 	if (fcp_bus_config_debug) {
15528 		flag |= NDI_DEVI_DEBUG;
15529 	}
15530 
15531 	switch (op) {
15532 	case BUS_CONFIG_ONE:
15533 		/*
15534 		 * Retry the command since we need to ensure
15535 		 * the fabric devices are available for root
15536 		 */
15537 		while (retry++ < fcp_max_bus_config_retries) {
15538 			rval =	(ndi_busop_bus_config(parent,
15539 			    flag | NDI_MDI_FALLBACK, op,
15540 			    arg, childp, (clock_t)reset_delay));
15541 			if (rval == 0) {
15542 				return (rval);
15543 			}
15544 		}
15545 
15546 		/*
15547 		 * drain taskq to make sure nodes are created and then
15548 		 * try again.
15549 		 */
15550 		taskq_wait(DEVI(parent)->devi_taskq);
15551 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15552 		    op, arg, childp, 0));
15553 
15554 	case BUS_CONFIG_DRIVER:
15555 	case BUS_CONFIG_ALL: {
15556 		/*
15557 		 * delay till all devices report in (port_tmp_cnt == 0)
15558 		 * or FCP_INIT_WAIT_TIMEOUT
15559 		 */
15560 		mutex_enter(&pptr->port_mutex);
15561 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15562 			(void) cv_timedwait(&pptr->port_config_cv,
15563 			    &pptr->port_mutex,
15564 			    ddi_get_lbolt() + (clock_t)reset_delay);
15565 			reset_delay =
15566 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15567 			    (lbolt64 - pptr->port_attach_time);
15568 		}
15569 		mutex_exit(&pptr->port_mutex);
15570 		/* drain taskq to make sure nodes are created */
15571 		taskq_wait(DEVI(parent)->devi_taskq);
15572 		return (ndi_busop_bus_config(parent, flag, op,
15573 		    arg, childp, 0));
15574 	}
15575 
15576 	default:
15577 		return (NDI_FAILURE);
15578 	}
15579 	/*NOTREACHED*/
15580 }
15581 
15582 static int
15583 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15584     ddi_bus_config_op_t op, void *arg)
15585 {
15586 	if (fcp_bus_config_debug) {
15587 		flag |= NDI_DEVI_DEBUG;
15588 	}
15589 
15590 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15591 }
15592 
15593 
15594 /*
15595  * Routine to copy GUID into the lun structure.
15596  * returns 0 if the copy was successful and 1 if a failure was
15597  * encountered and the guid was not copied.
15598  */
15599 static int
15600 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15601 {
15602 
15603 	int retval = 0;
15604 	unsigned int len;
15605 
15606 	if ((guidp == NULL) || (plun == NULL)) {
15607 		return (1);
15608 	}
15609 	/* add one for the null terminator */
15610 	len = strlen(guidp) + 1;
15611 
15612 	/*
15613 	 * if plun->lun_guid has already been allocated,
15614 	 * then check the size.  if the size is exact, reuse
15615 	 * it....if not, free it and allocate the required size.
15616 	 * The reallocation should NOT typically happen
15617 	 * unless the GUID reported changes between passes.
15618 	 * We free up and alloc again even if the
15619 	 * size was more than required.  This is due to the
15620 	 * fact that the field lun_guid_size serves the
15621 	 * dual role of indicating the GUID size and
15622 	 * ALSO the allocation size.
15623 	 */
15624 	if (plun->lun_guid) {
15625 		if (plun->lun_guid_size != len) {
15626 			/*
15627 			 * free the allocated memory and
15628 			 * initialize the field
15629 			 * lun_guid_size to 0.
15630 			 */
15631 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15632 			plun->lun_guid = NULL;
15633 			plun->lun_guid_size = 0;
15634 		}
15635 	}
15636 	/*
15637 	 * alloc only if not already done.
15638 	 */
15639 	if (plun->lun_guid == NULL) {
15640 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15641 		if (plun->lun_guid == NULL) {
15642 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15643 			    "Unable to allocate "
15644 			    "memory for GUID!!! size %d", len);
15645 			retval = 1;
15646 		} else {
15647 			plun->lun_guid_size = len;
15648 		}
15649 	}
15650 	if (plun->lun_guid) {
15651 		/*
15652 		 * now copy the GUID
15653 		 */
15654 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15655 	}
15656 	return (retval);
15657 }
15658 
15659 /*
15660  * fcp_reconfig_wait
15661  *
15662  * Wait for a rediscovery/reconfiguration to complete before continuing.
15663  */
15664 
15665 static void
15666 fcp_reconfig_wait(struct fcp_port *pptr)
15667 {
15668 	clock_t		reconfig_start, wait_timeout;
15669 
15670 	/*
15671 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15672 	 * reconfiguration in progress.
15673 	 */
15674 
15675 	mutex_enter(&pptr->port_mutex);
15676 	if (pptr->port_tmp_cnt == 0) {
15677 		mutex_exit(&pptr->port_mutex);
15678 		return;
15679 	}
15680 	mutex_exit(&pptr->port_mutex);
15681 
15682 	/*
15683 	 * If we cause a reconfig by raising power, delay until all devices
15684 	 * report in (port_tmp_cnt returns to 0)
15685 	 */
15686 
15687 	reconfig_start = ddi_get_lbolt();
15688 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15689 
15690 	mutex_enter(&pptr->port_mutex);
15691 
15692 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15693 	    pptr->port_tmp_cnt) {
15694 
15695 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15696 		    reconfig_start + wait_timeout);
15697 	}
15698 
15699 	mutex_exit(&pptr->port_mutex);
15700 
15701 	/*
15702 	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
15703 	 * we want may still be ok.  If not, it will error out later.
15704 	 */
15705 }
15706 
15707 /*
15708  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15709  * We rely on the fcp_global_mutex to provide protection against changes to
15710  * the fcp_lun_blacklist.
15711  *
15712  * You can describe a list of target port WWNs and LUN numbers which will
15713  * not be configured. LUN numbers will be interpreted as decimal. White
15714  * spaces and ',' can be used in the list of LUN numbers.
15715  *
15716  * To prevent LUNs 1 and 2 from being configured for target
15717  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15718  *
15719  * pwwn-lun-blacklist=
15720  * "510000f010fd92a1,1,2",
15721  * "510000e012079df1,1,2";
15722  */
15723 static void
15724 fcp_read_blacklist(dev_info_t *dip,
15725     struct fcp_black_list_entry **pplun_blacklist) {
15726 	char **prop_array	= NULL;
15727 	char *curr_pwwn		= NULL;
15728 	char *curr_lun		= NULL;
15729 	uint32_t prop_item	= 0;
15730 	int idx			= 0;
15731 	int len			= 0;
15732 
15733 	ASSERT(mutex_owned(&fcp_global_mutex));
15734 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15735 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15736 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15737 		return;
15738 	}
15739 
15740 	for (idx = 0; idx < prop_item; idx++) {
15741 
15742 		curr_pwwn = prop_array[idx];
15743 		while (*curr_pwwn == ' ') {
15744 			curr_pwwn++;
15745 		}
15746 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15747 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15748 			    ", please check.", curr_pwwn);
15749 			continue;
15750 		}
15751 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15752 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15753 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15754 			    ", please check.", curr_pwwn);
15755 			continue;
15756 		}
15757 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15758 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15759 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15760 				    "blacklist, please check.", curr_pwwn);
15761 				break;
15762 			}
15763 		}
15764 		if (len != sizeof (la_wwn_t) * 2) {
15765 			continue;
15766 		}
15767 
15768 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15769 		*(curr_lun - 1) = '\0';
15770 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15771 	}
15772 
15773 	ddi_prop_free(prop_array);
15774 }
15775 
15776 /*
15777  * Get the masking info about one remote target port designated by wwn.
15778  * Lun ids could be separated by ',' or white spaces.
15779  */
15780 static void
15781 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15782     struct fcp_black_list_entry **pplun_blacklist) {
15783 	int		idx			= 0;
15784 	uint32_t	offset			= 0;
15785 	unsigned long	lun_id			= 0;
15786 	char		lunid_buf[16];
15787 	char		*pend			= NULL;
15788 	int		illegal_digit		= 0;
15789 
15790 	while (offset < strlen(curr_lun)) {
15791 		while ((curr_lun[offset + idx] != ',') &&
15792 		    (curr_lun[offset + idx] != '\0') &&
15793 		    (curr_lun[offset + idx] != ' ')) {
15794 			if (isdigit(curr_lun[offset + idx]) == 0) {
15795 				illegal_digit++;
15796 			}
15797 			idx++;
15798 		}
15799 		if (illegal_digit > 0) {
15800 			offset += (idx+1);	/* To the start of next lun */
15801 			idx = 0;
15802 			illegal_digit = 0;
15803 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15804 			    "the blacklist, please check digits.",
15805 			    curr_lun, curr_pwwn);
15806 			continue;
15807 		}
15808 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15809 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15810 			    "the blacklist, please check the length of LUN#.",
15811 			    curr_lun, curr_pwwn);
15812 			break;
15813 		}
15814 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
15815 			offset++;
15816 			continue;
15817 		}
15818 
15819 		bcopy(curr_lun + offset, lunid_buf, idx);
15820 		lunid_buf[idx] = '\0';
15821 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15822 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15823 		} else {
15824 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15825 			    "the blacklist, please check %s.",
15826 			    curr_lun, curr_pwwn, lunid_buf);
15827 		}
15828 		offset += (idx+1);	/* To the start of next lun */
15829 		idx = 0;
15830 	}
15831 }
15832 
15833 /*
15834  * Add one masking record
15835  */
15836 static void
15837 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15838     struct fcp_black_list_entry **pplun_blacklist) {
15839 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15840 	struct fcp_black_list_entry	*new_entry	= NULL;
15841 	la_wwn_t			wwn;
15842 
15843 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15844 	while (tmp_entry) {
15845 		if ((bcmp(&tmp_entry->wwn, &wwn,
15846 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15847 			return;
15848 		}
15849 
15850 		tmp_entry = tmp_entry->next;
15851 	}
15852 
15853 	/* add to black list */
15854 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
15855 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
15856 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15857 	new_entry->lun = lun_id;
15858 	new_entry->masked = 0;
15859 	new_entry->next = *pplun_blacklist;
15860 	*pplun_blacklist = new_entry;
15861 }
15862 
15863 /*
15864  * Check if we should mask the specified lun of this fcp_tgt
15865  */
15866 static int
15867 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
15868 	struct fcp_black_list_entry *remote_port;
15869 
15870 	remote_port = fcp_lun_blacklist;
15871 	while (remote_port != NULL) {
15872 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
15873 			if (remote_port->lun == lun_id) {
15874 				remote_port->masked++;
15875 				if (remote_port->masked == 1) {
15876 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
15877 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
15878 					    "is masked due to black listing.\n",
15879 					    lun_id, wwn->raw_wwn[0],
15880 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
15881 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
15882 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
15883 					    wwn->raw_wwn[7]);
15884 				}
15885 				return (TRUE);
15886 			}
15887 		}
15888 		remote_port = remote_port->next;
15889 	}
15890 	return (FALSE);
15891 }
15892 
15893 /*
15894  * Release all allocated resources
15895  */
15896 static void
15897 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
15898 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15899 	struct fcp_black_list_entry	*current_entry	= NULL;
15900 
15901 	ASSERT(mutex_owned(&fcp_global_mutex));
15902 	/*
15903 	 * Traverse all the blacklist entries and free them
15904 	 */
15905 	while (tmp_entry) {
15906 		current_entry = tmp_entry;
15907 		tmp_entry = tmp_entry->next;
15908 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
15909 	}
15910 	*pplun_blacklist = NULL;
15911 }
15912