xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 89b2a9fbeabf42fa54594df0e5927bcc50a07cc9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.  To help understand
57  * that function, a flow diagram is given here.  This diagram doesn't claim to
58  * cover all the cases and events that can occur during the discovery process,
59  * nor the subtleties of the code.  The code paths shown are simplified.  Its
60  * purpose is to help the reader (and potential bug fixer) get an overall view
61  * of the logic of the code.  For that reason the diagram covers the simple
62  * case of the line coming up cleanly or of a new port attaching to FCP while
63  * the link is up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | is the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
431 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
432     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
433 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
434     int cause);
435 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
436     uint32_t state);
437 static struct fcp_port *fcp_get_port(opaque_t port_handle);
438 static void fcp_unsol_callback(fc_packet_t *fpkt);
439 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
440     uchar_t r_ctl, uchar_t type);
441 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
442 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
443     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
444     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
445 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
446 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
447     int nodma, int flags);
448 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
449 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
450     uchar_t *wwn);
451 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
452     uint32_t d_id);
453 static void fcp_icmd_callback(fc_packet_t *fpkt);
454 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
455     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
456 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
457 static void fcp_scsi_callback(fc_packet_t *fpkt);
458 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
459 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
461 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
462     uint16_t lun_num);
463 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
464     int link_cnt, int tgt_cnt, int cause);
465 static void fcp_finish_init(struct fcp_port *pptr);
466 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
467     int tgt_cnt, int cause);
468 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
469     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
470 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
471     int link_cnt, int tgt_cnt, int nowait, int flags);
472 static void fcp_offline_target_now(struct fcp_port *pptr,
473     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
474 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
475     int tgt_cnt, int flags);
476 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
477     int nowait, int flags);
478 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
479     int tgt_cnt);
480 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
481     int tgt_cnt, int flags);
482 static void fcp_scan_offline_luns(struct fcp_port *pptr);
483 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
484 static void fcp_update_offline_flags(struct fcp_lun *plun);
485 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
486 static void fcp_abort_commands(struct fcp_pkt *head, struct
487     fcp_port *pptr);
488 static void fcp_cmd_callback(fc_packet_t *fpkt);
489 static void fcp_complete_pkt(fc_packet_t *fpkt);
490 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
491     struct fcp_port *pptr);
492 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
493     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
494 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
495 static void fcp_dealloc_lun(struct fcp_lun *plun);
496 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
497     fc_portmap_t *map_entry, int link_cnt);
498 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
499 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
500 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
501     int internal);
502 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
503 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
504     uint32_t s_id, int instance);
505 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
506     int instance);
507 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
508 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
509     int);
510 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
511 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
512 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
513     int flags);
514 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
515 static int fcp_reset_target(struct scsi_address *ap, int level);
516 static int fcp_commoncap(struct scsi_address *ap, char *cap,
517     int val, int tgtonly, int doset);
518 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
519 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
520 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
521     int sleep);
522 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
523     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
524 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
525 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
526 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
527     int lcount, int tcount);
528 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
529 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
530 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
531     int tgt_cnt);
532 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
533     dev_info_t *pdip, caddr_t name);
534 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
535     int lcount, int tcount, int flags, int *circ);
536 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
537     int lcount, int tcount, int flags, int *circ);
538 static void fcp_remove_child(struct fcp_lun *plun);
539 static void fcp_watch(void *arg);
540 static void fcp_check_reset_delay(struct fcp_port *pptr);
541 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
542     struct fcp_lun *rlun, int tgt_cnt);
543 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
544 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
545     uchar_t *wwn, uint16_t lun);
546 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
547     struct fcp_lun *plun);
548 static void fcp_post_callback(struct fcp_pkt *cmd);
549 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
550 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
551 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
552     child_info_t *cip);
553 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
554     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
555     int tgt_cnt, int flags);
556 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
557     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
558     int tgt_cnt, int flags, int wait);
559 static void fcp_retransport_cmd(struct fcp_port *pptr,
560     struct fcp_pkt *cmd);
561 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
562     uint_t statistics);
563 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
564 static void fcp_update_targets(struct fcp_port *pptr,
565     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
566 static int fcp_call_finish_init(struct fcp_port *pptr,
567     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
568 static int fcp_call_finish_init_held(struct fcp_port *pptr,
569     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
570 static void fcp_reconfigure_luns(void * tgt_handle);
571 static void fcp_free_targets(struct fcp_port *pptr);
572 static void fcp_free_target(struct fcp_tgt *ptgt);
573 static int fcp_is_retryable(struct fcp_ipkt *icmd);
574 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
575 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
576 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
577 static void fcp_print_error(fc_packet_t *fpkt);
578 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
579     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
580 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
581 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
582     uint32_t *dev_cnt);
583 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
584 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
585 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
586     struct fcp_ioctl *, struct fcp_port **);
587 static char *fcp_get_lun_path(struct fcp_lun *plun);
588 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
589     int *rval);
590 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
595 static void fcp_reconfig_wait(struct fcp_port *pptr);
596 
597 /*
598  * New functions added for mpxio support
599  */
600 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
601     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
602 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
603     int tcount);
604 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
605     dev_info_t *pdip);
606 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
607 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
608 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
609 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
610 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
611     int what);
612 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
613     fc_packet_t *fpkt);
614 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
615 
616 /*
617  * New functions added for lun masking support
618  */
619 static void fcp_read_blacklist(dev_info_t *dip,
620     struct fcp_black_list_entry **pplun_blacklist);
621 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
622     struct fcp_black_list_entry **pplun_blacklist);
623 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
624     struct fcp_black_list_entry **pplun_blacklist);
625 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
626 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
627 
628 /*
629  * New functions to support software FCA (like fcoei)
630  */
631 static struct scsi_pkt *fcp_pseudo_init_pkt(
632 	struct scsi_address *ap, struct scsi_pkt *pkt,
633 	struct buf *bp, int cmdlen, int statuslen,
634 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
635 static void fcp_pseudo_destroy_pkt(
636 	struct scsi_address *ap, struct scsi_pkt *pkt);
637 static void fcp_pseudo_sync_pkt(
638 	struct scsi_address *ap, struct scsi_pkt *pkt);
639 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
640 static void fcp_pseudo_dmafree(
641 	struct scsi_address *ap, struct scsi_pkt *pkt);
642 
643 extern struct mod_ops	mod_driverops;
644 /*
645  * This variable is defined in modctl.c and set to '1' after the root driver
646  * and fs are loaded.  It serves as an indication that the root filesystem can
647  * be used.
648  */
649 extern int		modrootloaded;
650 /*
651  * This table contains strings associated with the SCSI sense key codes.  It
652  * is used by FCP to print a clear explanation of the code returned in the
653  * sense information by a device.
654  */
655 extern char		*sense_keys[];
656 /*
657  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
658  * under this device that the paths to a physical device are created when
659  * MPxIO is used.
660  */
661 extern dev_info_t	*scsi_vhci_dip;
662 
663 /*
664  * Report lun processing
665  */
666 #define	FCP_LUN_ADDRESSING		0x80
667 #define	FCP_PD_ADDRESSING		0x00
668 #define	FCP_VOLUME_ADDRESSING		0x40
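/*
 * These values correspond to the SAM LUN addressing-method codes carried in
 * the two high-order bits of the first byte of an 8-byte LUN entry: 0x00 is
 * peripheral device addressing, 0x40 is flat space (volume) addressing and
 * 0x80 is logical unit addressing.
 */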
669 
670 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
671 #define	MAX_INT_DMA			0x7fffffff
672 #define	FCP_MAX_SENSE_LEN		252
673 #define	FCP_MAX_RESPONSE_LEN		0xffffff
674 /*
675  * Property definitions
676  */
677 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
678 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
679 #define	TARGET_PROP	(char *)fcp_target_prop
680 #define	LUN_PROP	(char *)fcp_lun_prop
681 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
682 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
683 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
684 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
685 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
686 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
687 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
688 /*
689  * Shorthand macros.
690  */
691 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
692 #define	LUN_TGT		(plun->lun_tgt)
693 
694 /*
695  * Driver private macros
696  */
697 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
698 			((x) >= 'a' && (x) <= 'f') ?			\
699 			((x) - 'a' + 10) : ((x) - 'A' + 10))
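/*
 * Illustration (not part of the original source): FCP_ATOB('7') evaluates to
 * 7 and FCP_ATOB('a') == FCP_ATOB('A') == 10.  A caller converting an ASCII
 * WWN would typically combine two such digits per byte, e.g.:
 *
 *	byte = (FCP_ATOB(str[0]) << 4) | FCP_ATOB(str[1]);
 */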
700 
701 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
702 
703 #define	FCP_N_NDI_EVENTS						\
704 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
705 
706 #define	FCP_LINK_STATE_CHANGED(p, c)			\
707 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
708 
709 #define	FCP_TGT_STATE_CHANGED(t, c)			\
710 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
711 
712 #define	FCP_STATE_CHANGED(p, t, c)		\
713 	(FCP_TGT_STATE_CHANGED(t, c))
714 
715 #define	FCP_MUST_RETRY(fpkt)				\
716 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
717 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
718 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
719 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
720 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
721 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
722 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
723 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
724 
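/*
 * Sense-data shorthands for the checks below: additional sense code/qualifier
 * 0x3f/0x0e is "reported LUNs data has changed" and 0x25/0x00 is "logical
 * unit not supported".
 */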
725 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
726 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
727 	(es)->es_add_code == 0x3f &&		\
728 	(es)->es_qual_code == 0x0e)
729 
730 #define	FCP_SENSE_NO_LUN(es)			\
731 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
732 	(es)->es_add_code == 0x25 &&		\
733 	(es)->es_qual_code == 0x0)
734 
735 #define	FCP_VERSION		"20090729-1.190"
736 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
737 
738 #define	FCP_NUM_ELEMENTS(array)			\
739 	(sizeof (array) / sizeof ((array)[0]))
740 
741 /*
742  * Debugging, Error reporting, and tracing
743  */
744 #define	FCP_LOG_SIZE		1024 * 1024
745 
746 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
747 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
748 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
749 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
750 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
751 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
752 #define	FCP_LEVEL_7		0x00040
753 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
754 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
755 
756 
757 
758 /*
759  * Log contents to system messages file
760  */
761 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
762 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
763 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
764 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
765 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
766 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
767 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
768 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
769 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
770 
771 
772 /*
773  * Log contents to trace buffer
774  */
775 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
776 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
777 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
778 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
779 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
780 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
781 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
782 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
783 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
784 
785 
786 /*
787  * Log contents to both system messages file and trace buffer
788  */
789 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
792 				FC_TRACE_LOG_MSG)
793 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
794 				FC_TRACE_LOG_MSG)
795 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
796 				FC_TRACE_LOG_MSG)
797 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
798 				FC_TRACE_LOG_MSG)
799 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
800 				FC_TRACE_LOG_MSG)
801 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
802 				FC_TRACE_LOG_MSG)
803 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
804 				FC_TRACE_LOG_MSG)
805 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
806 				FC_TRACE_LOG_MSG)
807 #ifdef DEBUG
808 #define	FCP_DTRACE	fc_trace_debug
809 #else
810 #define	FCP_DTRACE
811 #endif
812 
813 #define	FCP_TRACE	fc_trace_debug
814 
815 static struct cb_ops fcp_cb_ops = {
816 	fcp_open,			/* open */
817 	fcp_close,			/* close */
818 	nodev,				/* strategy */
819 	nodev,				/* print */
820 	nodev,				/* dump */
821 	nodev,				/* read */
822 	nodev,				/* write */
823 	fcp_ioctl,			/* ioctl */
824 	nodev,				/* devmap */
825 	nodev,				/* mmap */
826 	nodev,				/* segmap */
827 	nochpoll,			/* chpoll */
828 	ddi_prop_op,			/* cb_prop_op */
829 	0,				/* streamtab */
830 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
831 	CB_REV,				/* rev */
832 	nodev,				/* aread */
833 	nodev				/* awrite */
834 };
835 
836 
837 static struct dev_ops fcp_ops = {
838 	DEVO_REV,
839 	0,
840 	ddi_getinfo_1to1,
841 	nulldev,		/* identify */
842 	nulldev,		/* probe */
843 	fcp_attach,		/* attach and detach are mandatory */
844 	fcp_detach,
845 	nodev,			/* reset */
846 	&fcp_cb_ops,		/* cb_ops */
847 	NULL,			/* bus_ops */
848 	NULL,			/* power */
849 };
850 
851 
852 char *fcp_version = FCP_NAME_VERSION;
853 
854 static struct modldrv modldrv = {
855 	&mod_driverops,
856 	FCP_NAME_VERSION,
857 	&fcp_ops
858 };
859 
860 
861 static struct modlinkage modlinkage = {
862 	MODREV_1,
863 	&modldrv,
864 	NULL
865 };
866 
867 
868 static fc_ulp_modinfo_t fcp_modinfo = {
869 	&fcp_modinfo,			/* ulp_handle */
870 	FCTL_ULP_MODREV_4,		/* ulp_rev */
871 	FC4_SCSI_FCP,			/* ulp_type */
872 	"fcp",				/* ulp_name */
873 	FCP_STATEC_MASK,		/* ulp_statec_mask */
874 	fcp_port_attach,		/* ulp_port_attach */
875 	fcp_port_detach,		/* ulp_port_detach */
876 	fcp_port_ioctl,			/* ulp_port_ioctl */
877 	fcp_els_callback,		/* ulp_els_callback */
878 	fcp_data_callback,		/* ulp_data_callback */
879 	fcp_statec_callback		/* ulp_statec_callback */
880 };
881 
882 #ifdef	DEBUG
883 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
884 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
885 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
886 				FCP_LEVEL_6 | FCP_LEVEL_7)
887 #else
888 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
889 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
890 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
891 				FCP_LEVEL_6 | FCP_LEVEL_7)
892 #endif
893 
894 /* FCP global variables */
895 int			fcp_bus_config_debug = 0;
896 static int		fcp_log_size = FCP_LOG_SIZE;
897 static int		fcp_trace = FCP_TRACE_DEFAULT;
898 static fc_trace_logq_t	*fcp_logq = NULL;
899 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
900 /*
901  * Auto-configuration is enabled by default.  The only way of disabling it is
902  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
903  */
904 static int		fcp_enable_auto_configuration = 1;
905 static int		fcp_max_bus_config_retries	= 4;
906 static int		fcp_lun_ready_retry = 300;
907 /*
908  * The value assigned to the following variable has changed several times due
909  * to a problem with the data underrun reporting of some firmware.  The
910  * current value of 50 gives a timeout value of 25 seconds for a maximum
911  * number of 256 LUNs.
912  */
913 static int		fcp_max_target_retries = 50;
914 /*
915  * Watchdog variables
916  * ------------------
917  *
918  * fcp_watchdog_init
919  *
920  *	Indicates if the watchdog timer is running or not.  This is actually
921  *	a counter of the number of Fibre Channel ports that have attached.  When
922  *	the first port attaches the watchdog is started.  When the last port
923  *	detaches the watchdog timer is stopped.
924  *
925  * fcp_watchdog_time
926  *
927  *	This is the watchdog clock counter.  It is incremented by
928  *	fcp_watchdog_timeout each time the watchdog timer expires.
929  *
930  * fcp_watchdog_timeout
931  *
932  *	Increment value of the variable fcp_watchdog_time as well as the
933  *	timeout value of the watchdog timer.  The unit is 1 second.  It
934  *	is strange that this is not a #define	but a variable since the code
935  *	never changes this value.  The reason why it can be said that the
936  *	unit is 1 second is because the number of ticks for the watchdog
937  *	timer is determined like this:
938  *
939  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
940  *				  drv_usectohz(1000000);
941  *
942  *	The value 1000000 is hard coded in the code.
943  *
944  * fcp_watchdog_tick
945  *
946  *	Watchdog timer value in ticks.
947  */
948 static int		fcp_watchdog_init = 0;
949 static int		fcp_watchdog_time = 0;
950 static int		fcp_watchdog_timeout = 1;
951 static int		fcp_watchdog_tick;
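/*
 * For illustration, a sketch (assumed, not copied from this file) of how the
 * variables above fit together when the first port attaches:
 *
 *	fcp_watchdog_tick = fcp_watchdog_timeout * drv_usectohz(1000000);
 *	fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
 *
 * fcp_watch() is then expected to advance fcp_watchdog_time by
 * fcp_watchdog_timeout and rearm itself on every expiration.
 */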
952 
953 /*
954  * fcp_offline_delay is a global variable to enable customisation of
955  * the timeout on link offlines or RSCNs. The default value is set
956  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
957  * specified in FCP4 Chapter 11 (see www.t10.org).
958  *
959  * The variable fcp_offline_delay is specified in SECONDS.
960  *
961  * If we made this a static var then the user would not be able to
962  * change it. This variable is set in fcp_attach().
963  */
964 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
965 
966 static void		*fcp_softstate = NULL; /* for soft state */
967 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
968 static kmutex_t		fcp_global_mutex;
969 static kmutex_t		fcp_ioctl_mutex;
970 static dev_info_t	*fcp_global_dip = NULL;
971 static timeout_id_t	fcp_watchdog_id;
972 const char		*fcp_lun_prop = "lun";
973 const char		*fcp_sam_lun_prop = "sam-lun";
974 const char		*fcp_target_prop = "target";
975 /*
976  * NOTE: consumers of "node-wwn" property include stmsboot in ON
977  * consolidation.
978  */
979 const char		*fcp_node_wwn_prop = "node-wwn";
980 const char		*fcp_port_wwn_prop = "port-wwn";
981 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
982 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
983 const char		*fcp_manual_config_only = "manual_configuration_only";
984 const char		*fcp_init_port_prop = "initiator-port";
985 const char		*fcp_tgt_port_prop = "target-port";
986 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
987 
988 static struct fcp_port	*fcp_port_head = NULL;
989 static ddi_eventcookie_t	fcp_insert_eid;
990 static ddi_eventcookie_t	fcp_remove_eid;
991 
992 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
993 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
994 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
995 };
996 
997 /*
998  * List of valid commands for the scsi_ioctl call
999  */
1000 static uint8_t scsi_ioctl_list[] = {
1001 	SCMD_INQUIRY,
1002 	SCMD_REPORT_LUN,
1003 	SCMD_READ_CAPACITY
1004 };
1005 
1006 /*
1007  * This is used to dummy up a REPORT_LUN response for cases
1008  * where the target doesn't support that command.
1009  */
1010 static uchar_t fcp_dummy_lun[] = {
1011 	0x00,		/* MSB length (length = no of luns * 8) */
1012 	0x00,
1013 	0x00,
1014 	0x08,		/* LSB length */
1015 	0x00,		/* MSB reserved */
1016 	0x00,
1017 	0x00,
1018 	0x00,		/* LSB reserved */
1019 	FCP_PD_ADDRESSING,
1020 	0x00,		/* LUN is ZERO at the first level */
1021 	0x00,
1022 	0x00,		/* second level is zero */
1023 	0x00,
1024 	0x00,		/* third level is zero */
1025 	0x00,
1026 	0x00		/* fourth level is zero */
1027 };
1028 
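/*
 * Lookup table that translates an arbitrated-loop physical address (AL_PA)
 * into the corresponding loop index ("switch" setting); AL_PA values that are
 * not valid yield zero.
 */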
1029 static uchar_t fcp_alpa_to_switch[] = {
1030 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1031 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1032 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1033 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1034 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1035 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1036 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1037 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1038 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1039 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1040 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1041 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1042 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1043 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1044 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1046 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1047 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1048 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1049 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1050 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1051 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1052 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1053 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1054 };
1055 
1056 static caddr_t pid = "SESS01	      ";
1057 
1058 #if	!defined(lint)
1059 
1060 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1061     fcp_port::fcp_next fcp_watchdog_id))
1062 
1063 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1064 
1065 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1066     fcp_insert_eid
1067     fcp_remove_eid
1068     fcp_watchdog_time))
1069 
1070 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1071     fcp_cb_ops
1072     fcp_ops
1073     callb_cpr))
1074 
1075 #endif /* lint */
1076 
1077 /*
1078  * This table is used to determine whether or not it's safe to copy in
1079  * the target node name for a lun.  Since all luns behind the same target
1080  * have the same wwnn, only targets that do not support multiple luns are
1081  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1082  */
1083 
1084 char *fcp_symmetric_disk_table[] = {
1085 	"SEAGATE ST",
1086 	"IBM	 DDYFT",
1087 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1088 	"SUN	 SENA",		/* SES device */
1089 	"SUN	 SESS01"	/* VICOM SVE box */
1090 };
1091 
1092 int fcp_symmetric_disk_table_size =
1093 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
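/*
 * A match against this table amounts to comparing an entry with the
 * concatenated vendor-id/product-id bytes of the standard INQUIRY data
 * (inq_vid is immediately followed by inq_pid in struct scsi_inquiry).
 * A sketch, using a hypothetical helper:
 *
 *	static int
 *	fcp_symmetric_table_lookup(struct scsi_inquiry *inq)
 *	{
 *		int i;
 *
 *		for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
 *			if (strncmp(inq->inq_vid, fcp_symmetric_disk_table[i],
 *			    strlen(fcp_symmetric_disk_table[i])) == 0)
 *				return (1);
 *		}
 *		return (0);
 *	}
 */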
1094 
1095 /*
1096  * This structure is bogus.  scsi_hba_attach_setup() requires this
1097  * information (the kernel will panic if it isn't passed in to the routine).
1098  * It remains to be determined what the actual impact to the system is, if
1099  * any, of providing this information.  Since DMA allocation is done in
1100  * pkt_init it may not have any impact.  These values are straight from the
1101  * Writing Device Drivers manual.
1102  */
1103 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1104 	DMA_ATTR_V0,	/* ddi_dma_attr version */
1105 	0,		/* low address */
1106 	0xffffffff,	/* high address */
1107 	0x00ffffff,	/* counter upper bound */
1108 	1,		/* alignment requirements */
1109 	0x3f,		/* burst sizes */
1110 	1,		/* minimum DMA access */
1111 	0xffffffff,	/* maximum DMA access */
1112 	(1 << 24) - 1,	/* segment boundary restrictions */
1113 	1,		/* scatter/gather list length */
1114 	512,		/* device granularity */
1115 	0		/* DMA flags */
1116 };
1117 
1118 /*
1119  * The _init(9e) return value should be that of mod_install(9f).  Under
1120  * some circumstances, a failure may not be related to mod_install(9f) and
1121  * one would then require a return value to indicate the failure.  Looking
1122  * at mod_install(9f), it is expected to return 0 for success and non-zero
1123  * for failure.  mod_install(9f) for device drivers further goes down the
1124  * calling chain and ends up in ddi_installdrv(), whose return values are
1125  * DDI_SUCCESS and DDI_FAILURE.  There are also other functions in the
1126  * calling chain of mod_install(9f) that return values like EINVAL, and
1127  * some even return -1.
1128  *
1129  * To work around the vagaries of the mod_install() calling chain, return
1130  * either 0 or ENODEV depending on the success or failure of mod_install().
1131  */
1132 int
1133 _init(void)
1134 {
1135 	int rval;
1136 
1137 	/*
1138 	 * Initialize the soft state framework (so ddi_soft_state_zalloc()
1139 	 * can be used later) before registering with the transport.
1140 	 */
1141 	if (ddi_soft_state_init(&fcp_softstate,
1142 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1143 		return (EINVAL);
1144 	}
1145 
1146 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1147 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1148 
1149 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1150 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1151 		mutex_destroy(&fcp_global_mutex);
1152 		mutex_destroy(&fcp_ioctl_mutex);
1153 		ddi_soft_state_fini(&fcp_softstate);
1154 		return (ENODEV);
1155 	}
1156 
1157 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1158 
1159 	if ((rval = mod_install(&modlinkage)) != 0) {
1160 		fc_trace_free_logq(fcp_logq);
1161 		(void) fc_ulp_remove(&fcp_modinfo);
1162 		mutex_destroy(&fcp_global_mutex);
1163 		mutex_destroy(&fcp_ioctl_mutex);
1164 		ddi_soft_state_fini(&fcp_softstate);
1165 		rval = ENODEV;
1166 	}
1167 
1168 	return (rval);
1169 }
1170 
1171 
1172 /*
1173  * the system is done with us as a driver, so clean up
1174  */
1175 int
1176 _fini(void)
1177 {
1178 	int rval;
1179 
1180 	/*
1181 	 * don't start cleaning up until we know that the module remove
1182 	 * has worked  -- if this works, then we know that each instance
1183 	 * has successfully been DDI_DETACHed
1184 	 */
1185 	if ((rval = mod_remove(&modlinkage)) != 0) {
1186 		return (rval);
1187 	}
1188 
1189 	(void) fc_ulp_remove(&fcp_modinfo);
1190 
1191 	ddi_soft_state_fini(&fcp_softstate);
1192 	mutex_destroy(&fcp_global_mutex);
1193 	mutex_destroy(&fcp_ioctl_mutex);
1194 	fc_trace_free_logq(fcp_logq);
1195 
1196 	return (rval);
1197 }
1198 
1199 
1200 int
1201 _info(struct modinfo *modinfop)
1202 {
1203 	return (mod_info(&modlinkage, modinfop));
1204 }
1205 
1206 
1207 /*
1208  * attach the module
1209  */
1210 static int
1211 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1212 {
1213 	int rval = DDI_SUCCESS;
1214 
1215 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1216 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1217 
1218 	if (cmd == DDI_ATTACH) {
1219 		/* The FCP pseudo device is created here. */
1220 		mutex_enter(&fcp_global_mutex);
1221 		fcp_global_dip = devi;
1222 		mutex_exit(&fcp_global_mutex);
1223 
1224 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1225 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1226 			ddi_report_dev(fcp_global_dip);
1227 		} else {
1228 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1229 			mutex_enter(&fcp_global_mutex);
1230 			fcp_global_dip = NULL;
1231 			mutex_exit(&fcp_global_mutex);
1232 
1233 			rval = DDI_FAILURE;
1234 		}
1235 		/*
1236 		 * We check the fcp_offline_delay property at this
1237 		 * point. This variable is global for the driver,
1238 		 * not specific to an instance.
1239 		 *
1240 		 * We do not recommend setting the value to less
1241 		 * than 10 seconds (RA_TOV_els), or greater than
1242 		 * 60 seconds.
1243 		 */
1244 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1245 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1246 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1247 		if ((fcp_offline_delay < 10) ||
1248 		    (fcp_offline_delay > 60)) {
1249 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1250 			    "to %d second(s). This is outside the "
1251 			    "recommended range of 10..60 seconds.",
1252 			    fcp_offline_delay);
1253 		}
1254 	}
1255 
1256 	return (rval);
1257 }
1258 
1259 
1260 /*ARGSUSED*/
1261 static int
1262 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1263 {
1264 	int	res = DDI_SUCCESS;
1265 
1266 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1267 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1268 
1269 	if (cmd == DDI_DETACH) {
1270 		/*
1271 		 * Check if there are active ports/threads. If there
1272 		 * are any, we will fail, else we will succeed (there
1273 		 * should not be much to clean up)
1274 		 */
1275 		mutex_enter(&fcp_global_mutex);
1276 		FCP_DTRACE(fcp_logq, "fcp",
1277 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1278 		    (void *) fcp_port_head);
1279 
1280 		if (fcp_port_head == NULL) {
1281 			ddi_remove_minor_node(fcp_global_dip, NULL);
1282 			fcp_global_dip = NULL;
1283 			mutex_exit(&fcp_global_mutex);
1284 		} else {
1285 			mutex_exit(&fcp_global_mutex);
1286 			res = DDI_FAILURE;
1287 		}
1288 	}
1289 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1290 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1291 
1292 	return (res);
1293 }
1294 
1295 
1296 /* ARGSUSED */
1297 static int
1298 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1299 {
1300 	if (otype != OTYP_CHR) {
1301 		return (EINVAL);
1302 	}
1303 
1304 	/*
1305 	 * Allow only root to talk.
1306 	 */
1307 	if (drv_priv(credp)) {
1308 		return (EPERM);
1309 	}
1310 
1311 	mutex_enter(&fcp_global_mutex);
1312 	if (fcp_oflag & FCP_EXCL) {
1313 		mutex_exit(&fcp_global_mutex);
1314 		return (EBUSY);
1315 	}
1316 
1317 	if (flag & FEXCL) {
1318 		if (fcp_oflag & FCP_OPEN) {
1319 			mutex_exit(&fcp_global_mutex);
1320 			return (EBUSY);
1321 		}
1322 		fcp_oflag |= FCP_EXCL;
1323 	}
1324 	fcp_oflag |= FCP_OPEN;
1325 	mutex_exit(&fcp_global_mutex);
1326 
1327 	return (0);
1328 }
1329 
1330 
1331 /* ARGSUSED */
1332 static int
1333 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1334 {
1335 	if (otype != OTYP_CHR) {
1336 		return (EINVAL);
1337 	}
1338 
1339 	mutex_enter(&fcp_global_mutex);
1340 	if (!(fcp_oflag & FCP_OPEN)) {
1341 		mutex_exit(&fcp_global_mutex);
1342 		return (ENODEV);
1343 	}
1344 	fcp_oflag = FCP_IDLE;
1345 	mutex_exit(&fcp_global_mutex);
1346 
1347 	return (0);
1348 }
1349 
1350 
1351 /*
1352  * fcp_ioctl
1353  *	Entry point for the FCP ioctls
1354  *
1355  * Input:
1356  *	See ioctl(9E)
1357  *
1358  * Output:
1359  *	See ioctl(9E)
1360  *
1361  * Returns:
1362  *	See ioctl(9E)
1363  *
1364  * Context:
1365  *	Kernel context.
1366  */
1367 /* ARGSUSED */
1368 static int
1369 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1370     int *rval)
1371 {
1372 	int			ret = 0;
1373 
1374 	mutex_enter(&fcp_global_mutex);
1375 	if (!(fcp_oflag & FCP_OPEN)) {
1376 		mutex_exit(&fcp_global_mutex);
1377 		return (ENXIO);
1378 	}
1379 	mutex_exit(&fcp_global_mutex);
1380 
1381 	switch (cmd) {
1382 	case FCP_TGT_INQUIRY:
1383 	case FCP_TGT_CREATE:
1384 	case FCP_TGT_DELETE:
1385 		ret = fcp_setup_device_data_ioctl(cmd,
1386 		    (struct fcp_ioctl *)data, mode, rval);
1387 		break;
1388 
1389 	case FCP_TGT_SEND_SCSI:
1390 		mutex_enter(&fcp_ioctl_mutex);
1391 		ret = fcp_setup_scsi_ioctl(
1392 		    (struct fcp_scsi_cmd *)data, mode, rval);
1393 		mutex_exit(&fcp_ioctl_mutex);
1394 		break;
1395 
1396 	case FCP_STATE_COUNT:
1397 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1398 		    mode, rval);
1399 		break;
1400 	case FCP_GET_TARGET_MAPPINGS:
1401 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1402 		    mode, rval);
1403 		break;
1404 	default:
1405 		fcp_log(CE_WARN, NULL,
1406 		    "!Invalid ioctl opcode = 0x%x", cmd);
1407 		ret	= EINVAL;
1408 	}
1409 
1410 	return (ret);
1411 }
1412 
1413 
1414 /*
1415  * fcp_setup_device_data_ioctl
1416  *	Setup handler for the "device data" style of
1417  *	ioctl for FCP.	See "fcp_util.h" for data structure
1418  *	definition.
1419  *
1420  * Input:
1421  *	cmd	= FCP ioctl command
1422  *	data	= ioctl data
1423  *	mode	= See ioctl(9E)
1424  *
1425  * Output:
1426  *	data	= ioctl data
1427  *	rval	= return value - see ioctl(9E)
1428  *
1429  * Returns:
1430  *	See ioctl(9E)
1431  *
1432  * Context:
1433  *	Kernel context.
1434  */
1435 /* ARGSUSED */
1436 static int
1437 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1438     int *rval)
1439 {
1440 	struct fcp_port	*pptr;
1441 	struct	device_data	*dev_data;
1442 	uint32_t		link_cnt;
1443 	la_wwn_t		*wwn_ptr = NULL;
1444 	struct fcp_tgt		*ptgt = NULL;
1445 	struct fcp_lun		*plun = NULL;
1446 	int			i, error;
1447 	struct fcp_ioctl	fioctl;
1448 
1449 #ifdef	_MULTI_DATAMODEL
1450 	switch (ddi_model_convert_from(mode & FMODELS)) {
1451 	case DDI_MODEL_ILP32: {
1452 		struct fcp32_ioctl f32_ioctl;
1453 
1454 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1455 		    sizeof (struct fcp32_ioctl), mode)) {
1456 			return (EFAULT);
1457 		}
1458 		fioctl.fp_minor = f32_ioctl.fp_minor;
1459 		fioctl.listlen = f32_ioctl.listlen;
1460 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1461 		break;
1462 	}
1463 	case DDI_MODEL_NONE:
1464 		if (ddi_copyin((void *)data, (void *)&fioctl,
1465 		    sizeof (struct fcp_ioctl), mode)) {
1466 			return (EFAULT);
1467 		}
1468 		break;
1469 	}
1470 
1471 #else	/* _MULTI_DATAMODEL */
1472 	if (ddi_copyin((void *)data, (void *)&fioctl,
1473 	    sizeof (struct fcp_ioctl), mode)) {
1474 		return (EFAULT);
1475 	}
1476 #endif	/* _MULTI_DATAMODEL */
1477 
1478 	/*
1479 	 * Right now we can assume that the minor number matches
1480 	 * this instance of fp. If this changes we will need to
1481 	 * revisit this logic.
1482 	 */
1483 	mutex_enter(&fcp_global_mutex);
1484 	pptr = fcp_port_head;
1485 	while (pptr) {
1486 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1487 			break;
1488 		} else {
1489 			pptr = pptr->port_next;
1490 		}
1491 	}
1492 	mutex_exit(&fcp_global_mutex);
1493 	if (pptr == NULL) {
1494 		return (ENXIO);
1495 	}
1496 	mutex_enter(&pptr->port_mutex);
1497 
1498 
1499 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1500 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1501 		mutex_exit(&pptr->port_mutex);
1502 		return (ENOMEM);
1503 	}
1504 
1505 	if (ddi_copyin(fioctl.list, dev_data,
1506 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1507 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1508 		mutex_exit(&pptr->port_mutex);
1509 		return (EFAULT);
1510 	}
1511 	link_cnt = pptr->port_link_cnt;
1512 
1513 	if (cmd == FCP_TGT_INQUIRY) {
1514 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1515 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1516 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1517 			/* This ioctl is requesting INQ info of local HBA */
1518 			mutex_exit(&pptr->port_mutex);
1519 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1520 			dev_data[0].dev_status = 0;
1521 			if (ddi_copyout(dev_data, fioctl.list,
1522 			    (sizeof (struct device_data)) * fioctl.listlen,
1523 			    mode)) {
1524 				kmem_free(dev_data,
1525 				    sizeof (*dev_data) * fioctl.listlen);
1526 				return (EFAULT);
1527 			}
1528 			kmem_free(dev_data,
1529 			    sizeof (*dev_data) * fioctl.listlen);
1530 #ifdef	_MULTI_DATAMODEL
1531 			switch (ddi_model_convert_from(mode & FMODELS)) {
1532 			case DDI_MODEL_ILP32: {
1533 				struct fcp32_ioctl f32_ioctl;
1534 				f32_ioctl.fp_minor = fioctl.fp_minor;
1535 				f32_ioctl.listlen = fioctl.listlen;
1536 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1537 				if (ddi_copyout((void *)&f32_ioctl,
1538 				    (void *)data,
1539 				    sizeof (struct fcp32_ioctl), mode)) {
1540 					return (EFAULT);
1541 				}
1542 				break;
1543 			}
1544 			case DDI_MODEL_NONE:
1545 				if (ddi_copyout((void *)&fioctl, (void *)data,
1546 				    sizeof (struct fcp_ioctl), mode)) {
1547 					return (EFAULT);
1548 				}
1549 				break;
1550 			}
1551 #else	/* _MULTI_DATAMODEL */
1552 			if (ddi_copyout((void *)&fioctl, (void *)data,
1553 			    sizeof (struct fcp_ioctl), mode)) {
1554 				return (EFAULT);
1555 			}
1556 #endif	/* _MULTI_DATAMODEL */
1557 			return (0);
1558 		}
1559 	}
1560 
1561 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1562 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1563 		mutex_exit(&pptr->port_mutex);
1564 		return (ENXIO);
1565 	}
1566 
1567 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1568 	    i++) {
1569 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1570 
1571 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1572 
1573 
1574 		dev_data[i].dev_status = ENXIO;
1575 
1576 		if ((ptgt = fcp_lookup_target(pptr,
1577 		    (uchar_t *)wwn_ptr)) == NULL) {
1578 			mutex_exit(&pptr->port_mutex);
1579 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1580 			    wwn_ptr, &error, 0) == NULL) {
1581 				dev_data[i].dev_status = ENODEV;
1582 				mutex_enter(&pptr->port_mutex);
1583 				continue;
1584 			} else {
1585 
1586 				dev_data[i].dev_status = EAGAIN;
1587 
1588 				mutex_enter(&pptr->port_mutex);
1589 				continue;
1590 			}
1591 		} else {
1592 			mutex_enter(&ptgt->tgt_mutex);
1593 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1594 			    FCP_TGT_BUSY)) {
1595 				dev_data[i].dev_status = EAGAIN;
1596 				mutex_exit(&ptgt->tgt_mutex);
1597 				continue;
1598 			}
1599 
1600 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1601 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1602 					dev_data[i].dev_status = ENOTSUP;
1603 				} else {
1604 					dev_data[i].dev_status = ENXIO;
1605 				}
1606 				mutex_exit(&ptgt->tgt_mutex);
1607 				continue;
1608 			}
1609 
1610 			switch (cmd) {
1611 			case FCP_TGT_INQUIRY:
1612 				/*
1613 				 * We report the device type of LUN 0
1614 				 * only, even though in some cases (like
1615 				 * maxstrat) the LUN 0 device type may be
1616 				 * 0x3f (invalid).  For bridge boxes the
1617 				 * target appears as a set of LUNs, and
1618 				 * the first LUN could be a device the
1619 				 * utility may not care about (like a
1620 				 * tape device).
1621 				 */
1622 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1623 				dev_data[i].dev_status = 0;
1624 				mutex_exit(&ptgt->tgt_mutex);
1625 
1626 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1627 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1628 				} else {
1629 					dev_data[i].dev0_type = plun->lun_type;
1630 				}
1631 				mutex_enter(&ptgt->tgt_mutex);
1632 				break;
1633 
1634 			case FCP_TGT_CREATE:
1635 				mutex_exit(&ptgt->tgt_mutex);
1636 				mutex_exit(&pptr->port_mutex);
1637 
1638 				/*
1639 				 * Serialize state change callbacks;
1640 				 * only one callback will be handled
1641 				 * at a time.
1642 				 */
1643 				mutex_enter(&fcp_global_mutex);
1644 				if (fcp_oflag & FCP_BUSY) {
1645 					mutex_exit(&fcp_global_mutex);
1646 					if (dev_data) {
1647 						kmem_free(dev_data,
1648 						    sizeof (*dev_data) *
1649 						    fioctl.listlen);
1650 					}
1651 					return (EBUSY);
1652 				}
1653 				fcp_oflag |= FCP_BUSY;
1654 				mutex_exit(&fcp_global_mutex);
1655 
1656 				dev_data[i].dev_status =
1657 				    fcp_create_on_demand(pptr,
1658 				    wwn_ptr->raw_wwn);
1659 
1660 				if (dev_data[i].dev_status != 0) {
1661 					char	buf[25];
1662 
1663 					for (i = 0; i < FC_WWN_SIZE; i++) {
1664 						(void) sprintf(&buf[i << 1],
1665 						    "%02x",
1666 						    wwn_ptr->raw_wwn[i]);
1667 					}
1668 
1669 					fcp_log(CE_WARN, pptr->port_dip,
1670 					    "!Failed to create nodes for"
1671 					    " pwwn=%s; error=%x", buf,
1672 					    dev_data[i].dev_status);
1673 				}
1674 
1675 				/* allow state change callbacks again */
1676 				mutex_enter(&fcp_global_mutex);
1677 				fcp_oflag &= ~FCP_BUSY;
1678 				mutex_exit(&fcp_global_mutex);
1679 
1680 				mutex_enter(&pptr->port_mutex);
1681 				mutex_enter(&ptgt->tgt_mutex);
1682 
1683 				break;
1684 
1685 			case FCP_TGT_DELETE:
1686 				break;
1687 
1688 			default:
1689 				fcp_log(CE_WARN, pptr->port_dip,
1690 				    "!Invalid device data ioctl "
1691 				    "opcode = 0x%x", cmd);
1692 			}
1693 			mutex_exit(&ptgt->tgt_mutex);
1694 		}
1695 	}
1696 	mutex_exit(&pptr->port_mutex);
1697 
1698 	if (ddi_copyout(dev_data, fioctl.list,
1699 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1700 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 		return (EFAULT);
1702 	}
1703 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1704 
1705 #ifdef	_MULTI_DATAMODEL
1706 	switch (ddi_model_convert_from(mode & FMODELS)) {
1707 	case DDI_MODEL_ILP32: {
1708 		struct fcp32_ioctl f32_ioctl;
1709 
1710 		f32_ioctl.fp_minor = fioctl.fp_minor;
1711 		f32_ioctl.listlen = fioctl.listlen;
1712 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1713 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1714 		    sizeof (struct fcp32_ioctl), mode)) {
1715 			return (EFAULT);
1716 		}
1717 		break;
1718 	}
1719 	case DDI_MODEL_NONE:
1720 		if (ddi_copyout((void *)&fioctl, (void *)data,
1721 		    sizeof (struct fcp_ioctl), mode)) {
1722 			return (EFAULT);
1723 		}
1724 		break;
1725 	}
1726 #else	/* _MULTI_DATAMODEL */
1727 
1728 	if (ddi_copyout((void *)&fioctl, (void *)data,
1729 	    sizeof (struct fcp_ioctl), mode)) {
1730 		return (EFAULT);
1731 	}
1732 #endif	/* _MULTI_DATAMODEL */
1733 
1734 	return (0);
1735 }
1736 
1737 /*
1738  * Fetch the target mappings (path, etc.) for all LUNs
1739  * on this port.
1740  */
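/*
 * A worked example of the buffer sizing done below (illustration only):
 * the caller passes the total byte size of its fc_hba_target_mappings_t
 * buffer in fioctl.listlen.  The number of fc_hba_mapping_entry_t slots
 * that fit is
 *
 *	(listlen - sizeof (fc_hba_target_mappings_t) +
 *	    sizeof (fc_hba_mapping_entry_t)) / sizeof (fc_hba_mapping_entry_t)
 *
 * presumably because the mappings header already embeds room for one
 * entry.  If more LUNs are online than fit, mapIndex keeps counting past
 * the buffer so that numLuns tells the caller how large a buffer to
 * retry with.
 */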
1741 /* ARGSUSED */
1742 static int
1743 fcp_get_target_mappings(struct fcp_ioctl *data,
1744     int mode, int *rval)
1745 {
1746 	struct fcp_port	    *pptr;
1747 	fc_hba_target_mappings_t    *mappings;
1748 	fc_hba_mapping_entry_t	    *map;
1749 	struct fcp_tgt	    *ptgt = NULL;
1750 	struct fcp_lun	    *plun = NULL;
1751 	int			    i, mapIndex, mappingSize;
1752 	int			    listlen;
1753 	struct fcp_ioctl	    fioctl;
1754 	char			    *path;
1755 	fcp_ent_addr_t		    sam_lun_addr;
1756 
1757 #ifdef	_MULTI_DATAMODEL
1758 	switch (ddi_model_convert_from(mode & FMODELS)) {
1759 	case DDI_MODEL_ILP32: {
1760 		struct fcp32_ioctl f32_ioctl;
1761 
1762 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1763 		    sizeof (struct fcp32_ioctl), mode)) {
1764 			return (EFAULT);
1765 		}
1766 		fioctl.fp_minor = f32_ioctl.fp_minor;
1767 		fioctl.listlen = f32_ioctl.listlen;
1768 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1769 		break;
1770 	}
1771 	case DDI_MODEL_NONE:
1772 		if (ddi_copyin((void *)data, (void *)&fioctl,
1773 		    sizeof (struct fcp_ioctl), mode)) {
1774 			return (EFAULT);
1775 		}
1776 		break;
1777 	}
1778 
1779 #else	/* _MULTI_DATAMODEL */
1780 	if (ddi_copyin((void *)data, (void *)&fioctl,
1781 	    sizeof (struct fcp_ioctl), mode)) {
1782 		return (EFAULT);
1783 	}
1784 #endif	/* _MULTI_DATAMODEL */
1785 
1786 	/*
1787 	 * Right now we can assume that the minor number matches with
1788 	 * this instance of fp. If this changes we will need to
1789 	 * revisit this logic.
1790 	 */
1791 	mutex_enter(&fcp_global_mutex);
1792 	pptr = fcp_port_head;
1793 	while (pptr) {
1794 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1795 			break;
1796 		} else {
1797 			pptr = pptr->port_next;
1798 		}
1799 	}
1800 	mutex_exit(&fcp_global_mutex);
1801 	if (pptr == NULL) {
1802 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1803 		    fioctl.fp_minor);
1804 		return (ENXIO);
1805 	}
1806 
1807 
1808 	/* For this ioctl, listlen carries the total buffer size in bytes */
1809 	mappingSize = fioctl.listlen;
1810 
1811 	/* Now calculate how many mapping entries will fit */
1812 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1813 	    - sizeof (fc_hba_target_mappings_t);
1814 	if (listlen <= 0) {
1815 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1816 		return (ENXIO);
1817 	}
1818 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1819 
1820 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1821 		return (ENOMEM);
1822 	}
1823 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1824 
1825 	/* Now get to work */
1826 	mapIndex = 0;
1827 
1828 	mutex_enter(&pptr->port_mutex);
1829 	/* Loop through all targets on this port */
1830 	for (i = 0; i < FCP_NUM_HASH; i++) {
1831 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1832 		    ptgt = ptgt->tgt_next) {
1833 
1834 
1835 			/* Loop through all LUNs on this target */
1836 			for (plun = ptgt->tgt_lun; plun != NULL;
1837 			    plun = plun->lun_next) {
1838 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1839 					continue;
1840 				}
1841 
1842 				path = fcp_get_lun_path(plun);
1843 				if (path == NULL) {
1844 					continue;
1845 				}
1846 
1847 				if (mapIndex >= listlen) {
1848 					mapIndex++;
1849 					kmem_free(path, MAXPATHLEN);
1850 					continue;
1851 				}
1852 				map = &mappings->entries[mapIndex++];
1853 				bcopy(path, map->targetDriver,
1854 				    sizeof (map->targetDriver));
1855 				map->d_id = ptgt->tgt_d_id;
1856 				map->busNumber = 0;
1857 				map->targetNumber = ptgt->tgt_d_id;
1858 				map->osLUN = plun->lun_num;
1859 
1860 				/*
1861 				 * The LUN was byte-swapped when it was
1862 				 * stored in lun_addr.  Swap it back
1863 				 * before returning it to user land.
1864 				 */
1865 
1866 				sam_lun_addr.ent_addr_0 =
1867 				    BE_16(plun->lun_addr.ent_addr_0);
1868 				sam_lun_addr.ent_addr_1 =
1869 				    BE_16(plun->lun_addr.ent_addr_1);
1870 				sam_lun_addr.ent_addr_2 =
1871 				    BE_16(plun->lun_addr.ent_addr_2);
1872 				sam_lun_addr.ent_addr_3 =
1873 				    BE_16(plun->lun_addr.ent_addr_3);
1874 
1875 				bcopy(&sam_lun_addr, &map->samLUN,
1876 				    FCP_LUN_SIZE);
1877 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1878 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1879 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1880 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1881 
1882 				if (plun->lun_guid) {
1883 
1884 					/* convert ascii wwn to bytes */
1885 					fcp_ascii_to_wwn(plun->lun_guid,
1886 					    map->guid, sizeof (map->guid));
1887 
1888 					if ((sizeof (map->guid)) <
1889 					    plun->lun_guid_size / 2) {
1890 						cmn_err(CE_WARN,
1891 						    "fcp_get_target_mappings: "
1892 						    "guid copy space "
1893 						    "insufficient. "
1894 						    "Copy Truncation - "
1895 						    "available %d; need %d",
1896 						    (int)sizeof (map->guid),
1897 						    (int)
1898 						    plun->lun_guid_size / 2);
1899 					}
1900 				}
1901 				kmem_free(path, MAXPATHLEN);
1902 			}
1903 		}
1904 	}
1905 	mutex_exit(&pptr->port_mutex);
1906 	mappings->numLuns = mapIndex;
1907 
1908 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1909 		kmem_free(mappings, mappingSize);
1910 		return (EFAULT);
1911 	}
1912 	kmem_free(mappings, mappingSize);
1913 
1914 #ifdef	_MULTI_DATAMODEL
1915 	switch (ddi_model_convert_from(mode & FMODELS)) {
1916 	case DDI_MODEL_ILP32: {
1917 		struct fcp32_ioctl f32_ioctl;
1918 
1919 		f32_ioctl.fp_minor = fioctl.fp_minor;
1920 		f32_ioctl.listlen = fioctl.listlen;
1921 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1922 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1923 		    sizeof (struct fcp32_ioctl), mode)) {
1924 			return (EFAULT);
1925 		}
1926 		break;
1927 	}
1928 	case DDI_MODEL_NONE:
1929 		if (ddi_copyout((void *)&fioctl, (void *)data,
1930 		    sizeof (struct fcp_ioctl), mode)) {
1931 			return (EFAULT);
1932 		}
1933 		break;
1934 	}
1935 #else	/* _MULTI_DATAMODEL */
1936 
1937 	if (ddi_copyout((void *)&fioctl, (void *)data,
1938 	    sizeof (struct fcp_ioctl), mode)) {
1939 		return (EFAULT);
1940 	}
1941 #endif	/* _MULTI_DATAMODEL */
1942 
1943 	return (0);
1944 }
1945 
1946 /*
1947  * fcp_setup_scsi_ioctl
1948  *	Setup handler for the "scsi passthru" style of
1949  *	ioctl for FCP.	See "fcp_util.h" for data structure
1950  *	definition.
1951  *
1952  * Input:
1953  *	u_fscsi	= ioctl data (user address space)
1954  *	mode	= See ioctl(9E)
1955  *
1956  * Output:
1957  *	u_fscsi	= ioctl data (user address space)
1958  *	rval	= return value - see ioctl(9E)
1959  *
1960  * Returns:
1961  *	0	= OK
1962  *	EAGAIN	= See errno.h
1963  *	EBUSY	= See errno.h
1964  *	EFAULT	= See errno.h
1965  *	EINTR	= See errno.h
1966  *	EINVAL	= See errno.h
1967  *	EIO	= See errno.h
1968  *	ENOMEM	= See errno.h
1969  *	ENXIO	= See errno.h
1970  *
1971  * Context:
1972  *	Kernel context.
1973  */
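/*
 * A hedged user-land sketch of driving this passthru.  The names fcp_fd,
 * port_instance and tgt_pwwn are hypothetical, and the INQUIRY opcode is
 * only accepted if it appears in scsi_ioctl_list, which
 * fcp_send_scsi_ioctl() checks first.  The fields shown are the ones
 * copied in and validated by this handler:
 *
 *	struct fcp_scsi_cmd	fscsi;
 *	uchar_t			cdb[CDB_GROUP0] = { SCMD_INQUIRY, 0, 0, 0,
 *				    0xff, 0 };
 *	uchar_t			inq[0xff], sense[64];
 *
 *	bzero(&fscsi, sizeof (fscsi));
 *	fscsi.scsi_fc_port_num	= port_instance;
 *	bcopy(tgt_pwwn, &fscsi.scsi_fc_pwwn, FC_WWN_SIZE);
 *	fscsi.scsi_lun		= 0;
 *	fscsi.scsi_flags	= FCP_SCSI_READ;
 *	fscsi.scsi_timeout	= 30;
 *	fscsi.scsi_cdbbufaddr	= (caddr_t)cdb;
 *	fscsi.scsi_cdblen	= sizeof (cdb);
 *	fscsi.scsi_bufaddr	= (caddr_t)inq;
 *	fscsi.scsi_buflen	= sizeof (inq);
 *	fscsi.scsi_rqbufaddr	= (caddr_t)sense;
 *	fscsi.scsi_rqlen	= sizeof (sense);
 *	(void) ioctl(fcp_fd, FCP_TGT_SEND_SCSI, &fscsi);
 *
 * On completion scsi_bufstatus holds the SCSI status, scsi_bufresid and
 * scsi_rqresid the data and sense residuals, and scsi_fc_status plus the
 * scsi_pkt_* fields the transport details filled in by
 * fcp_send_scsi_ioctl().
 */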
1974 /* ARGSUSED */
1975 static int
1976 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1977     int mode, int *rval)
1978 {
1979 	int			ret		= 0;
1980 	int			temp_ret;
1981 	caddr_t			k_cdbbufaddr	= NULL;
1982 	caddr_t			k_bufaddr	= NULL;
1983 	caddr_t			k_rqbufaddr	= NULL;
1984 	caddr_t			u_cdbbufaddr;
1985 	caddr_t			u_bufaddr;
1986 	caddr_t			u_rqbufaddr;
1987 	struct fcp_scsi_cmd	k_fscsi;
1988 
1989 	/*
1990 	 * Get fcp_scsi_cmd array element from user address space
1991 	 */
1992 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1993 	    != 0) {
1994 		return (ret);
1995 	}
1996 
1997 
1998 	/*
1999 	 * Even though kmem_alloc() checks the validity of the buffer
2000 	 * length, this check is still needed to reject a zero or
2001 	 * out-of-range buffer length before the allocations below.
2002 	 */
2003 	if ((k_fscsi.scsi_cdblen <= 0) ||
2004 	    (k_fscsi.scsi_buflen <= 0) ||
2005 	    (k_fscsi.scsi_buflen > FCP_MAX_RESPONSE_LEN) ||
2006 	    (k_fscsi.scsi_rqlen <= 0) ||
2007 	    (k_fscsi.scsi_rqlen > FCP_MAX_SENSE_LEN)) {
2008 		return (EINVAL);
2009 	}
2010 
2011 	/*
2012 	 * Allocate data for fcp_scsi_cmd pointer fields
2013 	 */
2014 	if (ret == 0) {
2015 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2016 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2017 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
2018 
2019 		if (k_cdbbufaddr == NULL ||
2020 		    k_bufaddr	 == NULL ||
2021 		    k_rqbufaddr	 == NULL) {
2022 			ret = ENOMEM;
2023 		}
2024 	}
2025 
2026 	/*
2027 	 * Get fcp_scsi_cmd pointer fields from user
2028 	 * address space
2029 	 */
2030 	if (ret == 0) {
2031 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2032 		u_bufaddr    = k_fscsi.scsi_bufaddr;
2033 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
2034 
2035 		if (ddi_copyin(u_cdbbufaddr,
2036 		    k_cdbbufaddr,
2037 		    k_fscsi.scsi_cdblen,
2038 		    mode)) {
2039 			ret = EFAULT;
2040 		} else if (ddi_copyin(u_bufaddr,
2041 		    k_bufaddr,
2042 		    k_fscsi.scsi_buflen,
2043 		    mode)) {
2044 			ret = EFAULT;
2045 		} else if (ddi_copyin(u_rqbufaddr,
2046 		    k_rqbufaddr,
2047 		    k_fscsi.scsi_rqlen,
2048 		    mode)) {
2049 			ret = EFAULT;
2050 		}
2051 	}
2052 
2053 	/*
2054 	 * Send scsi command (blocking)
2055 	 */
2056 	if (ret == 0) {
2057 		/*
2058 		 * Prior to sending the scsi command, the
2059 		 * fcp_scsi_cmd data structure must contain kernel,
2060 		 * not user, addresses.
2061 		 */
2062 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2063 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2064 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2065 
2066 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2067 
2068 		/*
2069 		 * After sending the scsi command, the
2070 		 * fcp_scsi_cmd data structure must contain user,
2071 		 * not kernel, addresses.
2072 		 */
2073 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2074 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2075 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2076 	}
2077 
2078 	/*
2079 	 * Put fcp_scsi_cmd pointer fields to user address space
2080 	 */
2081 	if (ret == 0) {
2082 		if (ddi_copyout(k_cdbbufaddr,
2083 		    u_cdbbufaddr,
2084 		    k_fscsi.scsi_cdblen,
2085 		    mode)) {
2086 			ret = EFAULT;
2087 		} else if (ddi_copyout(k_bufaddr,
2088 		    u_bufaddr,
2089 		    k_fscsi.scsi_buflen,
2090 		    mode)) {
2091 			ret = EFAULT;
2092 		} else if (ddi_copyout(k_rqbufaddr,
2093 		    u_rqbufaddr,
2094 		    k_fscsi.scsi_rqlen,
2095 		    mode)) {
2096 			ret = EFAULT;
2097 		}
2098 	}
2099 
2100 	/*
2101 	 * Free data for fcp_scsi_cmd pointer fields
2102 	 */
2103 	if (k_cdbbufaddr != NULL) {
2104 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2105 	}
2106 	if (k_bufaddr != NULL) {
2107 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2108 	}
2109 	if (k_rqbufaddr != NULL) {
2110 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2111 	}
2112 
2113 	/*
2114 	 * Put fcp_scsi_cmd array element to user address space
2115 	 */
2116 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2117 	if (temp_ret != 0) {
2118 		ret = temp_ret;
2119 	}
2120 
2121 	/*
2122 	 * Return status
2123 	 */
2124 	return (ret);
2125 }
2126 
2127 
2128 /*
2129  * fcp_copyin_scsi_cmd
2130  *	Copy in fcp_scsi_cmd data structure from user address space.
2131  *	The data may be in 32 bit or 64 bit modes.
2132  *
2133  * Input:
2134  *	base_addr	= from address (user address space)
2135  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2136  *
2137  * Output:
2138  *	fscsi		= to address (kernel address space)
2139  *
2140  * Returns:
2141  *	0	= OK
2142  *	EFAULT	= Error
2143  *
2144  * Context:
2145  *	Kernel context.
2146  */
2147 static int
2148 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2149 {
2150 #ifdef	_MULTI_DATAMODEL
2151 	struct fcp32_scsi_cmd	f32scsi;
2152 
2153 	switch (ddi_model_convert_from(mode & FMODELS)) {
2154 	case DDI_MODEL_ILP32:
2155 		/*
2156 		 * Copy data from user address space
2157 		 */
2158 		if (ddi_copyin((void *)base_addr,
2159 		    &f32scsi,
2160 		    sizeof (struct fcp32_scsi_cmd),
2161 		    mode)) {
2162 			return (EFAULT);
2163 		}
2164 		/*
2165 		 * Convert from 32 bit to 64 bit
2166 		 */
2167 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2168 		break;
2169 	case DDI_MODEL_NONE:
2170 		/*
2171 		 * Copy data from user address space
2172 		 */
2173 		if (ddi_copyin((void *)base_addr,
2174 		    fscsi,
2175 		    sizeof (struct fcp_scsi_cmd),
2176 		    mode)) {
2177 			return (EFAULT);
2178 		}
2179 		break;
2180 	}
2181 #else	/* _MULTI_DATAMODEL */
2182 	/*
2183 	 * Copy data from user address space
2184 	 */
2185 	if (ddi_copyin((void *)base_addr,
2186 	    fscsi,
2187 	    sizeof (struct fcp_scsi_cmd),
2188 	    mode)) {
2189 		return (EFAULT);
2190 	}
2191 #endif	/* _MULTI_DATAMODEL */
2192 
2193 	return (0);
2194 }
2195 
2196 
2197 /*
2198  * fcp_copyout_scsi_cmd
2199  *	Copy out fcp_scsi_cmd data structure to user address space.
2200  *	The data may be in 32 bit or 64 bit modes.
2201  *
2202  * Input:
2203  *	fscsi		= from address (kernel address space)
2204  *	mode		= See ioctl(9E) and ddi_copyout(9F)
2205  *
2206  * Output:
2207  *	base_addr	= to address (user address space)
2208  *
2209  * Returns:
2210  *	0	= OK
2211  *	EFAULT	= Error
2212  *
2213  * Context:
2214  *	Kernel context.
2215  */
2216 static int
2217 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2218 {
2219 #ifdef	_MULTI_DATAMODEL
2220 	struct fcp32_scsi_cmd	f32scsi;
2221 
2222 	switch (ddi_model_convert_from(mode & FMODELS)) {
2223 	case DDI_MODEL_ILP32:
2224 		/*
2225 		 * Convert from 64 bit to 32 bit
2226 		 */
2227 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2228 		/*
2229 		 * Copy data to user address space
2230 		 */
2231 		if (ddi_copyout(&f32scsi,
2232 		    (void *)base_addr,
2233 		    sizeof (struct fcp32_scsi_cmd),
2234 		    mode)) {
2235 			return (EFAULT);
2236 		}
2237 		break;
2238 	case DDI_MODEL_NONE:
2239 		/*
2240 		 * Copy data to user address space
2241 		 */
2242 		if (ddi_copyout(fscsi,
2243 		    (void *)base_addr,
2244 		    sizeof (struct fcp_scsi_cmd),
2245 		    mode)) {
2246 			return (EFAULT);
2247 		}
2248 		break;
2249 	}
2250 #else	/* _MULTI_DATAMODEL */
2251 	/*
2252 	 * Copy data to user address space
2253 	 */
2254 	if (ddi_copyout(fscsi,
2255 	    (void *)base_addr,
2256 	    sizeof (struct fcp_scsi_cmd),
2257 	    mode)) {
2258 		return (EFAULT);
2259 	}
2260 #endif	/* _MULTI_DATAMODEL */
2261 
2262 	return (0);
2263 }
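/*
 * Why the ILP32 leg exists in the two copy routines above: a 32-bit
 * caller lays out struct fcp_scsi_cmd with 32-bit pointers (presumably
 * caddr32_t in struct fcp32_scsi_cmd), so its image is narrower than the
 * kernel's native LP64 view.  Conceptually the conversion macros widen
 * or narrow each field one at a time, e.g. (a sketch, not the actual
 * macro text):
 *
 *	fscsi->scsi_cdbbufaddr =
 *	    (caddr_t)(uintptr_t)f32scsi.scsi_cdbbufaddr;
 *	fscsi->scsi_cdblen = f32scsi.scsi_cdblen;
 */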
2264 
2265 
2266 /*
2267  * fcp_send_scsi_ioctl
2268  *	Sends the SCSI command in blocking mode.
2269  *
2270  * Input:
2271  *	fscsi		= SCSI command data structure
2272  *
2273  * Output:
2274  *	fscsi		= SCSI command data structure
2275  *
2276  * Returns:
2277  *	0	= OK
2278  *	EAGAIN	= See errno.h
2279  *	EBUSY	= See errno.h
2280  *	EINTR	= See errno.h
2281  *	EINVAL	= See errno.h
2282  *	EIO	= See errno.h
2283  *	ENOMEM	= See errno.h
2284  *	ENXIO	= See errno.h
2285  *
2286  * Context:
2287  *	Kernel context.
2288  */
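/*
 * Rough order of operations in this routine (a descriptive summary of
 * the code below, added for readability):
 *
 *	1. Validate the CDB opcode against scsi_ioctl_list and the input
 *	   flags/lengths (read-only, CDB no larger than FCP_CDB_SIZE).
 *	2. Locate the fcp_port from scsi_fc_port_num and mark the port
 *	   busy for PM via fc_ulp_busy_port().
 *	3. Find the target by port WWN, creating it on demand with
 *	   fcp_port_create_tgt() if needed, and mark it FCP_TGT_BUSY.
 *	4. Look up the LUN, allocate an internal packet (icmd), build the
 *	   FCP_CMD payload and FC frame header, and hand the packet to
 *	   fc_ulp_transport().
 *	5. Block on the ipkt semaphore, then decode the FCP response:
 *	   SCSI status, residuals, sense data, and a possible REPORT_LUN
 *	   driven reconfiguration.
 *	6. Clean up the icmd, idle the port, and de-mark the target
 *	   unless a LUN reconfiguration is pending.
 */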
2289 static int
2290 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2291 {
2292 	struct fcp_lun	*plun		= NULL;
2293 	struct fcp_port	*pptr		= NULL;
2294 	struct fcp_tgt	*ptgt		= NULL;
2295 	fc_packet_t		*fpkt		= NULL;
2296 	struct fcp_ipkt	*icmd		= NULL;
2297 	int			target_created	= FALSE;
2298 	fc_frame_hdr_t		*hp;
2299 	struct fcp_cmd		fcp_cmd;
2300 	struct fcp_cmd		*fcmd;
2301 	union scsi_cdb		*scsi_cdb;
2302 	la_wwn_t		*wwn_ptr;
2303 	int			nodma;
2304 	struct fcp_rsp		*rsp;
2305 	struct fcp_rsp_info	*rsp_info;
2306 	caddr_t			rsp_sense;
2307 	int			buf_len;
2308 	int			info_len;
2309 	int			sense_len;
2310 	struct scsi_extended_sense	*sense_to = NULL;
2311 	timeout_id_t		tid;
2312 	uint8_t			reconfig_lun = FALSE;
2313 	uint8_t			reconfig_pending = FALSE;
2314 	uint8_t			scsi_cmd;
2315 	int			rsp_len;
2316 	int			cmd_index;
2317 	int			fc_status;
2318 	int			pkt_state;
2319 	int			pkt_action;
2320 	int			pkt_reason;
2321 	int			ret, xport_retval = ~FC_SUCCESS;
2322 	int			lcount;
2323 	int			tcount;
2324 	int			reconfig_status;
2325 	int			port_busy = FALSE;
2326 	uchar_t			*lun_string;
2327 
2328 	/*
2329 	 * Check valid SCSI command
2330 	 */
2331 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2332 	ret = EINVAL;
2333 	for (cmd_index = 0;
2334 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2335 	    ret != 0;
2336 	    cmd_index++) {
2337 		/*
2338 		 * First byte of CDB is the SCSI command
2339 		 */
2340 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2341 			ret = 0;
2342 		}
2343 	}
2344 
2345 	/*
2346 	 * Check inputs
2347 	 */
2348 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2349 		ret = EINVAL;
2350 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2351 		/* the CDB must be no larger than FCP_CDB_SIZE */
2352 		ret = EINVAL;
2353 	}
2354 
2355 
2356 	/*
2357 	 * Find FC port
2358 	 */
2359 	if (ret == 0) {
2360 		/*
2361 		 * Acquire global mutex
2362 		 */
2363 		mutex_enter(&fcp_global_mutex);
2364 
2365 		pptr = fcp_port_head;
2366 		while (pptr) {
2367 			if (pptr->port_instance ==
2368 			    (uint32_t)fscsi->scsi_fc_port_num) {
2369 				break;
2370 			} else {
2371 				pptr = pptr->port_next;
2372 			}
2373 		}
2374 
2375 		if (pptr == NULL) {
2376 			ret = ENXIO;
2377 		} else {
2378 			/*
2379 			 * fc_ulp_busy_port() can raise power, so we must
2380 			 * not hold any mutexes involved in PM here.
2381 			 */
2382 			mutex_exit(&fcp_global_mutex);
2383 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2384 		}
2385 
2386 		if (ret == 0) {
2387 
2388 			/* remember port is busy, so we will release later */
2389 			port_busy = TRUE;
2390 
2391 			/*
2392 			 * If there is a reconfiguration in progress, wait
2393 			 * for it to complete.
2394 			 */
2395 
2396 			fcp_reconfig_wait(pptr);
2397 
2398 			/* reacquire mutexes in order */
2399 			mutex_enter(&fcp_global_mutex);
2400 			mutex_enter(&pptr->port_mutex);
2401 
2402 			/*
2403 			 * Will port accept DMA?
2404 			 */
2405 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2406 			    ? 1 : 0;
2407 
2408 			/*
2409 			 * If init or offline, the device is not known.
2410 			 *
2411 			 * If we are discovering (onlining), we obviously
2412 			 * cannot provide reliable data about devices
2413 			 * until discovery is complete.
2414 			 */
2415 			if (pptr->port_state &	  (FCP_STATE_INIT |
2416 			    FCP_STATE_OFFLINE)) {
2417 				ret = ENXIO;
2418 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2419 				ret = EBUSY;
2420 			} else {
2421 				/*
2422 				 * Find target from pwwn
2423 				 *
2424 				 * The wwn must be put into a local
2425 				 * variable to ensure alignment.
2426 				 */
2427 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2428 				ptgt = fcp_lookup_target(pptr,
2429 				    (uchar_t *)wwn_ptr);
2430 
2431 				/*
2432 				 * If the target was not found, create it.
2433 				 */
2434 				if (ptgt == NULL) {
2435 					/*
2436 					 * Note: Still have global &
2437 					 * port mutexes
2438 					 */
2439 					mutex_exit(&pptr->port_mutex);
2440 					ptgt = fcp_port_create_tgt(pptr,
2441 					    wwn_ptr, &ret, &fc_status,
2442 					    &pkt_state, &pkt_action,
2443 					    &pkt_reason);
2444 					mutex_enter(&pptr->port_mutex);
2445 
2446 					fscsi->scsi_fc_status  = fc_status;
2447 					fscsi->scsi_pkt_state  =
2448 					    (uchar_t)pkt_state;
2449 					fscsi->scsi_pkt_reason = pkt_reason;
2450 					fscsi->scsi_pkt_action =
2451 					    (uchar_t)pkt_action;
2452 
2453 					if (ptgt != NULL) {
2454 						target_created = TRUE;
2455 					} else if (ret == 0) {
2456 						ret = ENOMEM;
2457 					}
2458 				}
2459 
2460 				if (ret == 0) {
2461 					/*
2462 					 * Acquire target
2463 					 */
2464 					mutex_enter(&ptgt->tgt_mutex);
2465 
2466 					/*
2467 					 * If the target is marked or busy,
2468 					 * it cannot be used.
2469 					 */
2470 					if (ptgt->tgt_state &
2471 					    (FCP_TGT_MARK |
2472 					    FCP_TGT_BUSY)) {
2473 						ret = EBUSY;
2474 					} else {
2475 						/*
2476 						 * Mark target as busy
2477 						 */
2478 						ptgt->tgt_state |=
2479 						    FCP_TGT_BUSY;
2480 					}
2481 
2482 					/*
2483 					 * Release target
2484 					 */
2485 					lcount = pptr->port_link_cnt;
2486 					tcount = ptgt->tgt_change_cnt;
2487 					mutex_exit(&ptgt->tgt_mutex);
2488 				}
2489 			}
2490 
2491 			/*
2492 			 * Release port
2493 			 */
2494 			mutex_exit(&pptr->port_mutex);
2495 		}
2496 
2497 		/*
2498 		 * Release global mutex
2499 		 */
2500 		mutex_exit(&fcp_global_mutex);
2501 	}
2502 
2503 	if (ret == 0) {
2504 		uint64_t belun = BE_64(fscsi->scsi_lun);
2505 
2506 		/*
2507 		 * If it's a target device, find lun from pwwn
2508 		 * The wwn must be put into a local
2509 		 * variable to ensure alignment.
2510 		 */
2511 		mutex_enter(&pptr->port_mutex);
2512 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2513 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2514 			/* this is not a target */
2515 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2516 			ret = ENXIO;
2517 		} else if ((belun << 16) != 0) {
2518 			/*
2519 			 * Since fcp only supports the PD and LU addressing
2520 			 * methods so far, the last 6 bytes of a valid LUN
2521 			 * are expected to be filled with 00h.
2522 			 */
2523 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2524 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2525 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2526 			    (uint8_t)(belun >> 62), belun);
2527 			ret = ENXIO;
2528 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2529 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2530 			/*
2531 			 * This is a SCSI target, but no LUN at this
2532 			 * address.
2533 			 *
2534 			 * In the future, we may want to send this to
2535 			 * the target, and let it respond
2536 			 * appropriately
2537 			 */
2538 			ret = ENXIO;
2539 		}
2540 		mutex_exit(&pptr->port_mutex);
2541 	}
2542 
2543 	/*
2544 	 * Finished grabbing external resources
2545 	 * Allocate internal packet (icmd)
2546 	 */
2547 	if (ret == 0) {
2548 		/*
2549 		 * Calc rsp len assuming rsp info included
2550 		 */
2551 		rsp_len = sizeof (struct fcp_rsp) +
2552 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2553 
2554 		icmd = fcp_icmd_alloc(pptr, ptgt,
2555 		    sizeof (struct fcp_cmd),
2556 		    rsp_len,
2557 		    fscsi->scsi_buflen,
2558 		    nodma,
2559 		    lcount,			/* ipkt_link_cnt */
2560 		    tcount,			/* ipkt_change_cnt */
2561 		    0,				/* cause */
2562 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2563 
2564 		if (icmd == NULL) {
2565 			ret = ENOMEM;
2566 		} else {
2567 			/*
2568 			 * Setup internal packet as sema sync
2569 			 */
2570 			fcp_ipkt_sema_init(icmd);
2571 		}
2572 	}
2573 
2574 	if (ret == 0) {
2575 		/*
2576 		 * Init fpkt pointer for use.
2577 		 */
2578 
2579 		fpkt = icmd->ipkt_fpkt;
2580 
2581 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2582 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2583 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2584 
2585 		/*
2586 		 * Init fcmd pointer for use by SCSI command
2587 		 */
2588 
2589 		if (nodma) {
2590 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2591 		} else {
2592 			fcmd = &fcp_cmd;
2593 		}
2594 		bzero(fcmd, sizeof (struct fcp_cmd));
2595 		ptgt = plun->lun_tgt;
2596 
2597 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2598 
2599 		fcmd->fcp_ent_addr.ent_addr_0 =
2600 		    BE_16(*(uint16_t *)&(lun_string[0]));
2601 		fcmd->fcp_ent_addr.ent_addr_1 =
2602 		    BE_16(*(uint16_t *)&(lun_string[2]));
2603 		fcmd->fcp_ent_addr.ent_addr_2 =
2604 		    BE_16(*(uint16_t *)&(lun_string[4]));
2605 		fcmd->fcp_ent_addr.ent_addr_3 =
2606 		    BE_16(*(uint16_t *)&(lun_string[6]));
2607 
2608 		/*
2609 		 * Setup internal packet(icmd)
2610 		 */
2611 		icmd->ipkt_lun		= plun;
2612 		icmd->ipkt_restart	= 0;
2613 		icmd->ipkt_retries	= 0;
2614 		icmd->ipkt_opcode	= 0;
2615 
2616 		/*
2617 		 * Init the frame HEADER Pointer for use
2618 		 */
2619 		hp = &fpkt->pkt_cmd_fhdr;
2620 
2621 		hp->s_id	= pptr->port_id;
2622 		hp->d_id	= ptgt->tgt_d_id;
2623 		hp->r_ctl	= R_CTL_COMMAND;
2624 		hp->type	= FC_TYPE_SCSI_FCP;
2625 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2626 		hp->rsvd	= 0;
2627 		hp->seq_id	= 0;
2628 		hp->seq_cnt	= 0;
2629 		hp->ox_id	= 0xffff;
2630 		hp->rx_id	= 0xffff;
2631 		hp->ro		= 0;
2632 
2633 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2634 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2635 		fcmd->fcp_cntl.cntl_write_data	= 0;
2636 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2637 
2638 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2639 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2640 		    fscsi->scsi_cdblen);
2641 
2642 		if (!nodma) {
2643 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2644 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2645 		}
2646 
2647 		/*
2648 		 * Send SCSI command to FC transport
2649 		 */
2650 
2651 		if (ret == 0) {
2652 			mutex_enter(&ptgt->tgt_mutex);
2653 
2654 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2655 				mutex_exit(&ptgt->tgt_mutex);
2656 				fscsi->scsi_fc_status = xport_retval =
2657 				    fc_ulp_transport(pptr->port_fp_handle,
2658 				    fpkt);
2659 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2660 					ret = EIO;
2661 				}
2662 			} else {
2663 				mutex_exit(&ptgt->tgt_mutex);
2664 				ret = EBUSY;
2665 			}
2666 		}
2667 	}
2668 
2669 	/*
2670 	 * Wait for completion only if fc_ulp_transport was called and it
2671 	 * returned a success. This is the only time callback will happen.
2672 	 * Otherwise, there is no point in waiting
2673 	 */
2674 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2675 		ret = fcp_ipkt_sema_wait(icmd);
2676 	}
2677 
2678 	/*
2679 	 * Copy data to IOCTL data structures
2680 	 */
2681 	rsp = NULL;
2682 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2683 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2684 
2685 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2686 			fcp_log(CE_WARN, pptr->port_dip,
2687 			    "!SCSI command to d_id=0x%x lun=0x%x"
2688 			    " failed, Bad FCP response values:"
2689 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2690 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2691 			    ptgt->tgt_d_id, plun->lun_num,
2692 			    rsp->reserved_0, rsp->reserved_1,
2693 			    rsp->fcp_u.fcp_status.reserved_0,
2694 			    rsp->fcp_u.fcp_status.reserved_1,
2695 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2696 
2697 			ret = EIO;
2698 		}
2699 	}
2700 
2701 	if ((ret == 0) && (rsp != NULL)) {
2702 		/*
2703 		 * Calc response lengths
2704 		 */
2705 		sense_len = 0;
2706 		info_len = 0;
2707 
2708 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2709 			info_len = rsp->fcp_response_len;
2710 		}
2711 
2712 		rsp_info   = (struct fcp_rsp_info *)
2713 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2714 
2715 		/*
2716 		 * Get SCSI status
2717 		 */
2718 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2719 		/*
2720 		 * If a lun was just added or removed and the next command
2721 		 * comes through this interface, we need to capture the check
2722 		 * condition so we can discover the new topology.
2723 		 */
2724 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2725 		    rsp->fcp_u.fcp_status.sense_len_set) {
2726 			sense_len = rsp->fcp_sense_len;
2727 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2728 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2729 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2730 			    (FCP_SENSE_NO_LUN(sense_to))) {
2731 				reconfig_lun = TRUE;
2732 			}
2733 		}
2734 
2735 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2736 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2737 			if (reconfig_lun == FALSE) {
2738 				reconfig_status =
2739 				    fcp_is_reconfig_needed(ptgt, fpkt);
2740 			}
2741 
2742 			if ((reconfig_lun == TRUE) ||
2743 			    (reconfig_status == TRUE)) {
2744 				mutex_enter(&ptgt->tgt_mutex);
2745 				if (ptgt->tgt_tid == NULL) {
2746 					/*
2747 					 * Either we've been notified the
2748 					 * REPORT_LUN data has changed, or
2749 					 * we've determined on our own that
2750 					 * we're out of date.  Kick off
2751 					 * rediscovery.
2752 					 */
2753 					tid = timeout(fcp_reconfigure_luns,
2754 					    (caddr_t)ptgt, drv_usectohz(1));
2755 
2756 					ptgt->tgt_tid = tid;
2757 					ptgt->tgt_state |= FCP_TGT_BUSY;
2758 					ret = EBUSY;
2759 					reconfig_pending = TRUE;
2760 				}
2761 				mutex_exit(&ptgt->tgt_mutex);
2762 			}
2763 		}
2764 
2765 		/*
2766 		 * Calc residuals and buffer lengths
2767 		 */
2768 
2769 		if (ret == 0) {
2770 			buf_len = fscsi->scsi_buflen;
2771 			fscsi->scsi_bufresid	= 0;
2772 			if (rsp->fcp_u.fcp_status.resid_under) {
2773 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2774 					fscsi->scsi_bufresid = rsp->fcp_resid;
2775 				} else {
2776 					cmn_err(CE_WARN, "fcp: bad residue %x "
2777 					    "for txfer len %x", rsp->fcp_resid,
2778 					    fscsi->scsi_buflen);
2779 					fscsi->scsi_bufresid =
2780 					    fscsi->scsi_buflen;
2781 				}
2782 				buf_len -= fscsi->scsi_bufresid;
2783 			}
2784 			if (rsp->fcp_u.fcp_status.resid_over) {
2785 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2786 			}
2787 
2788 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2789 			if (fscsi->scsi_rqlen < sense_len) {
2790 				sense_len = fscsi->scsi_rqlen;
2791 			}
2792 
2793 			fscsi->scsi_fc_rspcode	= 0;
2794 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2795 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2796 			}
2797 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2798 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2799 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2800 
2801 			/*
2802 			 * Copy data and request sense
2803 			 *
2804 			 * Data must be copied by using the FCP_CP_IN macro.
2805 			 * This will ensure the proper byte order since the data
2806 			 * is being copied directly from the memory mapped
2807 			 * device register.
2808 			 *
2809 			 * The response (and request sense) will be in the
2810 			 * correct byte order.	No special copy is necessary.
2811 			 */
2812 
2813 			if (buf_len) {
2814 				FCP_CP_IN(fpkt->pkt_data,
2815 				    fscsi->scsi_bufaddr,
2816 				    fpkt->pkt_data_acc,
2817 				    buf_len);
2818 			}
2819 			bcopy((void *)rsp_sense,
2820 			    (void *)fscsi->scsi_rqbufaddr,
2821 			    sense_len);
2822 		}
2823 	}
2824 
2825 	/*
2826 	 * Cleanup transport data structures if icmd was alloc-ed
2827 	 * So, cleanup happens in the same thread that icmd was alloc-ed
2828 	 */
2829 	if (icmd != NULL) {
2830 		fcp_ipkt_sema_cleanup(icmd);
2831 	}
2832 
2833 	/* restore pm busy/idle status */
2834 	if (port_busy) {
2835 		fc_ulp_idle_port(pptr->port_fp_handle);
2836 	}
2837 
2838 	/*
2839 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2840 	 * flag, it'll be cleared when the reconfig is complete.
2841 	 */
2842 	if ((ptgt != NULL) && !reconfig_pending) {
2843 		/*
2844 		 * If the target was created by this request, de-mark it as busy.
2845 		 */
2846 		if (target_created) {
2847 			mutex_enter(&ptgt->tgt_mutex);
2848 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2849 			mutex_exit(&ptgt->tgt_mutex);
2850 		} else {
2851 			/*
2852 			 * De-mark target as busy
2853 			 */
2854 			mutex_enter(&ptgt->tgt_mutex);
2855 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2856 			mutex_exit(&ptgt->tgt_mutex);
2857 		}
2858 	}
2859 	return (ret);
2860 }
2861 
2862 
2863 static int
2864 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2865     fc_packet_t	*fpkt)
2866 {
2867 	uchar_t			*lun_string;
2868 	uint16_t		lun_num, i;
2869 	int			num_luns;
2870 	int			actual_luns;
2871 	int			num_masked_luns;
2872 	int			lun_buflen;
2873 	struct fcp_lun	*plun	= NULL;
2874 	struct fcp_reportlun_resp	*report_lun;
2875 	uint8_t			reconfig_needed = FALSE;
2876 	uint8_t			lun_exists = FALSE;
2877 	fcp_port_t			*pptr		 = ptgt->tgt_port;
2878 
2879 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2880 
2881 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2882 	    fpkt->pkt_datalen);
2883 
2884 	/* get number of luns (which is supplied as LUNS * 8) */
2885 	num_luns = BE_32(report_lun->num_lun) >> 3;
2886 
2887 	/*
2888 	 * Figure out exactly how many lun strings our response buffer
2889 	 * can hold.
2890 	 */
2891 	lun_buflen = (fpkt->pkt_datalen -
2892 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
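	/*
	 * For example (illustration only): with a 2048-byte REPORT_LUN
	 * response buffer, lun_buflen = (2048 - 2 * 4) / 8 = 255, so at
	 * most 255 of the reported LUN strings can be walked below.
	 */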
2893 
2894 	/*
2895 	 * Is our response buffer full or not? We don't want to
2896 	 * potentially walk beyond the number of luns we have.
2897 	 */
2898 	if (num_luns <= lun_buflen) {
2899 		actual_luns = num_luns;
2900 	} else {
2901 		actual_luns = lun_buflen;
2902 	}
2903 
2904 	mutex_enter(&ptgt->tgt_mutex);
2905 
2906 	/* Scan each lun to see if we have masked it. */
2907 	num_masked_luns = 0;
2908 	if (fcp_lun_blacklist != NULL) {
2909 		for (i = 0; i < actual_luns; i++) {
2910 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2911 			switch (lun_string[0] & 0xC0) {
2912 			case FCP_LUN_ADDRESSING:
2913 			case FCP_PD_ADDRESSING:
2914 			case FCP_VOLUME_ADDRESSING:
2915 				lun_num = ((lun_string[0] & 0x3F) << 8)
2916 				    | lun_string[1];
2917 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2918 				    lun_num) == TRUE) {
2919 					num_masked_luns++;
2920 				}
2921 				break;
2922 			default:
2923 				break;
2924 			}
2925 		}
2926 	}
2927 
2928 	/*
2929 	 * The quick and easy check.  If the number of LUNs reported
2930 	 * doesn't match the number we currently know about, we need
2931 	 * to reconfigure.
2932 	 */
2933 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2934 		mutex_exit(&ptgt->tgt_mutex);
2935 		kmem_free(report_lun, fpkt->pkt_datalen);
2936 		return (TRUE);
2937 	}
2938 
2939 	/*
2940 	 * If the quick and easy check doesn't turn up anything, we walk
2941 	 * the list of luns from the REPORT_LUN response and look for
2942 	 * any luns we don't know about.  If we find one, we know we need
2943 	 * to reconfigure. We will skip LUNs that are masked because of the
2944 	 * blacklist.
2945 	 */
2946 	for (i = 0; i < actual_luns; i++) {
2947 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2948 		lun_exists = FALSE;
2949 		switch (lun_string[0] & 0xC0) {
2950 		case FCP_LUN_ADDRESSING:
2951 		case FCP_PD_ADDRESSING:
2952 		case FCP_VOLUME_ADDRESSING:
2953 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2954 
2955 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2956 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2957 				lun_exists = TRUE;
2958 				break;
2959 			}
2960 
2961 			for (plun = ptgt->tgt_lun; plun;
2962 			    plun = plun->lun_next) {
2963 				if (plun->lun_num == lun_num) {
2964 					lun_exists = TRUE;
2965 					break;
2966 				}
2967 			}
2968 			break;
2969 		default:
2970 			break;
2971 		}
2972 
2973 		if (lun_exists == FALSE) {
2974 			reconfig_needed = TRUE;
2975 			break;
2976 		}
2977 	}
2978 
2979 	mutex_exit(&ptgt->tgt_mutex);
2980 	kmem_free(report_lun, fpkt->pkt_datalen);
2981 
2982 	return (reconfig_needed);
2983 }
2984 
2985 /*
2986  * This function is called by fcp_handle_page83 and uses inquiry response data
2987  * stored in plun->lun_inq to determine whether or not a device is a member
2988  * of the table fcp_symmetric_disk_table.  We return 0 if it is in the
2989  * table, otherwise 1.
2990  */
2991 static int
2992 fcp_symmetric_device_probe(struct fcp_lun *plun)
2993 {
2994 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2995 	char			*devidptr;
2996 	int			i, len;
2997 
2998 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2999 		devidptr = fcp_symmetric_disk_table[i];
3000 		len = (int)strlen(devidptr);
3001 
3002 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3003 			return (0);
3004 		}
3005 	}
3006 	return (1);
3007 }
3008 
3009 
3010 /*
3011  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
3012  * It returns the current count of state change callbacks, i.e. the
3013  * value of port_link_cnt.
3014  *
3015  * INPUT:
3016  *   fcp_ioctl.fp_minor -> The minor # of the fp port
3017  *   fcp_ioctl.listlen	-> 1
3018  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
3019  */
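/*
 * A hedged user-land sketch (fcp_fd and port_instance are hypothetical):
 *
 *	uint32_t		cnt;
 *	struct fcp_ioctl	fioctl;
 *
 *	fioctl.fp_minor = port_instance;
 *	fioctl.listlen	= 1;
 *	fioctl.list	= (caddr_t)&cnt;
 *	(void) ioctl(fcp_fd, FCP_STATE_COUNT, &fioctl);
 *
 * On success cnt holds the port's current link count, which a caller can
 * compare across invocations to detect that a state change has occurred.
 */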
3020 /*ARGSUSED2*/
3021 static int
3022 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3023 {
3024 	int			ret;
3025 	uint32_t		link_cnt;
3026 	struct fcp_ioctl	fioctl;
3027 	struct fcp_port	*pptr = NULL;
3028 
3029 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3030 	    &pptr)) != 0) {
3031 		return (ret);
3032 	}
3033 
3034 	ASSERT(pptr != NULL);
3035 
3036 	if (fioctl.listlen != 1) {
3037 		return (EINVAL);
3038 	}
3039 
3040 	mutex_enter(&pptr->port_mutex);
3041 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3042 		mutex_exit(&pptr->port_mutex);
3043 		return (ENXIO);
3044 	}
3045 
3046 	/*
3047 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3048  * when fcp initially attaches to the port and there is nothing
3049  * hanging off the port, or if there was a repeat offline state change
3050  * callback (refer to the fcp_statec_callback() FC_STATE_OFFLINE case).
3051 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3052 	 * will differentiate the 2 cases.
3053 	 */
3054 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3055 		mutex_exit(&pptr->port_mutex);
3056 		return (ENXIO);
3057 	}
3058 
3059 	link_cnt = pptr->port_link_cnt;
3060 	mutex_exit(&pptr->port_mutex);
3061 
3062 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3063 		return (EFAULT);
3064 	}
3065 
3066 #ifdef	_MULTI_DATAMODEL
3067 	switch (ddi_model_convert_from(mode & FMODELS)) {
3068 	case DDI_MODEL_ILP32: {
3069 		struct fcp32_ioctl f32_ioctl;
3070 
3071 		f32_ioctl.fp_minor = fioctl.fp_minor;
3072 		f32_ioctl.listlen = fioctl.listlen;
3073 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3074 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3075 		    sizeof (struct fcp32_ioctl), mode)) {
3076 			return (EFAULT);
3077 		}
3078 		break;
3079 	}
3080 	case DDI_MODEL_NONE:
3081 		if (ddi_copyout((void *)&fioctl, (void *)data,
3082 		    sizeof (struct fcp_ioctl), mode)) {
3083 			return (EFAULT);
3084 		}
3085 		break;
3086 	}
3087 #else	/* _MULTI_DATAMODEL */
3088 
3089 	if (ddi_copyout((void *)&fioctl, (void *)data,
3090 	    sizeof (struct fcp_ioctl), mode)) {
3091 		return (EFAULT);
3092 	}
3093 #endif	/* _MULTI_DATAMODEL */
3094 
3095 	return (0);
3096 }
3097 
3098 /*
3099  * This function copies the fcp_ioctl structure passed in from user land
3100  * into kernel land. Handles 32 bit applications.
3101  */
3102 /*ARGSUSED*/
3103 static int
3104 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3105     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3106 {
3107 	struct fcp_port	*t_pptr;
3108 
3109 #ifdef	_MULTI_DATAMODEL
3110 	switch (ddi_model_convert_from(mode & FMODELS)) {
3111 	case DDI_MODEL_ILP32: {
3112 		struct fcp32_ioctl f32_ioctl;
3113 
3114 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3115 		    sizeof (struct fcp32_ioctl), mode)) {
3116 			return (EFAULT);
3117 		}
3118 		fioctl->fp_minor = f32_ioctl.fp_minor;
3119 		fioctl->listlen = f32_ioctl.listlen;
3120 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3121 		break;
3122 	}
3123 	case DDI_MODEL_NONE:
3124 		if (ddi_copyin((void *)data, (void *)fioctl,
3125 		    sizeof (struct fcp_ioctl), mode)) {
3126 			return (EFAULT);
3127 		}
3128 		break;
3129 	}
3130 
3131 #else	/* _MULTI_DATAMODEL */
3132 	if (ddi_copyin((void *)data, (void *)fioctl,
3133 	    sizeof (struct fcp_ioctl), mode)) {
3134 		return (EFAULT);
3135 	}
3136 #endif	/* _MULTI_DATAMODEL */
3137 
3138 	/*
3139 	 * Right now we can assume that the minor number matches with
3140 	 * this instance of fp. If this changes we will need to
3141 	 * revisit this logic.
3142 	 */
3143 	mutex_enter(&fcp_global_mutex);
3144 	t_pptr = fcp_port_head;
3145 	while (t_pptr) {
3146 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3147 			break;
3148 		} else {
3149 			t_pptr = t_pptr->port_next;
3150 		}
3151 	}
3152 	*pptr = t_pptr;
3153 	mutex_exit(&fcp_global_mutex);
3154 	if (t_pptr == NULL) {
3155 		return (ENXIO);
3156 	}
3157 
3158 	return (0);
3159 }
3160 
3161 /*
3162  *     Function: fcp_port_create_tgt
3163  *
3164  *  Description: As the name suggests, this function creates the target
3165  *		 context specified by the WWN provided by the caller.  If the
3166  *		 creation goes well and the target is known by fp/fctl, a
3167  *		 PLOGI followed by a PRLI is issued.
3168  *
3169  *     Argument: pptr		fcp port structure
3170  *		 pwwn		WWN of the target
3171  *		 ret_val	Address of the return code.  It could be:
3172  *				EIO, ENOMEM or 0.
3173  *		 fc_status	PLOGI or PRLI status completion
3174  *		 fc_pkt_state	PLOGI or PRLI state completion
3175  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3176  *		 fc_pkt_action	PLOGI or PRLI action completion
3177  *
3178  * Return Value: NULL if it failed
3179  *		 Target structure address if it succeeds
3180  */
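/*
 * Locking note (descriptive, derived from the code below): the caller is
 * expected to hold fcp_global_mutex on entry.  The mutex is dropped
 * around the blocking PLOGI/PRLI exchanges and re-acquired before
 * returning, so the caller's view of the global port list may have
 * changed across this call.
 */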
3181 static struct fcp_tgt *
3182 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3183     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3184 {
3185 	struct fcp_tgt	*ptgt = NULL;
3186 	fc_portmap_t		devlist;
3187 	int			lcount;
3188 	int			error;
3189 
3190 	*ret_val = 0;
3191 
3192 	/*
3193 	 * Check FC port device & get port map
3194 	 */
3195 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3196 	    &error, 1) == NULL) {
3197 		*ret_val = EIO;
3198 	} else {
3199 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3200 		    &devlist) != FC_SUCCESS) {
3201 			*ret_val = EIO;
3202 		}
3203 	}
3204 
3205 	/* Set port map flags */
3206 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3207 
3208 	/* Allocate target */
3209 	if (*ret_val == 0) {
3210 		lcount = pptr->port_link_cnt;
3211 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3212 		if (ptgt == NULL) {
3213 			fcp_log(CE_WARN, pptr->port_dip,
3214 			    "!FC target allocation failed");
3215 			*ret_val = ENOMEM;
3216 		} else {
3217 			/* Setup target */
3218 			mutex_enter(&ptgt->tgt_mutex);
3219 
3220 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3221 			ptgt->tgt_tmp_cnt	= 1;
3222 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3223 			ptgt->tgt_hard_addr	=
3224 			    devlist.map_hard_addr.hard_addr;
3225 			ptgt->tgt_pd_handle	= devlist.map_pd;
3226 			ptgt->tgt_fca_dev	= NULL;
3227 
3228 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3229 			    FC_WWN_SIZE);
3230 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3231 			    FC_WWN_SIZE);
3232 
3233 			mutex_exit(&ptgt->tgt_mutex);
3234 		}
3235 	}
3236 
3237 	/* Release global mutex for PLOGI and PRLI */
3238 	mutex_exit(&fcp_global_mutex);
3239 
3240 	/* Send PLOGI (If necessary) */
3241 	if (*ret_val == 0) {
3242 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3243 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3244 	}
3245 
3246 	/* Send PRLI (If necessary) */
3247 	if (*ret_val == 0) {
3248 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3249 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3250 	}
3251 
3252 	mutex_enter(&fcp_global_mutex);
3253 
3254 	return (ptgt);
3255 }
3256 
3257 /*
3258  *     Function: fcp_tgt_send_plogi
3259  *
3260  *  Description: This function sends a PLOGI to the target specified by the
3261  *		 caller and waits till it completes.
3262  *
3263  *     Argument: ptgt		Target to send the plogi to.
3264  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3265  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3266  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3267  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3268  *
3269  * Return Value: 0
3270  *		 ENOMEM
3271  *		 EIO
3272  *
3273  *	Context: User context.
3274  */
3275 static int
3276 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3277     int *fc_pkt_reason, int *fc_pkt_action)
3278 {
3279 	struct fcp_port	*pptr;
3280 	struct fcp_ipkt	*icmd;
3281 	struct fc_packet	*fpkt;
3282 	fc_frame_hdr_t		*hp;
3283 	struct la_els_logi	logi;
3284 	int			tcount;
3285 	int			lcount;
3286 	int			ret, login_retval = ~FC_SUCCESS;
3287 
3288 	ret = 0;
3289 
3290 	pptr = ptgt->tgt_port;
3291 
3292 	lcount = pptr->port_link_cnt;
3293 	tcount = ptgt->tgt_change_cnt;
3294 
3295 	/* Alloc internal packet */
3296 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3297 	    sizeof (la_els_logi_t), 0,
3298 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3299 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3300 
3301 	if (icmd == NULL) {
3302 		ret = ENOMEM;
3303 	} else {
3304 		/*
3305 		 * Setup internal packet as sema sync
3306 		 */
3307 		fcp_ipkt_sema_init(icmd);
3308 
3309 		/*
3310 		 * Setup internal packet (icmd)
3311 		 */
3312 		icmd->ipkt_lun		= NULL;
3313 		icmd->ipkt_restart	= 0;
3314 		icmd->ipkt_retries	= 0;
3315 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3316 
3317 		/*
3318 		 * Setup fc_packet
3319 		 */
3320 		fpkt = icmd->ipkt_fpkt;
3321 
3322 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3323 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3324 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3325 
3326 		/*
3327 		 * Setup FC frame header
3328 		 */
3329 		hp = &fpkt->pkt_cmd_fhdr;
3330 
3331 		hp->s_id	= pptr->port_id;	/* source ID */
3332 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3333 		hp->r_ctl	= R_CTL_ELS_REQ;
3334 		hp->type	= FC_TYPE_EXTENDED_LS;
3335 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3336 		hp->seq_id	= 0;
3337 		hp->rsvd	= 0;
3338 		hp->df_ctl	= 0;
3339 		hp->seq_cnt	= 0;
3340 		hp->ox_id	= 0xffff;		/* i.e. none */
3341 		hp->rx_id	= 0xffff;		/* i.e. none */
3342 		hp->ro		= 0;
3343 
3344 		/*
3345 		 * Setup PLOGI
3346 		 */
3347 		bzero(&logi, sizeof (struct la_els_logi));
3348 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3349 
3350 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3351 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3352 
3353 		/*
3354 		 * Send PLOGI
3355 		 */
3356 		*fc_status = login_retval =
3357 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3358 		if (*fc_status != FC_SUCCESS) {
3359 			ret = EIO;
3360 		}
3361 	}
3362 
3363 	/*
3364 	 * Wait for completion
3365 	 */
3366 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3367 		ret = fcp_ipkt_sema_wait(icmd);
3368 
3369 		*fc_pkt_state	= fpkt->pkt_state;
3370 		*fc_pkt_reason	= fpkt->pkt_reason;
3371 		*fc_pkt_action	= fpkt->pkt_action;
3372 	}
3373 
3374 	/*
3375 	 * Cleanup transport data structures if icmd was alloc-ed.  With the
3376 	 * sema sync scheme the callback only posts the semaphore, so the
3377 	 * cleanup always happens here, in the thread that alloc-ed icmd.
3378 	 */
3379 	if (icmd != NULL) {
3380 		fcp_ipkt_sema_cleanup(icmd);
3381 	}
3382 
3383 	return (ret);
3384 }
3385 
3386 /*
3387  *     Function: fcp_tgt_send_prli
3388  *
3389  *  Description: Does nothing as of today.
3390  *
3391  *     Argument: ptgt		Target to send the prli to.
3392  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3393  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3394  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3395  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3396  *
3397  * Return Value: 0
3398  */
3399 /*ARGSUSED*/
3400 static int
3401 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3402     int *fc_pkt_reason, int *fc_pkt_action)
3403 {
3404 	return (0);
3405 }
3406 
3407 /*
3408  *     Function: fcp_ipkt_sema_init
3409  *
3410  *  Description: Initializes the semaphore contained in the internal packet.
3411  *
3412  *     Argument: icmd	Internal packet the semaphore of which must be
3413  *			initialized.
3414  *
3415  * Return Value: None
3416  *
3417  *	Context: User context only.
3418  */
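/*
 * Typical use of the ipkt semaphore synchronization, summarized from
 * fcp_send_scsi_ioctl() and fcp_tgt_send_plogi() above:
 *
 *	icmd = fcp_icmd_alloc(...);
 *	fcp_ipkt_sema_init(icmd);		- pkt_comp -> sema callback
 *	if (fc_ulp_transport()/fc_ulp_login() returned FC_SUCCESS)
 *		ret = fcp_ipkt_sema_wait(icmd);	- blocks until sema_v()
 *	fcp_ipkt_sema_cleanup(icmd);		- destroy sema, free icmd
 */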
3419 static void
3420 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3421 {
3422 	struct fc_packet	*fpkt;
3423 
3424 	fpkt = icmd->ipkt_fpkt;
3425 
3426 	/* Create semaphore for sync */
3427 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3428 
3429 	/* Setup the completion callback */
3430 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3431 }
3432 
3433 /*
3434  *     Function: fcp_ipkt_sema_wait
3435  *
3436  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3437  *		 semaphore is released in the callback.
3438  *
3439  *     Argument: icmd	Internal packet to wait on for completion.
3440  *
3441  * Return Value: 0
3442  *		 EIO
3443  *		 EBUSY
3444  *		 EAGAIN
3445  *
3446  *	Context: User context only.
3447  *
3448  * This function does a conversion between the field pkt_state of the fc_packet
3449  * embedded in the internal packet (icmd) and the code it returns.
3450  */
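/*
 * Rough mapping implemented by the switch below:
 *
 *	FC_PKT_SUCCESS					-> 0
 *	FC_PKT_TIMEOUT, FC_PKT_LOCAL_RJT timeouts	-> EAGAIN
 *	the *_BSY states and the "busy" reject reasons	-> EBUSY
 *	anything else					-> EIO
 */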
3451 static int
3452 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3453 {
3454 	struct fc_packet	*fpkt;
3455 	int	ret;
3456 
3457 	ret = EIO;
3458 	fpkt = icmd->ipkt_fpkt;
3459 
3460 	/*
3461 	 * Wait on semaphore
3462 	 */
3463 	sema_p(&(icmd->ipkt_sema));
3464 
3465 	/*
3466 	 * Check the status of the FC packet
3467 	 */
3468 	switch (fpkt->pkt_state) {
3469 	case FC_PKT_SUCCESS:
3470 		ret = 0;
3471 		break;
3472 	case FC_PKT_LOCAL_RJT:
3473 		switch (fpkt->pkt_reason) {
3474 		case FC_REASON_SEQ_TIMEOUT:
3475 		case FC_REASON_RX_BUF_TIMEOUT:
3476 			ret = EAGAIN;
3477 			break;
3478 		case FC_REASON_PKT_BUSY:
3479 			ret = EBUSY;
3480 			break;
3481 		}
3482 		break;
3483 	case FC_PKT_TIMEOUT:
3484 		ret = EAGAIN;
3485 		break;
3486 	case FC_PKT_LOCAL_BSY:
3487 	case FC_PKT_TRAN_BSY:
3488 	case FC_PKT_NPORT_BSY:
3489 	case FC_PKT_FABRIC_BSY:
3490 		ret = EBUSY;
3491 		break;
3492 	case FC_PKT_LS_RJT:
3493 	case FC_PKT_BA_RJT:
3494 		switch (fpkt->pkt_reason) {
3495 		case FC_REASON_LOGICAL_BSY:
3496 			ret = EBUSY;
3497 			break;
3498 		}
3499 		break;
3500 	case FC_PKT_FS_RJT:
3501 		switch (fpkt->pkt_reason) {
3502 		case FC_REASON_FS_LOGICAL_BUSY:
3503 			ret = EBUSY;
3504 			break;
3505 		}
3506 		break;
3507 	}
3508 
3509 	return (ret);
3510 }
3511 
3512 /*
3513  *     Function: fcp_ipkt_sema_callback
3514  *
3515  *  Description: Registered as the completion callback function for the FC
3516  *		 transport when the ipkt semaphore is used for sync. This will
3517  *		 cleanup the used data structures, if necessary and wake up
3518  *		 the user thread to complete the transaction.
3519  *
3520  *     Argument: fpkt	FC packet (points to the icmd)
3521  *
3522  * Return Value: None
3523  *
3524  *	Context: User context only
3525  */
3526 static void
3527 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3528 {
3529 	struct fcp_ipkt	*icmd;
3530 
3531 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3532 
3533 	/*
3534 	 * Wake up user thread
3535 	 */
3536 	sema_v(&(icmd->ipkt_sema));
3537 }
3538 
3539 /*
3540  *     Function: fcp_ipkt_sema_cleanup
3541  *
3542  *  Description: Called to cleanup (if necessary) the data structures used
3543  *		 when ipkt sema is used for sync.  This function will detect
3544  *		 whether the caller is the last thread (via counter) and
3545  *		 cleanup only if necessary.
3546  *
3547  *     Argument: icmd	Internal command packet
3548  *
3549  * Return Value: None
3550  *
3551  *	Context: User context only
3552  */
3553 static void
3554 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3555 {
3556 	struct fcp_tgt	*ptgt;
3557 	struct fcp_port	*pptr;
3558 
3559 	ptgt = icmd->ipkt_tgt;
3560 	pptr = icmd->ipkt_port;
3561 
3562 	/*
3563 	 * Acquire data structure
3564 	 */
3565 	mutex_enter(&ptgt->tgt_mutex);
3566 
3567 	/*
3568 	 * Destroy semaphore
3569 	 */
3570 	sema_destroy(&(icmd->ipkt_sema));
3571 
3572 	/*
3573 	 * Cleanup internal packet
3574 	 */
3575 	mutex_exit(&ptgt->tgt_mutex);
3576 	fcp_icmd_free(pptr, icmd);
3577 }
3578 
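/*
 * Editorial note -- a minimal usage sketch, not part of the driver, showing
 * how the fcp_ipkt_sema_*() helpers above cooperate to turn an asynchronous
 * FC request into a synchronous one (it mirrors the login path whose tail
 * appears at the top of this section).  Variable names are illustrative.
 *
 *	struct fcp_ipkt	*icmd;
 *	fc_packet_t	*fpkt;
 *	int		ret;
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, ...);		allocate internal pkt
 *	fcp_ipkt_sema_init(icmd);			sema at 0, pkt_comp set
 *	fpkt = icmd->ipkt_fpkt;
 *	if (fc_ulp_login(pptr->port_fp_handle, &fpkt, 1) == FC_SUCCESS)
 *		ret = fcp_ipkt_sema_wait(icmd);		blocks until the
 *							callback does sema_v()
 *	fcp_ipkt_sema_cleanup(icmd);			sema_destroy + icmd free
 *
 * fcp_ipkt_sema_wait() converts fpkt->pkt_state/pkt_reason into errno-style
 * codes (0, EIO, EBUSY, EAGAIN) as shown above.
 */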
3579 /*
3580  *     Function: fcp_port_attach
3581  *
3582  *  Description: Called by the transport framework to resume, suspend or
3583  *		 attach a new port.
3584  *
3585  *     Argument: ulph		Port handle
3586  *		 *pinfo		Port information
3587  *		 cmd		Command
3588  *		 s_id		Port ID
3589  *
3590  * Return Value: FC_FAILURE or FC_SUCCESS
3591  */
3592 /*ARGSUSED*/
3593 static int
3594 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3595     fc_attach_cmd_t cmd, uint32_t s_id)
3596 {
3597 	int	instance;
3598 	int	res = FC_FAILURE; /* default result */
3599 
3600 	ASSERT(pinfo != NULL);
3601 
3602 	instance = ddi_get_instance(pinfo->port_dip);
3603 
3604 	switch (cmd) {
3605 	case FC_CMD_ATTACH:
3606 		/*
3607 		 * this port instance is attaching for the first time (or after
3608 		 * being detached before)
3609 		 */
3610 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3611 		    instance) == DDI_SUCCESS) {
3612 			res = FC_SUCCESS;
3613 		} else {
3614 			ASSERT(ddi_get_soft_state(fcp_softstate,
3615 			    instance) == NULL);
3616 		}
3617 		break;
3618 
3619 	case FC_CMD_RESUME:
3620 	case FC_CMD_POWER_UP:
3621 		/*
3622 		 * this port instance was attached and then suspended, and
3623 		 * will now be resumed
3624 		 */
3625 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3626 		    instance) == DDI_SUCCESS) {
3627 			res = FC_SUCCESS;
3628 		}
3629 		break;
3630 
3631 	default:
3632 		/* shouldn't happen */
3633 		FCP_TRACE(fcp_logq, "fcp",
3634 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3635 		    "port_attach: unknown command: %d", cmd);
3636 		break;
3637 	}
3638 
3639 	/* return result */
3640 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3641 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3642 
3643 	return (res);
3644 }
3645 
3646 
3647 /*
3648  * detach or suspend this port instance
3649  *
3650  * acquires and releases the global mutex
3651  *
3652  * acquires and releases the mutex for this port
3653  *
3654  * acquires and releases the hotplug mutex for this port
3655  */
3656 /*ARGSUSED*/
3657 static int
3658 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3659     fc_detach_cmd_t cmd)
3660 {
3661 	int			flag;
3662 	int			instance;
3663 	struct fcp_port		*pptr;
3664 
3665 	instance = ddi_get_instance(info->port_dip);
3666 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3667 
3668 	switch (cmd) {
3669 	case FC_CMD_SUSPEND:
3670 		FCP_DTRACE(fcp_logq, "fcp",
3671 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3672 		    "port suspend called for port %d", instance);
3673 		flag = FCP_STATE_SUSPENDED;
3674 		break;
3675 
3676 	case FC_CMD_POWER_DOWN:
3677 		FCP_DTRACE(fcp_logq, "fcp",
3678 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3679 		    "port power down called for port %d", instance);
3680 		flag = FCP_STATE_POWER_DOWN;
3681 		break;
3682 
3683 	case FC_CMD_DETACH:
3684 		FCP_DTRACE(fcp_logq, "fcp",
3685 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3686 		    "port detach called for port %d", instance);
3687 		flag = FCP_STATE_DETACHING;
3688 		break;
3689 
3690 	default:
3691 		/* shouldn't happen */
3692 		return (FC_FAILURE);
3693 	}
3694 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3695 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3696 
3697 	return (fcp_handle_port_detach(pptr, flag, instance));
3698 }
3699 
3700 
3701 /*
3702  * called for ioctls on the transport's devctl interface, and the transport
3703  * has passed it to us
3704  *
3705  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3706  *
3707  * return FC_SUCCESS if we decide to claim the ioctl,
3708  * else return FC_UNCLAIMED
3709  *
3710  * *rval is set iff we decide to claim the ioctl
3711  */
3712 /*ARGSUSED*/
3713 static int
3714 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3715     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3716 {
3717 	int			retval = FC_UNCLAIMED;	/* return value */
3718 	struct fcp_port		*pptr = NULL;		/* our soft state */
3719 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3720 	dev_info_t		*cdip;
3721 	mdi_pathinfo_t		*pip = NULL;
3722 	char			*ndi_nm;		/* NDI name */
3723 	char			*ndi_addr;		/* NDI addr */
3724 	int			is_mpxio, circ;
3725 	int			devi_entered = 0;
3726 	time_t			end_time;
3727 
3728 	ASSERT(rval != NULL);
3729 
3730 	FCP_DTRACE(fcp_logq, "fcp",
3731 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3732 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3733 
3734 	/* if already claimed then forget it */
3735 	if (claimed) {
3736 		/*
3737 		 * for now, if this ioctl has already been claimed, then
3738 		 * we just ignore it
3739 		 */
3740 		return (retval);
3741 	}
3742 
3743 	/* get our port info */
3744 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3745 		fcp_log(CE_WARN, NULL,
3746 		    "!fcp:Invalid port handle in ioctl");
3747 		*rval = ENXIO;
3748 		return (retval);
3749 	}
3750 	is_mpxio = pptr->port_mpxio;
3751 
3752 	switch (cmd) {
3753 	case DEVCTL_BUS_GETSTATE:
3754 	case DEVCTL_BUS_QUIESCE:
3755 	case DEVCTL_BUS_UNQUIESCE:
3756 	case DEVCTL_BUS_RESET:
3757 	case DEVCTL_BUS_RESETALL:
3758 
3759 	case DEVCTL_BUS_DEV_CREATE:
3760 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3761 			return (retval);
3762 		}
3763 		break;
3764 
3765 	case DEVCTL_DEVICE_GETSTATE:
3766 	case DEVCTL_DEVICE_OFFLINE:
3767 	case DEVCTL_DEVICE_ONLINE:
3768 	case DEVCTL_DEVICE_REMOVE:
3769 	case DEVCTL_DEVICE_RESET:
3770 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3771 			return (retval);
3772 		}
3773 
3774 		ASSERT(dcp != NULL);
3775 
3776 		/* ensure we have a name and address */
3777 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3778 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3779 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3780 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3781 			    "ioctl: can't get name (%s) or addr (%s)",
3782 			    ndi_nm ? ndi_nm : "<null ptr>",
3783 			    ndi_addr ? ndi_addr : "<null ptr>");
3784 			ndi_dc_freehdl(dcp);
3785 			return (retval);
3786 		}
3787 
3788 
3789 		/* get our child's DIP */
3790 		ASSERT(pptr != NULL);
3791 		if (is_mpxio) {
3792 			mdi_devi_enter(pptr->port_dip, &circ);
3793 		} else {
3794 			ndi_devi_enter(pptr->port_dip, &circ);
3795 		}
3796 		devi_entered = 1;
3797 
3798 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3799 		    ndi_addr)) == NULL) {
3800 			/* Look for virtually enumerated devices. */
3801 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3802 			if (pip == NULL ||
3803 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3804 				*rval = ENXIO;
3805 				goto out;
3806 			}
3807 		}
3808 		break;
3809 
3810 	default:
3811 		*rval = ENOTTY;
3812 		return (retval);
3813 	}
3814 
3815 	/* this ioctl is ours -- process it */
3816 
3817 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3818 
3819 	/* we assume it will be a success; else we'll set error value */
3820 	*rval = 0;
3821 
3822 
3823 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3824 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3825 	    "ioctl: claiming this one");
3826 
3827 	/* handle ioctls now */
3828 	switch (cmd) {
3829 	case DEVCTL_DEVICE_GETSTATE:
3830 		ASSERT(cdip != NULL);
3831 		ASSERT(dcp != NULL);
3832 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3833 			*rval = EFAULT;
3834 		}
3835 		break;
3836 
3837 	case DEVCTL_DEVICE_REMOVE:
3838 	case DEVCTL_DEVICE_OFFLINE: {
3839 		int			flag = 0;
3840 		int			lcount;
3841 		int			tcount;
3842 		struct fcp_pkt	*head = NULL;
3843 		struct fcp_lun	*plun;
3844 		child_info_t		*cip = CIP(cdip);
3845 		int			all = 1;
3846 		struct fcp_lun	*tplun;
3847 		struct fcp_tgt	*ptgt;
3848 
3849 		ASSERT(pptr != NULL);
3850 		ASSERT(cdip != NULL);
3851 
3852 		mutex_enter(&pptr->port_mutex);
3853 		if (pip != NULL) {
3854 			cip = CIP(pip);
3855 		}
3856 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3857 			mutex_exit(&pptr->port_mutex);
3858 			*rval = ENXIO;
3859 			break;
3860 		}
3861 
3862 		head = fcp_scan_commands(plun);
3863 		if (head != NULL) {
3864 			fcp_abort_commands(head, LUN_PORT);
3865 		}
3866 		lcount = pptr->port_link_cnt;
3867 		tcount = plun->lun_tgt->tgt_change_cnt;
3868 		mutex_exit(&pptr->port_mutex);
3869 
3870 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3871 			flag = NDI_DEVI_REMOVE;
3872 		}
3873 
3874 		if (is_mpxio) {
3875 			mdi_devi_exit(pptr->port_dip, circ);
3876 		} else {
3877 			ndi_devi_exit(pptr->port_dip, circ);
3878 		}
3879 		devi_entered = 0;
3880 
3881 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3882 		    FCP_OFFLINE, lcount, tcount, flag);
3883 
3884 		if (*rval != NDI_SUCCESS) {
3885 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3886 			break;
3887 		}
3888 
3889 		fcp_update_offline_flags(plun);
3890 
3891 		ptgt = plun->lun_tgt;
3892 		mutex_enter(&ptgt->tgt_mutex);
3893 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3894 		    tplun->lun_next) {
3895 			mutex_enter(&tplun->lun_mutex);
3896 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3897 				all = 0;
3898 			}
3899 			mutex_exit(&tplun->lun_mutex);
3900 		}
3901 
3902 		if (all) {
3903 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3904 			/*
3905 			 * The user is unconfiguring/offlining the device.
3906 			 * If fabric and the auto configuration is set
3907 			 * then make sure the user is the only one who
3908 			 * can reconfigure the device.
3909 			 */
3910 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3911 			    fcp_enable_auto_configuration) {
3912 				ptgt->tgt_manual_config_only = 1;
3913 			}
3914 		}
3915 		mutex_exit(&ptgt->tgt_mutex);
3916 		break;
3917 	}
3918 
3919 	case DEVCTL_DEVICE_ONLINE: {
3920 		int			lcount;
3921 		int			tcount;
3922 		struct fcp_lun	*plun;
3923 		child_info_t		*cip = CIP(cdip);
3924 
3925 		ASSERT(cdip != NULL);
3926 		ASSERT(pptr != NULL);
3927 
3928 		mutex_enter(&pptr->port_mutex);
3929 		if (pip != NULL) {
3930 			cip = CIP(pip);
3931 		}
3932 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3933 			mutex_exit(&pptr->port_mutex);
3934 			*rval = ENXIO;
3935 			break;
3936 		}
3937 		lcount = pptr->port_link_cnt;
3938 		tcount = plun->lun_tgt->tgt_change_cnt;
3939 		mutex_exit(&pptr->port_mutex);
3940 
3941 		/*
3942 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3943 		 * to allow the device attach to occur when the device is
3944 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3945 		 * from the scsi_probe()).
3946 		 */
3947 		mutex_enter(&LUN_TGT->tgt_mutex);
3948 		plun->lun_state |= FCP_LUN_ONLINING;
3949 		mutex_exit(&LUN_TGT->tgt_mutex);
3950 
3951 		if (is_mpxio) {
3952 			mdi_devi_exit(pptr->port_dip, circ);
3953 		} else {
3954 			ndi_devi_exit(pptr->port_dip, circ);
3955 		}
3956 		devi_entered = 0;
3957 
3958 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3959 		    FCP_ONLINE, lcount, tcount, 0);
3960 
3961 		if (*rval != NDI_SUCCESS) {
3962 			/* Reset the FCP_LUN_ONLINING bit */
3963 			mutex_enter(&LUN_TGT->tgt_mutex);
3964 			plun->lun_state &= ~FCP_LUN_ONLINING;
3965 			mutex_exit(&LUN_TGT->tgt_mutex);
3966 			*rval = EIO;
3967 			break;
3968 		}
3969 		mutex_enter(&LUN_TGT->tgt_mutex);
3970 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3971 		    FCP_LUN_ONLINING);
3972 		mutex_exit(&LUN_TGT->tgt_mutex);
3973 		break;
3974 	}
3975 
3976 	case DEVCTL_BUS_DEV_CREATE: {
3977 		uchar_t			*bytes = NULL;
3978 		uint_t			nbytes;
3979 		struct fcp_tgt		*ptgt = NULL;
3980 		struct fcp_lun		*plun = NULL;
3981 		dev_info_t		*useless_dip = NULL;
3982 
3983 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3984 		    DEVCTL_CONSTRUCT, &useless_dip);
3985 		if (*rval != 0 || useless_dip == NULL) {
3986 			break;
3987 		}
3988 
3989 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3990 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3991 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3992 			*rval = EINVAL;
3993 			(void) ndi_devi_free(useless_dip);
3994 			if (bytes != NULL) {
3995 				ddi_prop_free(bytes);
3996 			}
3997 			break;
3998 		}
3999 
4000 		*rval = fcp_create_on_demand(pptr, bytes);
4001 		if (*rval == 0) {
4002 			mutex_enter(&pptr->port_mutex);
4003 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4004 			if (ptgt) {
4005 				/*
4006 				 * We now have a pointer to the target that
4007 				 * was created. Lets point to the first LUN on
4008 				 * this new target.
4009 				 */
4010 				mutex_enter(&ptgt->tgt_mutex);
4011 
4012 				plun = ptgt->tgt_lun;
4013 				/*
4014 				 * There may be stale/offline LUN entries on
4015 				 * this list (this is by design) and so we have
4016 				 * to make sure we point to the first online
4017 				 * LUN
4018 				 */
4019 				while (plun &&
4020 				    plun->lun_state & FCP_LUN_OFFLINE) {
4021 					plun = plun->lun_next;
4022 				}
4023 
4024 				mutex_exit(&ptgt->tgt_mutex);
4025 			}
4026 			mutex_exit(&pptr->port_mutex);
4027 		}
4028 
4029 		if (*rval == 0 && ptgt && plun) {
4030 			mutex_enter(&plun->lun_mutex);
4031 			/*
4032 			 * Allow up to fcp_lun_ready_retry seconds to
4033 			 * configure all the luns behind the target.
4034 			 *
4035 			 * The intent here is to allow targets with long
4036 			 * reboot/reset-recovery times to become available
4037 			 * while limiting the maximum wait time for an
4038 			 * unresponsive target.
4039 			 */
4040 			end_time = ddi_get_lbolt() +
4041 			    SEC_TO_TICK(fcp_lun_ready_retry);
4042 
4043 			while (ddi_get_lbolt() < end_time) {
4044 				retval = FC_SUCCESS;
4045 
4046 				/*
4047 				 * The new ndi interfaces for on-demand creation
4048 				 * are inflexible, so do some more work to pass
4049 				 * on a path name of some LUN (design is broken!)
4050 				 */
4051 				if (plun->lun_cip) {
4052 					if (plun->lun_mpxio == 0) {
4053 						cdip = DIP(plun->lun_cip);
4054 					} else {
4055 						cdip = mdi_pi_get_client(
4056 						    PIP(plun->lun_cip));
4057 					}
4058 					if (cdip == NULL) {
4059 						*rval = ENXIO;
4060 						break;
4061 					}
4062 
4063 					if (!i_ddi_devi_attached(cdip)) {
4064 						mutex_exit(&plun->lun_mutex);
4065 						delay(drv_usectohz(1000000));
4066 						mutex_enter(&plun->lun_mutex);
4067 					} else {
4068 						/*
4069 						 * This LUN is ready; let's
4070 						 * check the next one.
4071 						 */
4072 						mutex_exit(&plun->lun_mutex);
4073 						plun = plun->lun_next;
4074 						while (plun && (plun->lun_state
4075 						    & FCP_LUN_OFFLINE)) {
4076 							plun = plun->lun_next;
4077 						}
4078 						if (!plun) {
4079 							break;
4080 						}
4081 						mutex_enter(&plun->lun_mutex);
4082 					}
4083 				} else {
4084 					/*
4085 					 * lun_cip field for a valid lun
4086 					 * should never be NULL. Fail the
4087 					 * command.
4088 					 */
4089 					*rval = ENXIO;
4090 					break;
4091 				}
4092 			}
4093 			if (plun) {
4094 				mutex_exit(&plun->lun_mutex);
4095 			} else {
4096 				char devnm[MAXNAMELEN];
4097 				int nmlen;
4098 
4099 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4100 				    ddi_node_name(cdip),
4101 				    ddi_get_name_addr(cdip));
4102 
4103 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4104 				    0) {
4105 					*rval = EFAULT;
4106 				}
4107 			}
4108 		} else {
4109 			int	i;
4110 			char	buf[25];
4111 
4112 			for (i = 0; i < FC_WWN_SIZE; i++) {
4113 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4114 			}
4115 
4116 			fcp_log(CE_WARN, pptr->port_dip,
4117 			    "!Failed to create nodes for pwwn=%s; error=%x",
4118 			    buf, *rval);
4119 		}
4120 
4121 		(void) ndi_devi_free(useless_dip);
4122 		ddi_prop_free(bytes);
4123 		break;
4124 	}
4125 
4126 	case DEVCTL_DEVICE_RESET: {
4127 		struct fcp_lun		*plun;
4128 		child_info_t		*cip = CIP(cdip);
4129 
4130 		ASSERT(cdip != NULL);
4131 		ASSERT(pptr != NULL);
4132 		mutex_enter(&pptr->port_mutex);
4133 		if (pip != NULL) {
4134 			cip = CIP(pip);
4135 		}
4136 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4137 			mutex_exit(&pptr->port_mutex);
4138 			*rval = ENXIO;
4139 			break;
4140 		}
4141 		mutex_exit(&pptr->port_mutex);
4142 
4143 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4144 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4145 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4146 
4147 			*rval = ENXIO;
4148 			break;
4149 		}
4150 
4151 		if (plun->lun_sd == NULL) {
4152 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4153 
4154 			*rval = ENXIO;
4155 			break;
4156 		}
4157 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4158 
4159 		/*
4160 		 * set up ap so that fcp_reset can figure out
4161 		 * which target to reset
4162 		 */
4163 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4164 		    RESET_TARGET) == FALSE) {
4165 			*rval = EIO;
4166 		}
4167 		break;
4168 	}
4169 
4170 	case DEVCTL_BUS_GETSTATE:
4171 		ASSERT(dcp != NULL);
4172 		ASSERT(pptr != NULL);
4173 		ASSERT(pptr->port_dip != NULL);
4174 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4175 		    NDI_SUCCESS) {
4176 			*rval = EFAULT;
4177 		}
4178 		break;
4179 
4180 	case DEVCTL_BUS_QUIESCE:
4181 	case DEVCTL_BUS_UNQUIESCE:
4182 		*rval = ENOTSUP;
4183 		break;
4184 
4185 	case DEVCTL_BUS_RESET:
4186 	case DEVCTL_BUS_RESETALL:
4187 		ASSERT(pptr != NULL);
4188 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4189 		break;
4190 
4191 	default:
4192 		ASSERT(dcp != NULL);
4193 		*rval = ENOTTY;
4194 		break;
4195 	}
4196 
4197 	/* all done -- clean up and return */
4198 out:	if (devi_entered) {
4199 		if (is_mpxio) {
4200 			mdi_devi_exit(pptr->port_dip, circ);
4201 		} else {
4202 			ndi_devi_exit(pptr->port_dip, circ);
4203 		}
4204 	}
4205 
4206 	if (dcp != NULL) {
4207 		ndi_dc_freehdl(dcp);
4208 	}
4209 
4210 	return (retval);
4211 }
4212 
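/*
 * Editorial note -- a hedged user-space sketch (not part of the driver) of
 * how the DEVCTL_DEVICE_OFFLINE/ONLINE cases handled by fcp_port_ioctl()
 * above are typically reached through libdevice.  libdevice opens the parent
 * nexus devctl node and encodes the child's name@addr, which is what
 * ndi_dc_getname()/ndi_dc_getaddr() recover above.  The device path below is
 * purely illustrative.
 *
 *	#include <libdevice.h>
 *
 *	devctl_hdl_t hdl;
 *
 *	hdl = devctl_device_acquire("/devices/...path-to-fcp-lun...", 0);
 *	if (hdl != NULL) {
 *		(void) devctl_device_offline(hdl);	DEVCTL_DEVICE_OFFLINE
 *		(void) devctl_device_online(hdl);	DEVCTL_DEVICE_ONLINE
 *		devctl_release(hdl);
 *	}
 */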
4213 
4214 /*ARGSUSED*/
4215 static int
4216 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4217     uint32_t claimed)
4218 {
4219 	uchar_t			r_ctl;
4220 	uchar_t			ls_code;
4221 	struct fcp_port	*pptr;
4222 
4223 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4224 		return (FC_UNCLAIMED);
4225 	}
4226 
4227 	mutex_enter(&pptr->port_mutex);
4228 	if (pptr->port_state & (FCP_STATE_DETACHING |
4229 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4230 		mutex_exit(&pptr->port_mutex);
4231 		return (FC_UNCLAIMED);
4232 	}
4233 	mutex_exit(&pptr->port_mutex);
4234 
4235 	r_ctl = buf->ub_frame.r_ctl;
4236 
4237 	switch (r_ctl & R_CTL_ROUTING) {
4238 	case R_CTL_EXTENDED_SVC:
4239 		if (r_ctl == R_CTL_ELS_REQ) {
4240 			ls_code = buf->ub_buffer[0];
4241 
4242 			switch (ls_code) {
4243 			case LA_ELS_PRLI:
4244 				/*
4245 				 * We really don't care if something fails.
4246 				 * If the PRLI was not sent out, then the
4247 				 * other end will time it out.
4248 				 */
4249 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4250 					return (FC_SUCCESS);
4251 				}
4252 				return (FC_UNCLAIMED);
4253 				/* NOTREACHED */
4254 
4255 			default:
4256 				break;
4257 			}
4258 		}
4259 		/* FALLTHROUGH */
4260 
4261 	default:
4262 		return (FC_UNCLAIMED);
4263 	}
4264 }
4265 
4266 
4267 /*ARGSUSED*/
4268 static int
4269 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4270     uint32_t claimed)
4271 {
4272 	return (FC_UNCLAIMED);
4273 }
4274 
4275 /*
4276  *     Function: fcp_statec_callback
4277  *
4278  *  Description: The purpose of this function is to handle a port state change.
4279  *		 It is called from fp/fctl and, in a few instances, internally.
4280  *
4281  *     Argument: ulph		fp/fctl port handle
4282  *		 port_handle	fcp_port structure
4283  *		 port_state	Physical state of the port
4284  *		 port_top	Topology
4285  *		 *devlist	Pointer to the first entry of a table
4286  *				containing the remote ports that can be
4287  *				reached.
4288  *		 dev_cnt	Number of entries pointed by devlist.
4289  *		 port_sid	Port ID of the local port.
4290  *
4291  * Return Value: None
4292  */
4293 /*ARGSUSED*/
4294 static void
4295 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4296     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4297     uint32_t dev_cnt, uint32_t port_sid)
4298 {
4299 	uint32_t		link_count;
4300 	int			map_len = 0;
4301 	struct fcp_port	*pptr;
4302 	fcp_map_tag_t		*map_tag = NULL;
4303 
4304 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4305 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4306 		return;			/* nothing to work with! */
4307 	}
4308 
4309 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4310 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4311 	    "fcp_statec_callback: port state/dev_cnt/top ="
4312 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4313 	    dev_cnt, port_top);
4314 
4315 	mutex_enter(&pptr->port_mutex);
4316 
4317 	/*
4318 	 * If a thread is in detach, don't do anything.
4319 	 */
4320 	if (pptr->port_state & (FCP_STATE_DETACHING |
4321 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4322 		mutex_exit(&pptr->port_mutex);
4323 		return;
4324 	}
4325 
4326 	/*
4327 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4328 	 * init_pkt is called, it knows whether or not the target's status
4329 	 * (or pd) might be changing.
4330 	 */
4331 
4332 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4333 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4334 	}
4335 
4336 	/*
4337 	 * the transport doesn't allocate or probe unless being
4338 	 * asked to by either the applications or ULPs
4339 	 *
4340 	 * in cases where the port is OFFLINE at the time of port
4341 	 * attach callback and the link comes ONLINE later, for
4342 	 * easier automatic node creation (i.e. without you having to
4343 	 * go out and run the utility to perform LOGINs) the
4344 	 * following conditional is helpful
4345 	 */
4346 	pptr->port_phys_state = port_state;
4347 
4348 	if (dev_cnt) {
4349 		mutex_exit(&pptr->port_mutex);
4350 
4351 		map_len = sizeof (*map_tag) * dev_cnt;
4352 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4353 		if (map_tag == NULL) {
4354 			fcp_log(CE_WARN, pptr->port_dip,
4355 			    "!fcp%d: failed to allocate for map tags; "
4356 			    "state change will not be processed",
4357 			    pptr->port_instance);
4358 
4359 			mutex_enter(&pptr->port_mutex);
4360 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4361 			mutex_exit(&pptr->port_mutex);
4362 
4363 			return;
4364 		}
4365 
4366 		mutex_enter(&pptr->port_mutex);
4367 	}
4368 
4369 	if (pptr->port_id != port_sid) {
4370 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4371 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4372 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4373 		    port_sid);
4374 		/*
4375 		 * The local port changed ID. It is the first time a port ID
4376 		 * is assigned or something drastic happened.  We might have
4377 		 * been unplugged and replugged on another loop or fabric port
4378 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4379 		 * the fabric we were plugged into.
4380 		 */
4381 		pptr->port_id = port_sid;
4382 	}
4383 
4384 	switch (FC_PORT_STATE_MASK(port_state)) {
4385 	case FC_STATE_OFFLINE:
4386 	case FC_STATE_RESET_REQUESTED:
4387 		/*
4388 		 * link has gone from online to offline -- just update the
4389 		 * state of this port to BUSY and MARKed to go offline
4390 		 */
4391 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4392 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4393 		    "link went offline");
4394 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4395 			/*
4396 			 * We were offline a while ago and this one
4397 			 * seems to indicate that the loop has gone
4398 			 * dead forever.
4399 			 */
4400 			pptr->port_tmp_cnt += dev_cnt;
4401 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4402 			pptr->port_state |= FCP_STATE_INIT;
4403 			link_count = pptr->port_link_cnt;
4404 			fcp_handle_devices(pptr, devlist, dev_cnt,
4405 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4406 		} else {
4407 			pptr->port_link_cnt++;
4408 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4409 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4410 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4411 			if (pptr->port_mpxio) {
4412 				fcp_update_mpxio_path_verifybusy(pptr);
4413 			}
4414 			pptr->port_state |= FCP_STATE_OFFLINE;
4415 			pptr->port_state &=
4416 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4417 			pptr->port_tmp_cnt = 0;
4418 		}
4419 		mutex_exit(&pptr->port_mutex);
4420 		break;
4421 
4422 	case FC_STATE_ONLINE:
4423 	case FC_STATE_LIP:
4424 	case FC_STATE_LIP_LBIT_SET:
4425 		/*
4426 		 * link has gone from offline to online
4427 		 */
4428 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4429 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4430 		    "link went online");
4431 
4432 		pptr->port_link_cnt++;
4433 
4434 		while (pptr->port_ipkt_cnt) {
4435 			mutex_exit(&pptr->port_mutex);
4436 			delay(drv_usectohz(1000000));
4437 			mutex_enter(&pptr->port_mutex);
4438 		}
4439 
4440 		pptr->port_topology = port_top;
4441 
4442 		/*
4443 		 * The state of the targets and luns accessible through this
4444 		 * port is updated.
4445 		 */
4446 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4447 		    FCP_CAUSE_LINK_CHANGE);
4448 
4449 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4450 		pptr->port_state |= FCP_STATE_ONLINING;
4451 		pptr->port_tmp_cnt = dev_cnt;
4452 		link_count = pptr->port_link_cnt;
4453 
4454 		pptr->port_deadline = fcp_watchdog_time +
4455 		    FCP_ICMD_DEADLINE;
4456 
4457 		if (!dev_cnt) {
4458 			/*
4459 			 * We go directly to the online state if no remote
4460 			 * ports were discovered.
4461 			 */
4462 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4463 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4464 			    "No remote ports discovered");
4465 
4466 			pptr->port_state &= ~FCP_STATE_ONLINING;
4467 			pptr->port_state |= FCP_STATE_ONLINE;
4468 		}
4469 
4470 		switch (port_top) {
4471 		case FC_TOP_FABRIC:
4472 		case FC_TOP_PUBLIC_LOOP:
4473 		case FC_TOP_PRIVATE_LOOP:
4474 		case FC_TOP_PT_PT:
4475 
4476 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4477 				fcp_retry_ns_registry(pptr, port_sid);
4478 			}
4479 
4480 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4481 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4482 			break;
4483 
4484 		default:
4485 			/*
4486 			 * We got here because we were provided with an unknown
4487 			 * topology.
4488 			 */
4489 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4490 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4491 			}
4492 
4493 			pptr->port_tmp_cnt -= dev_cnt;
4494 			fcp_log(CE_WARN, pptr->port_dip,
4495 			    "!unknown/unsupported topology (0x%x)", port_top);
4496 			break;
4497 		}
4498 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4499 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4500 		    "Notify ssd of the reset to reinstate the reservations");
4501 
4502 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4503 		    &pptr->port_reset_notify_listf);
4504 
4505 		mutex_exit(&pptr->port_mutex);
4506 
4507 		break;
4508 
4509 	case FC_STATE_RESET:
4510 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4511 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4512 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4513 		    "RESET state, waiting for Offline/Online state_cb");
4514 		mutex_exit(&pptr->port_mutex);
4515 		break;
4516 
4517 	case FC_STATE_DEVICE_CHANGE:
4518 		/*
4519 		 * We come here when an application has requested
4520 		 * Dynamic node creation/deletion in Fabric connectivity.
4521 		 */
4522 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4523 		    FCP_STATE_INIT)) {
4524 			/*
4525 			 * This case can happen when the FCTL is in the
4526 			 * process of giving us an online event and the host
4527 			 * on the other side issues a PLOGI/PLOGO. Ideally
4528 			 * the state changes should be serialized unless
4529 			 * they are opposite (online-offline).
4530 			 * The transport will give us a final state change
4531 			 * so we can ignore this for the time being.
4532 			 */
4533 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4534 			mutex_exit(&pptr->port_mutex);
4535 			break;
4536 		}
4537 
4538 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4539 			fcp_retry_ns_registry(pptr, port_sid);
4540 		}
4541 
4542 		/*
4543 		 * Extend the deadline under steady state conditions
4544 		 * to provide more time for the device-change-commands
4545 		 */
4546 		if (!pptr->port_ipkt_cnt) {
4547 			pptr->port_deadline = fcp_watchdog_time +
4548 			    FCP_ICMD_DEADLINE;
4549 		}
4550 
4551 		/*
4552 		 * There is another race condition here, where if we were
4553 		 * in ONLINING state and a device in the map logs out,
4554 		 * fp will give another state change as DEVICE_CHANGE
4555 		 * and OLD. This will result in that target being offlined.
4556 		 * The pd_handle is freed. If from the first statec callback
4557 		 * we were going to fire a PLOGI/PRLI, the system will
4558 		 * panic in fc_ulp_transport with invalid pd_handle.
4559 		 * The fix is to check for the link_cnt before issuing
4560 		 * any command down.
4561 		 */
4562 		fcp_update_targets(pptr, devlist, dev_cnt,
4563 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4564 
4565 		link_count = pptr->port_link_cnt;
4566 
4567 		fcp_handle_devices(pptr, devlist, dev_cnt,
4568 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4569 
4570 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4571 
4572 		mutex_exit(&pptr->port_mutex);
4573 		break;
4574 
4575 	case FC_STATE_TARGET_PORT_RESET:
4576 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4577 			fcp_retry_ns_registry(pptr, port_sid);
4578 		}
4579 
4580 		/* Do nothing else */
4581 		mutex_exit(&pptr->port_mutex);
4582 		break;
4583 
4584 	default:
4585 		fcp_log(CE_WARN, pptr->port_dip,
4586 		    "!Invalid state change=0x%x", port_state);
4587 		mutex_exit(&pptr->port_mutex);
4588 		break;
4589 	}
4590 
4591 	if (map_tag) {
4592 		kmem_free(map_tag, map_len);
4593 	}
4594 }
4595 
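/*
 * Editorial summary (no new behavior) of the state dispatch performed by
 * fcp_statec_callback() above:
 *
 *	OFFLINE/RESET_REQUESTED	 mark LUNs BUSY+MARK and set FCP_STATE_OFFLINE,
 *				 or run fcp_handle_devices() with
 *				 FCP_CAUSE_LINK_DOWN if already offline.
 *	ONLINE/LIP/LIP_LBIT_SET	 bump port_link_cnt, mark LUNs BUSY+MARK, then
 *				 fcp_handle_devices() with
 *				 FCP_CAUSE_LINK_CHANGE (and notify ssd of the
 *				 reset).
 *	RESET			 wait for the follow-up offline/online callback.
 *	DEVICE_CHANGE		 fcp_update_targets() followed by
 *				 fcp_handle_devices() with FCP_CAUSE_TGT_CHANGE.
 *	TARGET_PORT_RESET	 retry the name-server registration if needed.
 */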
4596 /*
4597  *     Function: fcp_handle_devices
4598  *
4599  *  Description: This function updates the devices currently known by
4600  *		 walking the list provided by the caller.  The list passed
4601  *		 by the caller is supposed to be the list of reachable
4602  *		 devices.
4603  *
4604  *     Argument: *pptr		Fcp port structure.
4605  *		 *devlist	Pointer to the first entry of a table
4606  *				containing the remote ports that can be
4607  *				reached.
4608  *		 dev_cnt	Number of entries pointed by devlist.
4609  *		 link_cnt	Link state count.
4610  *		 *map_tag	Array of fcp_map_tag_t structures.
4611  *		 cause		What caused this function to be called.
4612  *
4613  * Return Value: None
4614  *
4615  *	  Notes: The pptr->port_mutex must be held.
4616  */
4617 static void
4618 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4619     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4620 {
4621 	int			i;
4622 	int			check_finish_init = 0;
4623 	fc_portmap_t		*map_entry;
4624 	struct fcp_tgt	*ptgt = NULL;
4625 
4626 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4627 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4628 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4629 
4630 	if (dev_cnt) {
4631 		ASSERT(map_tag != NULL);
4632 	}
4633 
4634 	/*
4635 	 * The following code goes through the list of remote ports that are
4636 	 * accessible through this (pptr) local port (The list walked is the
4637 	 * one provided by the caller which is the list of the remote ports
4638 	 * currently reachable).  It checks if any of them was already
4639 	 * known by looking for the corresponding target structure based on
4640 	 * the world wide name.	 If a target is part of the list it is tagged
4641 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4642 	 *
4643 	 * Old comment
4644 	 * -----------
4645 	 * Before we drop port mutex; we MUST get the tags updated; This
4646 	 * two step process is somewhat slow, but more reliable.
4647 	 */
4648 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4649 		map_entry = &(devlist[i]);
4650 
4651 		/*
4652 		 * get ptr to this map entry in our port's
4653 		 * list (if any)
4654 		 */
4655 		ptgt = fcp_lookup_target(pptr,
4656 		    (uchar_t *)&(map_entry->map_pwwn));
4657 
4658 		if (ptgt) {
4659 			map_tag[i] = ptgt->tgt_change_cnt;
4660 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4661 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4662 			}
4663 		}
4664 	}
4665 
4666 	/*
4667 	 * At this point we know which devices of the new list were already
4668 	 * known (The field tgt_aux_state of the target structure has been
4669 	 * set to FCP_TGT_TAGGED).
4670 	 *
4671 	 * The following code goes through the list of targets currently known
4672 	 * by the local port (the list is actually a hashing table).  If a
4673 	 * target is found and is not tagged, it means the target cannot
4674 	 * be reached anymore through the local port (pptr).  It is offlined.
4675 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4676 	 */
4677 	for (i = 0; i < FCP_NUM_HASH; i++) {
4678 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4679 		    ptgt = ptgt->tgt_next) {
4680 			mutex_enter(&ptgt->tgt_mutex);
4681 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4682 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4683 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4684 				fcp_offline_target_now(pptr, ptgt,
4685 				    link_cnt, ptgt->tgt_change_cnt, 0);
4686 			}
4687 			mutex_exit(&ptgt->tgt_mutex);
4688 		}
4689 	}
4690 
4691 	/*
4692 	 * At this point, the devices that were known but cannot be reached
4693 	 * anymore, have most likely been offlined.
4694 	 *
4695 	 * The following section of code seems to go through the list of
4696 	 * remote ports that can now be reached.  For every single one it
4697 	 * checks if it is already known or if it is a new port.
4698 	 */
4699 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4700 
4701 		if (check_finish_init) {
4702 			ASSERT(i > 0);
4703 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4704 			    map_tag[i - 1], cause);
4705 			check_finish_init = 0;
4706 		}
4707 
4708 		/* get a pointer to this map entry */
4709 		map_entry = &(devlist[i]);
4710 
4711 		/*
4712 		 * Check for the duplicate map entry flag. If we have marked
4713 		 * this entry as a duplicate we skip it since the correct
4714 		 * (perhaps even same) state change will be encountered
4715 		 * later in the list.
4716 		 */
4717 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4718 			continue;
4719 		}
4720 
4721 		/* get ptr to this map entry in our port's list (if any) */
4722 		ptgt = fcp_lookup_target(pptr,
4723 		    (uchar_t *)&(map_entry->map_pwwn));
4724 
4725 		if (ptgt) {
4726 			/*
4727 			 * This device was already known.  The field
4728 			 * tgt_aux_state is reset (was probably set to
4729 			 * FCP_TGT_TAGGED previously in this routine).
4730 			 */
4731 			ptgt->tgt_aux_state = 0;
4732 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4733 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4734 			    "handle_devices: map did/state/type/flags = "
4735 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4736 			    "tgt_state=%d",
4737 			    map_entry->map_did.port_id, map_entry->map_state,
4738 			    map_entry->map_type, map_entry->map_flags,
4739 			    ptgt->tgt_d_id, ptgt->tgt_state);
4740 		}
4741 
4742 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4743 		    map_entry->map_type == PORT_DEVICE_NEW ||
4744 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4745 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4746 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4747 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4748 			    "map_type=%x, did = %x",
4749 			    map_entry->map_type,
4750 			    map_entry->map_did.port_id);
4751 		}
4752 
4753 		switch (map_entry->map_type) {
4754 		case PORT_DEVICE_NOCHANGE:
4755 		case PORT_DEVICE_USER_CREATE:
4756 		case PORT_DEVICE_USER_LOGIN:
4757 		case PORT_DEVICE_NEW:
4758 		case PORT_DEVICE_REPORTLUN_CHANGED:
4759 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4760 
4761 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4762 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4763 			    cause) == TRUE) {
4764 
4765 				FCP_TGT_TRACE(ptgt, map_tag[i],
4766 				    FCP_TGT_TRACE_2);
4767 				check_finish_init++;
4768 			}
4769 			break;
4770 
4771 		case PORT_DEVICE_OLD:
4772 			if (ptgt != NULL) {
4773 				FCP_TGT_TRACE(ptgt, map_tag[i],
4774 				    FCP_TGT_TRACE_3);
4775 
4776 				mutex_enter(&ptgt->tgt_mutex);
4777 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4778 					/*
4779 					 * Must do an in-line wait for I/Os
4780 					 * to get drained
4781 					 */
4782 					mutex_exit(&ptgt->tgt_mutex);
4783 					mutex_exit(&pptr->port_mutex);
4784 
4785 					mutex_enter(&ptgt->tgt_mutex);
4786 					while (ptgt->tgt_ipkt_cnt ||
4787 					    fcp_outstanding_lun_cmds(ptgt)
4788 					    == FC_SUCCESS) {
4789 						mutex_exit(&ptgt->tgt_mutex);
4790 						delay(drv_usectohz(1000000));
4791 						mutex_enter(&ptgt->tgt_mutex);
4792 					}
4793 					mutex_exit(&ptgt->tgt_mutex);
4794 
4795 					mutex_enter(&pptr->port_mutex);
4796 					mutex_enter(&ptgt->tgt_mutex);
4797 
4798 					(void) fcp_offline_target(pptr, ptgt,
4799 					    link_cnt, map_tag[i], 0, 0);
4800 				}
4801 				mutex_exit(&ptgt->tgt_mutex);
4802 			}
4803 			check_finish_init++;
4804 			break;
4805 
4806 		case PORT_DEVICE_USER_DELETE:
4807 		case PORT_DEVICE_USER_LOGOUT:
4808 			if (ptgt != NULL) {
4809 				FCP_TGT_TRACE(ptgt, map_tag[i],
4810 				    FCP_TGT_TRACE_4);
4811 
4812 				mutex_enter(&ptgt->tgt_mutex);
4813 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4814 					(void) fcp_offline_target(pptr, ptgt,
4815 					    link_cnt, map_tag[i], 1, 0);
4816 				}
4817 				mutex_exit(&ptgt->tgt_mutex);
4818 			}
4819 			check_finish_init++;
4820 			break;
4821 
4822 		case PORT_DEVICE_CHANGED:
4823 			if (ptgt != NULL) {
4824 				FCP_TGT_TRACE(ptgt, map_tag[i],
4825 				    FCP_TGT_TRACE_5);
4826 
4827 				if (fcp_device_changed(pptr, ptgt,
4828 				    map_entry, link_cnt, map_tag[i],
4829 				    cause) == TRUE) {
4830 					check_finish_init++;
4831 				}
4832 			} else {
4833 				if (fcp_handle_mapflags(pptr, ptgt,
4834 				    map_entry, link_cnt, 0, cause) == TRUE) {
4835 					check_finish_init++;
4836 				}
4837 			}
4838 			break;
4839 
4840 		default:
4841 			fcp_log(CE_WARN, pptr->port_dip,
4842 			    "!Invalid map_type=0x%x", map_entry->map_type);
4843 			check_finish_init++;
4844 			break;
4845 		}
4846 	}
4847 
4848 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4849 		ASSERT(i > 0);
4850 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4851 		    map_tag[i-1], cause);
4852 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4853 		fcp_offline_all(pptr, link_cnt, cause);
4854 	}
4855 }
4856 
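/*
 * Editorial summary (no new behavior) of the three passes made by
 * fcp_handle_devices() above:
 *
 *	1. For each map entry, snapshot tgt_change_cnt into map_tag[] and,
 *	   on FCP_CAUSE_LINK_CHANGE, tag known targets
 *	   (tgt_aux_state = FCP_TGT_TAGGED).
 *	2. Walk the target hash table; a known target left untagged on a
 *	   link change is no longer reachable and is offlined via
 *	   fcp_offline_target_now().
 *	3. For each map entry, dispatch on map_type:
 *	   NEW/NOCHANGE/USER_CREATE/USER_LOGIN/REPORTLUN_CHANGED go through
 *	   fcp_handle_mapflags(); OLD/USER_DELETE/USER_LOGOUT are offlined;
 *	   CHANGED goes through fcp_device_changed().
 */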
4857 static int
4858 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4859 {
4860 	struct fcp_lun	*plun;
4861 	struct fcp_port *pptr;
4862 	int		 rscn_count;
4863 	int		 lun0_newalloc;
4864 	int		 ret  = TRUE;
4865 
4866 	ASSERT(ptgt);
4867 	pptr = ptgt->tgt_port;
4868 	lun0_newalloc = 0;
4869 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4870 		/*
4871 		 * no LUN struct for LUN 0 yet exists,
4872 		 * so create one
4873 		 */
4874 		plun = fcp_alloc_lun(ptgt);
4875 		if (plun == NULL) {
4876 			fcp_log(CE_WARN, pptr->port_dip,
4877 			    "!Failed to allocate lun 0 for"
4878 			    " D_ID=%x", ptgt->tgt_d_id);
4879 			return (ret);
4880 		}
4881 		lun0_newalloc = 1;
4882 	}
4883 
4884 	mutex_enter(&ptgt->tgt_mutex);
4885 	/*
4886 	 * consider lun 0 as device not connected if it is
4887 	 * offlined or newly allocated
4888 	 */
4889 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4890 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4891 	}
4892 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4893 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4894 	ptgt->tgt_lun_cnt = 1;
4895 	ptgt->tgt_report_lun_cnt = 0;
4896 	mutex_exit(&ptgt->tgt_mutex);
4897 
4898 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4899 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4900 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4901 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4902 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4903 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4904 		    "to D_ID=%x", ptgt->tgt_d_id);
4905 	} else {
4906 		ret = FALSE;
4907 	}
4908 
4909 	return (ret);
4910 }
4911 
4912 /*
4913  *     Function: fcp_handle_mapflags
4914  *
4915  *  Description: This function creates a target structure if the ptgt passed
4916  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4917  *		 into the target yet or the PRLI if we are logged into the
4918  *		 target already.  The rest of the treatment is done in the
4919  *		 callbacks of the PLOGI or PRLI.
4920  *
4921  *     Argument: *pptr		FCP Port structure.
4922  *		 *ptgt		Target structure.
4923  *		 *map_entry	Array of fc_portmap_t structures.
4924  *		 link_cnt	Link state count.
4925  *		 tgt_cnt	Target state count.
4926  *		 cause		What caused this function to be called.
4927  *
4928  * Return Value: TRUE	Failed
4929  *		 FALSE	Succeeded
4930  *
4931  *	  Notes: pptr->port_mutex must be owned.
4932  */
4933 static int
4934 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4935     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4936 {
4937 	int			lcount;
4938 	int			tcount;
4939 	int			ret = TRUE;
4940 	int			alloc;
4941 	struct fcp_ipkt	*icmd;
4942 	struct fcp_lun	*pseq_lun = NULL;
4943 	uchar_t			opcode;
4944 	int			valid_ptgt_was_passed = FALSE;
4945 
4946 	ASSERT(mutex_owned(&pptr->port_mutex));
4947 
4948 	/*
4949 	 * This case is possible when the FCTL has come up and done discovery
4950 	 * before FCP was loaded and attached. FCTL would have discovered the
4951 	 * devices and later the ULP came online. In this case the ULPs would
4952 	 * get PORT_DEVICE_NOCHANGE but the target would be NULL.
4953 	 */
4954 	if (ptgt == NULL) {
4955 		/* don't already have a target */
4956 		mutex_exit(&pptr->port_mutex);
4957 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4958 		mutex_enter(&pptr->port_mutex);
4959 
4960 		if (ptgt == NULL) {
4961 			fcp_log(CE_WARN, pptr->port_dip,
4962 			    "!FC target allocation failed");
4963 			return (ret);
4964 		}
4965 		mutex_enter(&ptgt->tgt_mutex);
4966 		ptgt->tgt_statec_cause = cause;
4967 		ptgt->tgt_tmp_cnt = 1;
4968 		mutex_exit(&ptgt->tgt_mutex);
4969 	} else {
4970 		valid_ptgt_was_passed = TRUE;
4971 	}
4972 
4973 	/*
4974 	 * Copy in the target parameters
4975 	 */
4976 	mutex_enter(&ptgt->tgt_mutex);
4977 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4978 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4979 	ptgt->tgt_pd_handle = map_entry->map_pd;
4980 	ptgt->tgt_fca_dev = NULL;
4981 
4982 	/* Copy port and node WWNs */
4983 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4984 	    FC_WWN_SIZE);
4985 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4986 	    FC_WWN_SIZE);
4987 
4988 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4989 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4990 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4991 	    valid_ptgt_was_passed) {
4992 		/*
4993 		 * determine if there are any tape LUNs on this target
4994 		 */
4995 		for (pseq_lun = ptgt->tgt_lun;
4996 		    pseq_lun != NULL;
4997 		    pseq_lun = pseq_lun->lun_next) {
4998 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4999 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
5000 				fcp_update_tgt_state(ptgt, FCP_RESET,
5001 				    FCP_LUN_MARK);
5002 				mutex_exit(&ptgt->tgt_mutex);
5003 				return (ret);
5004 			}
5005 		}
5006 	}
5007 
5008 	/*
5009 	 * if a REPORT_LUN_CHANGED unit attention was received,
5010 	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process
5011 	 */
5012 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5013 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5014 		mutex_exit(&ptgt->tgt_mutex);
5015 		mutex_exit(&pptr->port_mutex);
5016 
5017 		ret = fcp_handle_reportlun_changed(ptgt, cause);
5018 
5019 		mutex_enter(&pptr->port_mutex);
5020 		return (ret);
5021 	}
5022 
5023 	/*
5024 	 * If ptgt was NULL when this function was entered, then tgt_node_state
5025 	 * was never specifically initialized but zeroed out which means
5026 	 * FCP_TGT_NODE_NONE.
5027 	 */
5028 	switch (ptgt->tgt_node_state) {
5029 	case FCP_TGT_NODE_NONE:
5030 	case FCP_TGT_NODE_ON_DEMAND:
5031 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5032 		    !fcp_enable_auto_configuration &&
5033 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5034 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5035 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5036 		    fcp_enable_auto_configuration &&
5037 		    (ptgt->tgt_manual_config_only == 1) &&
5038 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5039 			/*
5040 			 * If auto configuration is set and
5041 			 * the tgt_manual_config_only flag is set then
5042 			 * we only want the user to be able to change
5043 			 * the state through create_on_demand.
5044 			 */
5045 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5046 		} else {
5047 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5048 		}
5049 		break;
5050 
5051 	case FCP_TGT_NODE_PRESENT:
5052 		break;
5053 	}
5054 	/*
5055 	 * If we are booting from a fabric device, make sure we
5056 	 * mark the node state appropriately for this target to be
5057 	 * enumerated
5058 	 */
5059 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5060 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5061 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5062 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5063 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5064 		}
5065 	}
5066 	mutex_exit(&ptgt->tgt_mutex);
5067 
5068 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5069 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5070 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5071 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5072 	    map_entry->map_rscn_info.ulp_rscn_count);
5073 
5074 	mutex_enter(&ptgt->tgt_mutex);
5075 
5076 	/*
5077 	 * Reset target OFFLINE state and mark the target BUSY
5078 	 */
5079 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5080 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5081 
5082 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5083 	lcount = link_cnt;
5084 
5085 	mutex_exit(&ptgt->tgt_mutex);
5086 	mutex_exit(&pptr->port_mutex);
5087 
5088 	/*
5089 	 * if we are already logged in, then we do a PRLI, else
5090 	 * we do a PLOGI first (to get logged in)
5091 	 *
5092 	 * We will not check if we are the PLOGI initiator
5093 	 */
5094 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5095 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5096 
5097 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5098 
5099 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5100 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5101 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5102 
5103 	if (icmd == NULL) {
5104 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5105 		/*
5106 		 * We've exited port_mutex before calling fcp_icmd_alloc,
5107 		 * we need to make sure we reacquire it before returning.
5108 		 */
5109 		mutex_enter(&pptr->port_mutex);
5110 		return (FALSE);
5111 	}
5112 
5113 	/* TRUE is only returned when the target is intentionally skipped */
5114 	ret = FALSE;
5115 	/* discover info about this target */
5116 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5117 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5118 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5119 	} else {
5120 		fcp_icmd_free(pptr, icmd);
5121 		ret = TRUE;
5122 	}
5123 	mutex_enter(&pptr->port_mutex);
5124 
5125 	return (ret);
5126 }
5127 
5128 /*
5129  *     Function: fcp_send_els
5130  *
5131  *  Description: Sends an ELS to the target specified by the caller.  Supports
5132  *		 PLOGI and PRLI.
5133  *
5134  *     Argument: *pptr		Fcp port.
5135  *		 *ptgt		Target to send the ELS to.
5136  *		 *icmd		Internal packet
5137  *		 opcode		ELS opcode
5138  *		 lcount		Link state change counter
5139  *		 tcount		Target state change counter
5140  *		 cause		What caused the call
5141  *
5142  * Return Value: DDI_SUCCESS
5143  *		 Others
5144  */
5145 static int
5146 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5147     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5148 {
5149 	fc_packet_t		*fpkt;
5150 	fc_frame_hdr_t		*hp;
5151 	int			internal = 0;
5152 	int			alloc;
5153 	int			cmd_len;
5154 	int			resp_len;
5155 	int			res = DDI_FAILURE; /* default result */
5156 	int			rval = DDI_FAILURE;
5157 
5158 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5159 	ASSERT(ptgt->tgt_port == pptr);
5160 
5161 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5162 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5163 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5164 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5165 
5166 	if (opcode == LA_ELS_PLOGI) {
5167 		cmd_len = sizeof (la_els_logi_t);
5168 		resp_len = sizeof (la_els_logi_t);
5169 	} else {
5170 		ASSERT(opcode == LA_ELS_PRLI);
5171 		cmd_len = sizeof (la_els_prli_t);
5172 		resp_len = sizeof (la_els_prli_t);
5173 	}
5174 
5175 	if (icmd == NULL) {
5176 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5177 		    sizeof (la_els_prli_t));
5178 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5179 		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5180 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5181 		if (icmd == NULL) {
5182 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5183 			return (res);
5184 		}
5185 		internal++;
5186 	}
5187 	fpkt = icmd->ipkt_fpkt;
5188 
5189 	fpkt->pkt_cmdlen = cmd_len;
5190 	fpkt->pkt_rsplen = resp_len;
5191 	fpkt->pkt_datalen = 0;
5192 	icmd->ipkt_retries = 0;
5193 
5194 	/* fill in fpkt info */
5195 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5196 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5197 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5198 
5199 	/* get ptr to frame hdr in fpkt */
5200 	hp = &fpkt->pkt_cmd_fhdr;
5201 
5202 	/*
5203 	 * fill in frame hdr
5204 	 */
5205 	hp->r_ctl = R_CTL_ELS_REQ;
5206 	hp->s_id = pptr->port_id;	/* source ID */
5207 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5208 	hp->type = FC_TYPE_EXTENDED_LS;
5209 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5210 	hp->seq_id = 0;
5211 	hp->rsvd = 0;
5212 	hp->df_ctl  = 0;
5213 	hp->seq_cnt = 0;
5214 	hp->ox_id = 0xffff;		/* i.e. none */
5215 	hp->rx_id = 0xffff;		/* i.e. none */
5216 	hp->ro = 0;
5217 
5218 	/*
5219 	 * at this point we have a filled in cmd pkt
5220 	 *
5221 	 * fill in the respective info, then use the transport to send
5222 	 * the packet
5223 	 *
5224 	 * for a PLOGI call fc_ulp_login(), and
5225 	 * for a PRLI call fc_ulp_issue_els()
5226 	 */
5227 	switch (opcode) {
5228 	case LA_ELS_PLOGI: {
5229 		struct la_els_logi logi;
5230 
5231 		bzero(&logi, sizeof (struct la_els_logi));
5232 
5233 		hp = &fpkt->pkt_cmd_fhdr;
5234 		hp->r_ctl = R_CTL_ELS_REQ;
5235 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5236 		logi.ls_code.mbz = 0;
5237 
5238 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5239 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5240 
5241 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5242 
5243 		mutex_enter(&pptr->port_mutex);
5244 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5245 
5246 			mutex_exit(&pptr->port_mutex);
5247 
5248 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5249 			if (rval == FC_SUCCESS) {
5250 				res = DDI_SUCCESS;
5251 				break;
5252 			}
5253 
5254 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5255 
5256 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5257 			    rval, "PLOGI");
5258 		} else {
5259 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5260 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5261 			    "fcp_send_els1: state change occurred"
5262 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5263 			mutex_exit(&pptr->port_mutex);
5264 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5265 		}
5266 		break;
5267 	}
5268 
5269 	case LA_ELS_PRLI: {
5270 		struct la_els_prli	prli;
5271 		struct fcp_prli		*fprli;
5272 
5273 		bzero(&prli, sizeof (struct la_els_prli));
5274 
5275 		hp = &fpkt->pkt_cmd_fhdr;
5276 		hp->r_ctl = R_CTL_ELS_REQ;
5277 
5278 		/* fill in PRLI cmd ELS fields */
5279 		prli.ls_code = LA_ELS_PRLI;
5280 		prli.page_length = 0x10;	/* service parameter page is 16 bytes */
5281 		prli.payload_length = sizeof (struct la_els_prli);
5282 
5283 		icmd->ipkt_opcode = LA_ELS_PRLI;
5284 
5285 		/* get ptr to PRLI service params */
5286 		fprli = (struct fcp_prli *)prli.service_params;
5287 
5288 		/* fill in service params */
5289 		fprli->type = 0x08;		/* FC-4 TYPE code for SCSI-FCP */
5290 		fprli->resvd1 = 0;
5291 		fprli->orig_process_assoc_valid = 0;
5292 		fprli->resp_process_assoc_valid = 0;
5293 		fprli->establish_image_pair = 1;
5294 		fprli->resvd2 = 0;
5295 		fprli->resvd3 = 0;
5296 		fprli->obsolete_1 = 0;
5297 		fprli->obsolete_2 = 0;
5298 		fprli->data_overlay_allowed = 0;
5299 		fprli->initiator_fn = 1;
5300 		fprli->confirmed_compl_allowed = 1;
5301 
5302 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5303 			fprli->target_fn = 1;
5304 		} else {
5305 			fprli->target_fn = 0;
5306 		}
5307 
5308 		fprli->retry = 1;
5309 		fprli->read_xfer_rdy_disabled = 1;
5310 		fprli->write_xfer_rdy_disabled = 0;
5311 
5312 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5313 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5314 
5315 		/* issue the PRLI request */
5316 
5317 		mutex_enter(&pptr->port_mutex);
5318 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5319 
5320 			mutex_exit(&pptr->port_mutex);
5321 
5322 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5323 			if (rval == FC_SUCCESS) {
5324 				res = DDI_SUCCESS;
5325 				break;
5326 			}
5327 
5328 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5329 
5330 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5331 			    rval, "PRLI");
5332 		} else {
5333 			mutex_exit(&pptr->port_mutex);
5334 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5335 		}
5336 		break;
5337 	}
5338 
5339 	default:
5340 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5341 		break;
5342 	}
5343 
5344 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5345 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5346 	    "fcp_send_els: returning %d", res);
5347 
5348 	if (res != DDI_SUCCESS) {
5349 		if (internal) {
5350 			fcp_icmd_free(pptr, icmd);
5351 		}
5352 	}
5353 
5354 	return (res);
5355 }
5356 
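/*
 * Editorial note on fcp_send_els() ownership -- when the caller passes
 * icmd == NULL the routine allocates its own internal packet ("internal")
 * and frees it itself on failure; when the caller supplies an icmd (as
 * fcp_handle_mapflags() does above) a failed send leaves the icmd to the
 * caller.  A hedged caller sketch (argument names are illustrative):
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0, nodma,
 *	    lcount, tcount, cause, rscn_count);
 *	if (fcp_send_els(pptr, ptgt, icmd, opcode, lcount, tcount,
 *	    cause) != DDI_SUCCESS)
 *		fcp_icmd_free(pptr, icmd);	caller frees on failure
 */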
5357 
5358 /*
5359  * called internally update the state of all of the tgts and each LUN
5360  * for this port (i.e. each target  known to be attached to this port)
5361  * if they are not already offline
5362  *
5363  * must be called with the port mutex owned
5364  *
5365  * acquires and releases the target mutexes for each target attached
5366  * to this port
5367  */
5368 void
5369 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5370 {
5371 	int i;
5372 	struct fcp_tgt *ptgt;
5373 
5374 	ASSERT(mutex_owned(&pptr->port_mutex));
5375 
5376 	for (i = 0; i < FCP_NUM_HASH; i++) {
5377 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5378 		    ptgt = ptgt->tgt_next) {
5379 			mutex_enter(&ptgt->tgt_mutex);
5380 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5381 			ptgt->tgt_change_cnt++;
5382 			ptgt->tgt_statec_cause = cause;
5383 			ptgt->tgt_tmp_cnt = 1;
5384 			ptgt->tgt_done = 0;
5385 			mutex_exit(&ptgt->tgt_mutex);
5386 		}
5387 	}
5388 }
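
/*
 * A minimal usage sketch of the function above, assuming the caller has a
 * suitable cause value at hand; the state bit shown, FCP_LUN_BUSY, is one
 * of the bits used elsewhere in this file:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	fcp_update_state(pptr, FCP_LUN_BUSY, cause);
 *	mutex_exit(&pptr->port_mutex);
 */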
5389 
5390 
5391 static void
5392 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5393 {
5394 	int i;
5395 	int ndevs;
5396 	struct fcp_tgt *ptgt;
5397 
5398 	ASSERT(mutex_owned(&pptr->port_mutex));
5399 
5400 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5401 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5402 		    ptgt = ptgt->tgt_next) {
5403 			ndevs++;
5404 		}
5405 	}
5406 
5407 	if (ndevs == 0) {
5408 		return;
5409 	}
5410 	pptr->port_tmp_cnt = ndevs;
5411 
5412 	for (i = 0; i < FCP_NUM_HASH; i++) {
5413 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5414 		    ptgt = ptgt->tgt_next) {
5415 			(void) fcp_call_finish_init_held(pptr, ptgt,
5416 			    lcount, ptgt->tgt_change_cnt, cause);
5417 		}
5418 	}
5419 }
5420 
5421 /*
5422  *     Function: fcp_update_tgt_state
5423  *
5424  *  Description: This function updates the field tgt_state of a target.  That
5425  *		 field is a bitmap whose bits can be set or reset
5426  *		 individually.	The action applied to the target state is also
5427  *		 applied to all the LUNs belonging to the target (provided the
5428  *		 LUN is not offline).  A side effect of applying the state
5429  *		 modification to the target and the LUNs is that the fields
5430  *		 tgt_trace of the target and lun_trace of the LUNs are set to zero.
5431  *
5432  *
5433  *     Argument: *ptgt	Target structure.
5434  *		 flag	Flag indicating what action to apply (set/reset).
5435  *		 state	State bits to update.
5436  *
5437  * Return Value: None
5438  *
5439  *	Context: Interrupt, Kernel or User context.
5440  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5441  *		 calling this function.
5442  */
5443 void
5444 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5445 {
5446 	struct fcp_lun *plun;
5447 
5448 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5449 
5450 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5451 		/* The target is not offline. */
5452 		if (flag == FCP_SET) {
5453 			ptgt->tgt_state |= state;
5454 			ptgt->tgt_trace = 0;
5455 		} else {
5456 			ptgt->tgt_state &= ~state;
5457 		}
5458 
5459 		for (plun = ptgt->tgt_lun; plun != NULL;
5460 		    plun = plun->lun_next) {
5461 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5462 				/* The LUN is not offline. */
5463 				if (flag == FCP_SET) {
5464 					plun->lun_state |= state;
5465 					plun->lun_trace = 0;
5466 				} else {
5467 					plun->lun_state &= ~state;
5468 				}
5469 			}
5470 		}
5471 	}
5472 }
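
/*
 * A minimal sketch of the set/reset pairing, assuming FCP_RESET is the
 * companion flag to FCP_SET used elsewhere in this driver; the target
 * mutex must be held, as noted in the header above.
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
 *	(do the work that requires the target to be marked busy)
 *	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
 *	mutex_exit(&ptgt->tgt_mutex);
 */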
5473 
5474 /*
5475  *     Function: fcp_update_lun_state
5476  *
5477  *  Description: This function updates the field lun_state of a LUN.  That
5478  *		 field is a bitmap whose bits can be set or reset
5479  *		 individually.
5480  *
5481  *     Argument: *plun	LUN structure.
5482  *		 flag	Flag indicating what action to apply (set/reset).
5483  *		 state	State bits to update.
5484  *
5485  * Return Value: None
5486  *
5487  *	Context: Interrupt, Kernel or User context.
5488  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5489  *		 calling this function.
5490  */
5491 void
5492 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5493 {
5494 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5495 
5496 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5497 
5498 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5499 		if (flag == FCP_SET) {
5500 			plun->lun_state |= state;
5501 		} else {
5502 			plun->lun_state &= ~state;
5503 		}
5504 	}
5505 }
5506 
5507 /*
5508  *     Function: fcp_get_port
5509  *
5510  *  Description: This function returns the fcp_port structure from the opaque
5511  *		 handle passed by the caller.  That opaque handle is the handle
5512  *		 used by fp/fctl to identify a particular local port.  That
5513  *		 handle has been stored in the corresponding fcp_port
5514  *		 structure.  This function walks the global list of
5515  *		 fcp_port structures until it finds one whose port_fp_handle
5516  *		 matches the handle passed by the caller.  This function enters the
5517  *		 mutex fcp_global_mutex while walking the global list and then
5518  *		 releases it.
5519  *
5520  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5521  *				particular port.
5522  *
5523  * Return Value: NULL		Not found.
5524  *		 Not NULL	Pointer to the fcp_port structure.
5525  *
5526  *	Context: Interrupt, Kernel or User context.
5527  */
5528 static struct fcp_port *
5529 fcp_get_port(opaque_t port_handle)
5530 {
5531 	struct fcp_port *pptr;
5532 
5533 	ASSERT(port_handle != NULL);
5534 
5535 	mutex_enter(&fcp_global_mutex);
5536 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5537 		if (pptr->port_fp_handle == port_handle) {
5538 			break;
5539 		}
5540 	}
5541 	mutex_exit(&fcp_global_mutex);
5542 
5543 	return (pptr);
5544 }
5545 
5546 
5547 static void
5548 fcp_unsol_callback(fc_packet_t *fpkt)
5549 {
5550 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5551 	struct fcp_port *pptr = icmd->ipkt_port;
5552 
5553 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5554 		caddr_t state, reason, action, expln;
5555 
5556 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5557 		    &action, &expln);
5558 
5559 		fcp_log(CE_WARN, pptr->port_dip,
5560 		    "!couldn't post response to unsolicited request: "
5561 		    " state=%s reason=%s rx_id=%x ox_id=%x",
5562 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5563 		    fpkt->pkt_cmd_fhdr.rx_id);
5564 	}
5565 	fcp_icmd_free(pptr, icmd);
5566 }
5567 
5568 
5569 /*
5570  * Perform general purpose preparation of a response to an unsolicited request
5571  */
5572 static void
5573 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5574     uchar_t r_ctl, uchar_t type)
5575 {
5576 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5577 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5578 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5579 	pkt->pkt_cmd_fhdr.type = type;
5580 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5581 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5582 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5583 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5584 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5585 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5586 	pkt->pkt_cmd_fhdr.ro = 0;
5587 	pkt->pkt_cmd_fhdr.rsvd = 0;
5588 	pkt->pkt_comp = fcp_unsol_callback;
5589 	pkt->pkt_pd = NULL;
5590 	pkt->pkt_ub_resp_token = (opaque_t)buf;
5591 }
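
/*
 * A worked example of the header preparation above, using made-up
 * addresses: if the unsolicited frame arrived with ub_frame.s_id 0x010300
 * (the remote port) and ub_frame.d_id 0x010000 (this port), the response
 * header is built with d_id 0x010300 and s_id 0x010000, i.e. the frame is
 * sent back to the originator within the same exchange (seq_id, ox_id and
 * rx_id are copied unchanged from the unsolicited buffer).
 */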
5592 
5593 
5594 /*ARGSUSED*/
5595 static int
5596 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5597 {
5598 	fc_packet_t		*fpkt;
5599 	struct la_els_prli	prli;
5600 	struct fcp_prli		*fprli;
5601 	struct fcp_ipkt	*icmd;
5602 	struct la_els_prli	*from;
5603 	struct fcp_prli		*orig;
5604 	struct fcp_tgt	*ptgt;
5605 	int			tcount = 0;
5606 	int			lcount;
5607 
5608 	from = (struct la_els_prli *)buf->ub_buffer;
5609 	orig = (struct fcp_prli *)from->service_params;
5610 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5611 	    NULL) {
5612 		mutex_enter(&ptgt->tgt_mutex);
5613 		tcount = ptgt->tgt_change_cnt;
5614 		mutex_exit(&ptgt->tgt_mutex);
5615 	}
5616 
5617 	mutex_enter(&pptr->port_mutex);
5618 	lcount = pptr->port_link_cnt;
5619 	mutex_exit(&pptr->port_mutex);
5620 
5621 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5622 	    sizeof (la_els_prli_t), 0,
5623 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5624 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5625 		return (FC_FAILURE);
5626 	}
5627 
5628 	fpkt = icmd->ipkt_fpkt;
5629 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5630 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5631 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5632 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5633 	fpkt->pkt_rsplen = 0;
5634 	fpkt->pkt_datalen = 0;
5635 
5636 	icmd->ipkt_opcode = LA_ELS_PRLI;
5637 
5638 	bzero(&prli, sizeof (struct la_els_prli));
5639 	fprli = (struct fcp_prli *)prli.service_params;
5640 	prli.ls_code = LA_ELS_ACC;
5641 	prli.page_length = 0x10;
5642 	prli.payload_length = sizeof (struct la_els_prli);
5643 
5644 	/* fill in service params */
5645 	fprli->type = 0x08;
5646 	fprli->resvd1 = 0;
5647 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5648 	fprli->orig_process_associator = orig->orig_process_associator;
5649 	fprli->resp_process_assoc_valid = 0;
5650 	fprli->establish_image_pair = 1;
5651 	fprli->resvd2 = 0;
5652 	fprli->resvd3 = 0;
5653 	fprli->obsolete_1 = 0;
5654 	fprli->obsolete_2 = 0;
5655 	fprli->data_overlay_allowed = 0;
5656 	fprli->initiator_fn = 1;
5657 	fprli->confirmed_compl_allowed = 1;
5658 
5659 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5660 		fprli->target_fn = 1;
5661 	} else {
5662 		fprli->target_fn = 0;
5663 	}
5664 
5665 	fprli->retry = 1;
5666 	fprli->read_xfer_rdy_disabled = 1;
5667 	fprli->write_xfer_rdy_disabled = 0;
5668 
5669 	/* save the unsol prli payload first */
5670 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5671 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5672 
5673 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5674 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5675 
5676 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5677 
5678 	mutex_enter(&pptr->port_mutex);
5679 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5680 		int rval;
5681 		mutex_exit(&pptr->port_mutex);
5682 
5683 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5684 		    FC_SUCCESS) {
5685 			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5686 			    ptgt != NULL) {
5687 				fcp_queue_ipkt(pptr, fpkt);
5688 				return (FC_SUCCESS);
5689 			}
5690 			/* Let it timeout */
5691 			fcp_icmd_free(pptr, icmd);
5692 			return (FC_FAILURE);
5693 		}
5694 	} else {
5695 		mutex_exit(&pptr->port_mutex);
5696 		fcp_icmd_free(pptr, icmd);
5697 		return (FC_FAILURE);
5698 	}
5699 
5700 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5701 
5702 	return (FC_SUCCESS);
5703 }
5704 
5705 /*
5706  *     Function: fcp_icmd_alloc
5707  *
5708  *  Description: This function allocates a fcp_ipkt structure.	The pkt_comp
5709  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5710  *		 modified by the caller (such as fcp_send_scsi).  The
5711  *		 structure is also tied to the state of the line and of the
5712  *		 target at a particular time.  That link is established by
5713  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5714  *		 and tcount which came respectively from pptr->link_cnt and
5715  *		 ptgt->tgt_change_cnt.
5716  *
5717  *     Argument: *pptr		Fcp port.
5718  *		 *ptgt		Target (destination of the command).
5719  *		 cmd_len	Length of the command.
5720  *		 resp_len	Length of the expected response.
5721  *		 data_len	Length of the data.
5722  *		 nodma		Indicates whether the command and response
5723  *				will be transferred through DMA or not.
5724  *		 lcount		Link state change counter.
5725  *		 tcount		Target state change counter.
5726  *		 cause		Reason that lead to this call.
5727  *
5728  * Return Value: NULL		Failed.
5729  *		 Not NULL	Internal packet address.
5730  */
5731 static struct fcp_ipkt *
5732 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5733     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5734     uint32_t rscn_count)
5735 {
5736 	int			dma_setup = 0;
5737 	fc_packet_t		*fpkt;
5738 	struct fcp_ipkt	*icmd = NULL;
5739 
5740 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5741 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5742 	    KM_NOSLEEP);
5743 	if (icmd == NULL) {
5744 		fcp_log(CE_WARN, pptr->port_dip,
5745 		    "!internal packet allocation failed");
5746 		return (NULL);
5747 	}
5748 
5749 	/*
5750 	 * initialize the allocated packet
5751 	 */
5752 	icmd->ipkt_nodma = nodma;
5753 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5754 	icmd->ipkt_lun = NULL;
5755 
5756 	icmd->ipkt_link_cnt = lcount;
5757 	icmd->ipkt_change_cnt = tcount;
5758 	icmd->ipkt_cause = cause;
5759 
5760 	mutex_enter(&pptr->port_mutex);
5761 	icmd->ipkt_port = pptr;
5762 	mutex_exit(&pptr->port_mutex);
5763 
5764 	/* keep track of amt of data to be sent in pkt */
5765 	icmd->ipkt_cmdlen = cmd_len;
5766 	icmd->ipkt_resplen = resp_len;
5767 	icmd->ipkt_datalen = data_len;
5768 
5769 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5770 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5771 
5772 	/* set pkt's private ptr to point to cmd pkt */
5773 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5774 
5775 	/* set FCA private ptr to memory just beyond */
5776 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5777 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5778 	    pptr->port_dmacookie_sz);
5779 
5780 	/* get ptr to fpkt substruct and fill it in */
5781 	fpkt = icmd->ipkt_fpkt;
5782 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5783 	    sizeof (struct fcp_ipkt));
5784 
5785 	if (ptgt != NULL) {
5786 		icmd->ipkt_tgt = ptgt;
5787 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5788 	}
5789 
5790 	fpkt->pkt_comp = fcp_icmd_callback;
5791 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5792 	fpkt->pkt_cmdlen = cmd_len;
5793 	fpkt->pkt_rsplen = resp_len;
5794 	fpkt->pkt_datalen = data_len;
5795 
5796 	/*
5797 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5798 	 * rscn_count as known to fcp down to the transport. If a valid count was
5799 	 * passed into this function, we allocate memory to actually pass down
5800 	 * this info.
5801 	 *
5802 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5803 	 * basically mean that fcp will not be able to help transport
5804 	 * distinguish if a new RSCN has come after fcp was last informed about
5805 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5806 	 * 5068068 where the device might end up going offline in case of RSCN
5807 	 * storms.
5808 	 */
5809 	fpkt->pkt_ulp_rscn_infop = NULL;
5810 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5811 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5812 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5813 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5814 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5815 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5816 			    "Failed to alloc memory to pass rscn info");
5817 		}
5818 	}
5819 
5820 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5821 		fc_ulp_rscn_info_t	*rscnp;
5822 
5823 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5824 		rscnp->ulp_rscn_count = rscn_count;
5825 	}
5826 
5827 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5828 		goto fail;
5829 	}
5830 	dma_setup++;
5831 
5832 	/*
5833 	 * Must hold target mutex across setting of pkt_pd and call to
5834 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5835 	 * away while we're not looking.
5836 	 */
5837 	if (ptgt != NULL) {
5838 		mutex_enter(&ptgt->tgt_mutex);
5839 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5840 
5841 		/* ask transport to do its initialization on this pkt */
5842 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5843 		    != FC_SUCCESS) {
5844 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5845 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5846 			    "fc_ulp_init_packet failed");
5847 			mutex_exit(&ptgt->tgt_mutex);
5848 			goto fail;
5849 		}
5850 		mutex_exit(&ptgt->tgt_mutex);
5851 	} else {
5852 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5853 		    != FC_SUCCESS) {
5854 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5855 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5856 			    "fc_ulp_init_packet failed");
5857 			goto fail;
5858 		}
5859 	}
5860 
5861 	mutex_enter(&pptr->port_mutex);
5862 	if (pptr->port_state & (FCP_STATE_DETACHING |
5863 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5864 		int rval;
5865 
5866 		mutex_exit(&pptr->port_mutex);
5867 
5868 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5869 		ASSERT(rval == FC_SUCCESS);
5870 
5871 		goto fail;
5872 	}
5873 
5874 	if (ptgt != NULL) {
5875 		mutex_enter(&ptgt->tgt_mutex);
5876 		ptgt->tgt_ipkt_cnt++;
5877 		mutex_exit(&ptgt->tgt_mutex);
5878 	}
5879 
5880 	pptr->port_ipkt_cnt++;
5881 
5882 	mutex_exit(&pptr->port_mutex);
5883 
5884 	return (icmd);
5885 
5886 fail:
5887 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5888 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5889 		    sizeof (fc_ulp_rscn_info_t));
5890 		fpkt->pkt_ulp_rscn_infop = NULL;
5891 	}
5892 
5893 	if (dma_setup) {
5894 		fcp_free_dma(pptr, icmd);
5895 	}
5896 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5897 	    (size_t)pptr->port_dmacookie_sz);
5898 
5899 	return (NULL);
5900 }
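
/*
 * A minimal sketch of the allocation pattern used by callers such as
 * fcp_send_scsi() and fcp_unsol_prli(); lcount and tcount are snapshots of
 * pptr->port_link_cnt and ptgt->tgt_change_cnt taken before the call so
 * that the packet can later be recognized as stale by the state-change
 * checks:
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, data_len,
 *	    nodma, lcount, tcount, cause, rscn_count);
 *	if (icmd == NULL) {
 *		return (DDI_FAILURE);
 *	}
 *	fpkt = icmd->ipkt_fpkt;
 */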
5901 
5902 /*
5903  *     Function: fcp_icmd_free
5904  *
5905  *  Description: Frees the internal command passed by the caller.
5906  *
5907  *     Argument: *pptr		Fcp port.
5908  *		 *icmd		Internal packet to free.
5909  *
5910  * Return Value: None
5911  */
5912 static void
5913 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5914 {
5915 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5916 
5917 	/* Let the underlying layers do their cleanup. */
5918 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5919 	    icmd->ipkt_fpkt);
5920 
5921 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5922 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5923 		    sizeof (fc_ulp_rscn_info_t));
5924 	}
5925 
5926 	fcp_free_dma(pptr, icmd);
5927 
5928 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5929 	    (size_t)pptr->port_dmacookie_sz);
5930 
5931 	mutex_enter(&pptr->port_mutex);
5932 
5933 	if (ptgt) {
5934 		mutex_enter(&ptgt->tgt_mutex);
5935 		ptgt->tgt_ipkt_cnt--;
5936 		mutex_exit(&ptgt->tgt_mutex);
5937 	}
5938 
5939 	pptr->port_ipkt_cnt--;
5940 	mutex_exit(&pptr->port_mutex);
5941 }
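
/*
 * A minimal sketch of the ownership convention seen in fcp_send_scsi():
 * once the packet has been handed to the transport successfully, the
 * completion callback is responsible for freeing it; if the hand-off
 * fails, the caller frees it immediately.
 *
 *	if (fcp_transport(pptr->port_fp_handle, fpkt, 1) != FC_SUCCESS) {
 *		fcp_icmd_free(pptr, icmd);
 *		return (DDI_FAILURE);
 *	}
 *	return (DDI_SUCCESS);
 */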
5942 
5943 /*
5944  *     Function: fcp_alloc_dma
5945  *
5946  *  Description: Allocates the DMA resources required for the internal
5947  *		 packet.
5948  *
5949  *     Argument: *pptr	FCP port.
5950  *		 *icmd	Internal FCP packet.
5951  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5952  *		 flags	Allocation flags (Sleep or NoSleep).
5953  *
5954  * Return Value: FC_SUCCESS
5955  *		 FC_NOMEM
5956  */
5957 static int
5958 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5959     int nodma, int flags)
5960 {
5961 	int		rval;
5962 	size_t		real_size;
5963 	uint_t		ccount;
5964 	int		bound = 0;
5965 	int		cmd_resp = 0;
5966 	fc_packet_t	*fpkt;
5967 	ddi_dma_cookie_t	pkt_data_cookie;
5968 	ddi_dma_cookie_t	*cp;
5969 	uint32_t		cnt;
5970 
5971 	fpkt = &icmd->ipkt_fc_packet;
5972 
5973 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5974 	    fpkt->pkt_resp_dma == NULL);
5975 
5976 	icmd->ipkt_nodma = nodma;
5977 
5978 	if (nodma) {
5979 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5980 		if (fpkt->pkt_cmd == NULL) {
5981 			goto fail;
5982 		}
5983 
5984 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5985 		if (fpkt->pkt_resp == NULL) {
5986 			goto fail;
5987 		}
5988 	} else {
5989 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5990 
5991 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5992 		if (rval == FC_FAILURE) {
5993 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5994 			    fpkt->pkt_resp_dma == NULL);
5995 			goto fail;
5996 		}
5997 		cmd_resp++;
5998 	}
5999 
6000 	if ((fpkt->pkt_datalen != 0) &&
6001 	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
6002 		/*
6003 		 * set up DMA handle and memory for the data in this packet
6004 		 */
6005 		if (ddi_dma_alloc_handle(pptr->port_dip,
6006 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6007 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6008 			goto fail;
6009 		}
6010 
6011 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6012 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6013 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6014 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6015 			goto fail;
6016 		}
6017 
6018 		/* did we get less DMA memory than asked for/needed? */
6019 		if (real_size < fpkt->pkt_datalen) {
6020 			goto fail;
6021 		}
6022 
6023 		/* bind DMA address and handle together */
6024 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6025 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6026 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6027 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6028 			goto fail;
6029 		}
6030 		bound++;
6031 
6032 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6033 			goto fail;
6034 		}
6035 
6036 		fpkt->pkt_data_cookie_cnt = ccount;
6037 
6038 		cp = fpkt->pkt_data_cookie;
6039 		*cp = pkt_data_cookie;
6040 		cp++;
6041 
6042 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
6043 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
6044 			    &pkt_data_cookie);
6045 			*cp = pkt_data_cookie;
6046 		}
6047 
6048 	} else if (fpkt->pkt_datalen != 0) {
6049 		/*
6050 		 * If it's a pseudo FCA, then it can't support DMA even in
6051 		 * SCSI data phase.
6052 		 */
6053 		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6054 		if (fpkt->pkt_data == NULL) {
6055 			goto fail;
6056 		}
6057 
6058 	}
6059 
6060 	return (FC_SUCCESS);
6061 
6062 fail:
6063 	if (bound) {
6064 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6065 	}
6066 
6067 	if (fpkt->pkt_data_dma) {
6068 		if (fpkt->pkt_data) {
6069 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6070 		}
6071 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6072 	} else {
6073 		if (fpkt->pkt_data) {
6074 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6075 		}
6076 	}
6077 
6078 	if (nodma) {
6079 		if (fpkt->pkt_cmd) {
6080 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6081 		}
6082 		if (fpkt->pkt_resp) {
6083 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6084 		}
6085 	} else {
6086 		if (cmd_resp) {
6087 			fcp_free_cmd_resp(pptr, fpkt);
6088 		}
6089 	}
6090 
6091 	return (FC_NOMEM);
6092 }
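
/*
 * The data buffer setup above follows the usual DDI DMA sequence; a
 * condensed sketch of the setup steps and the matching teardown performed
 * by fcp_free_dma() below (teardown runs in the reverse order of setup):
 *
 *	setup				teardown
 *	ddi_dma_alloc_handle()		ddi_dma_free_handle()
 *	ddi_dma_mem_alloc()		ddi_dma_mem_free()
 *	ddi_dma_addr_bind_handle()	ddi_dma_unbind_handle()
 *	ddi_dma_nextcookie() for each of the remaining (ccount - 1) cookies
 */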
6093 
6094 
6095 static void
6096 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6097 {
6098 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6099 
6100 	if (fpkt->pkt_data_dma) {
6101 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6102 		if (fpkt->pkt_data) {
6103 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6104 		}
6105 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6106 	} else {
6107 		if (fpkt->pkt_data) {
6108 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6109 		}
6110 		/*
6111 		 * Need we reset pkt_* to zero???
6112 		 */
6113 	}
6114 
6115 	if (icmd->ipkt_nodma) {
6116 		if (fpkt->pkt_cmd) {
6117 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6118 		}
6119 		if (fpkt->pkt_resp) {
6120 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6121 		}
6122 	} else {
6123 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6124 
6125 		fcp_free_cmd_resp(pptr, fpkt);
6126 	}
6127 }
6128 
6129 /*
6130  *     Function: fcp_lookup_target
6131  *
6132  *  Description: Finds a target given a WWN.
6133  *
6134  *     Argument: *pptr	FCP port.
6135  *		 *wwn	World Wide Name of the device to look for.
6136  *
6137  * Return Value: NULL		No target found
6138  *		 Not NULL	Target structure
6139  *
6140  *	Context: Interrupt context.
6141  *		 The mutex pptr->port_mutex must be owned.
6142  */
6143 /* ARGSUSED */
6144 static struct fcp_tgt *
6145 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6146 {
6147 	int			hash;
6148 	struct fcp_tgt	*ptgt;
6149 
6150 	ASSERT(mutex_owned(&pptr->port_mutex));
6151 
6152 	hash = FCP_HASH(wwn);
6153 
6154 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6155 	    ptgt = ptgt->tgt_next) {
6156 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6157 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6158 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6159 			break;
6160 		}
6161 	}
6162 
6163 	return (ptgt);
6164 }
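
/*
 * A minimal usage sketch, mirroring fcp_get_target_by_did() below; the
 * caller only needs to hold the port mutex around the lookup:
 *
 *	mutex_enter(&pptr->port_mutex);
 *	ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
 *	mutex_exit(&pptr->port_mutex);
 *	if (ptgt == NULL) {
 *		(no non-orphan target with that port WWN is known)
 *	}
 */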
6165 
6166 
6167 /*
6168  * Find target structure given a port identifier
6169  */
6170 static struct fcp_tgt *
6171 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6172 {
6173 	fc_portid_t		port_id;
6174 	la_wwn_t		pwwn;
6175 	struct fcp_tgt	*ptgt = NULL;
6176 
6177 	port_id.priv_lilp_posit = 0;
6178 	port_id.port_id = d_id;
6179 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6180 	    &pwwn) == FC_SUCCESS) {
6181 		mutex_enter(&pptr->port_mutex);
6182 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6183 		mutex_exit(&pptr->port_mutex);
6184 	}
6185 
6186 	return (ptgt);
6187 }
6188 
6189 
6190 /*
6191  * the packet completion callback routine for info cmd pkts
6192  *
6193  * this means fpkt pts to a response to either a PLOGI or a PRLI
6194  *
6195  * if there is an error an attempt is made to call a routine to resend
6196  * the command that failed
6197  */
6198 static void
6199 fcp_icmd_callback(fc_packet_t *fpkt)
6200 {
6201 	struct fcp_ipkt	*icmd;
6202 	struct fcp_port	*pptr;
6203 	struct fcp_tgt	*ptgt;
6204 	struct la_els_prli	*prli;
6205 	struct la_els_prli	prli_s;
6206 	struct fcp_prli		*fprli;
6207 	struct fcp_lun	*plun;
6208 	int		free_pkt = 1;
6209 	int		rval;
6210 	ls_code_t	resp;
6211 	uchar_t		prli_acc = 0;
6212 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6213 	int		lun0_newalloc;
6214 
6215 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6216 
6217 	/* get ptrs to the port and target structs for the cmd */
6218 	pptr = icmd->ipkt_port;
6219 	ptgt = icmd->ipkt_tgt;
6220 
6221 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6222 
6223 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6224 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6225 		    sizeof (prli_s));
6226 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6227 	}
6228 
6229 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6230 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6231 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6232 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6233 	    ptgt->tgt_d_id);
6234 
6235 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6236 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6237 
6238 		mutex_enter(&ptgt->tgt_mutex);
6239 		if (ptgt->tgt_pd_handle == NULL) {
6240 			/*
6241 			 * in a fabric environment the port device handles
6242 			 * get created only after successful LOGIN into the
6243 			 * transport, so the transport makes this port
6244 			 * device (pd) handle available in this packet, so
6245 			 * save it now
6246 			 */
6247 			ASSERT(fpkt->pkt_pd != NULL);
6248 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6249 		}
6250 		mutex_exit(&ptgt->tgt_mutex);
6251 
6252 		/* which ELS cmd is this response for ?? */
6253 		switch (icmd->ipkt_opcode) {
6254 		case LA_ELS_PLOGI:
6255 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6256 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6257 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6258 			    ptgt->tgt_d_id,
6259 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6260 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6261 
6262 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6263 			    FCP_TGT_TRACE_15);
6264 
6265 			/* Note that we are not allocating a new icmd */
6266 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6267 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6268 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6269 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6270 				    FCP_TGT_TRACE_16);
6271 				goto fail;
6272 			}
6273 			break;
6274 
6275 		case LA_ELS_PRLI:
6276 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6277 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6278 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6279 
6280 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6281 			    FCP_TGT_TRACE_17);
6282 
6283 			prli = &prli_s;
6284 
6285 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6286 			    sizeof (prli_s));
6287 
6288 			fprli = (struct fcp_prli *)prli->service_params;
6289 
6290 			mutex_enter(&ptgt->tgt_mutex);
6291 			ptgt->tgt_icap = fprli->initiator_fn;
6292 			ptgt->tgt_tcap = fprli->target_fn;
6293 			mutex_exit(&ptgt->tgt_mutex);
6294 
6295 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6296 				/*
6297 				 * this FCP device does not support target mode
6298 				 */
6299 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6300 				    FCP_TGT_TRACE_18);
6301 				goto fail;
6302 			}
6303 			if (fprli->retry == 1) {
6304 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6305 				    &ptgt->tgt_port_wwn);
6306 			}
6307 
6308 			/* target is no longer offline */
6309 			mutex_enter(&pptr->port_mutex);
6310 			mutex_enter(&ptgt->tgt_mutex);
6311 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6312 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6313 				    FCP_TGT_MARK);
6314 			} else {
6315 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6316 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6317 				    "fcp_icmd_callback,1: state change "
6318 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6319 				mutex_exit(&ptgt->tgt_mutex);
6320 				mutex_exit(&pptr->port_mutex);
6321 				goto fail;
6322 			}
6323 			mutex_exit(&ptgt->tgt_mutex);
6324 			mutex_exit(&pptr->port_mutex);
6325 
6326 			/*
6327 			 * lun 0 should always respond to inquiry, so
6328 			 * get the LUN struct for LUN 0
6329 			 *
6330 			 * Currently we deal with the first level of addressing.
6331 			 * If / when we start supporting 0x device types
6332 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6333 			 * this logic will need revisiting.
6334 			 */
6335 			lun0_newalloc = 0;
6336 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6337 				/*
6338 				 * no LUN struct for LUN 0 yet exists,
6339 				 * so create one
6340 				 */
6341 				plun = fcp_alloc_lun(ptgt);
6342 				if (plun == NULL) {
6343 					fcp_log(CE_WARN, pptr->port_dip,
6344 					    "!Failed to allocate lun 0 for"
6345 					    " D_ID=%x", ptgt->tgt_d_id);
6346 					goto fail;
6347 				}
6348 				lun0_newalloc = 1;
6349 			}
6350 
6351 			/* fill in LUN info */
6352 			mutex_enter(&ptgt->tgt_mutex);
6353 			/*
6354 			 * consider lun 0 as device not connected if it is
6355 			 * offlined or newly allocated
6356 			 */
6357 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6358 			    lun0_newalloc) {
6359 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6360 			}
6361 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6362 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6363 			ptgt->tgt_lun_cnt = 1;
6364 			ptgt->tgt_report_lun_cnt = 0;
6365 			mutex_exit(&ptgt->tgt_mutex);
6366 
6367 			/* Retrieve the rscn count (if a valid one exists) */
6368 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6369 				rscn_count = ((fc_ulp_rscn_info_t *)
6370 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6371 				    ->ulp_rscn_count;
6372 			} else {
6373 				rscn_count = FC_INVALID_RSCN_COUNT;
6374 			}
6375 
6376 			/* send Report Lun request to target */
6377 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6378 			    sizeof (struct fcp_reportlun_resp),
6379 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6380 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6381 				mutex_enter(&pptr->port_mutex);
6382 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6383 					fcp_log(CE_WARN, pptr->port_dip,
6384 					    "!Failed to send REPORT LUN to"
6385 					    "  D_ID=%x", ptgt->tgt_d_id);
6386 				} else {
6387 					FCP_TRACE(fcp_logq,
6388 					    pptr->port_instbuf, fcp_trace,
6389 					    FCP_BUF_LEVEL_5, 0,
6390 					    "fcp_icmd_callback,2: state change"
6391 					    " occurred for D_ID=0x%x",
6392 					    ptgt->tgt_d_id);
6393 				}
6394 				mutex_exit(&pptr->port_mutex);
6395 
6396 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6397 				    FCP_TGT_TRACE_19);
6398 
6399 				goto fail;
6400 			} else {
6401 				free_pkt = 0;
6402 				fcp_icmd_free(pptr, icmd);
6403 			}
6404 			break;
6405 
6406 		default:
6407 			fcp_log(CE_WARN, pptr->port_dip,
6408 			    "!fcp_icmd_callback Invalid opcode");
6409 			goto fail;
6410 		}
6411 
6412 		return;
6413 	}
6414 
6415 
6416 	/*
6417 	 * Other PLOGI failures are not retried as the
6418 	 * transport does it already
6419 	 */
6420 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6421 		if (fcp_is_retryable(icmd) &&
6422 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6423 
6424 			if (FCP_MUST_RETRY(fpkt)) {
6425 				fcp_queue_ipkt(pptr, fpkt);
6426 				return;
6427 			}
6428 
6429 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6430 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6431 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6432 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6433 			    fpkt->pkt_reason);
6434 
6435 			/*
6436 			 * Retry by recalling the routine that
6437 			 * originally queued this packet
6438 			 */
6439 			mutex_enter(&pptr->port_mutex);
6440 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6441 				caddr_t msg;
6442 
6443 				mutex_exit(&pptr->port_mutex);
6444 
6445 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6446 
6447 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6448 					fpkt->pkt_timeout +=
6449 					    FCP_TIMEOUT_DELTA;
6450 				}
6451 
6452 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6453 				    fpkt);
6454 				if (rval == FC_SUCCESS) {
6455 					return;
6456 				}
6457 
6458 				if (rval == FC_STATEC_BUSY ||
6459 				    rval == FC_OFFLINE) {
6460 					fcp_queue_ipkt(pptr, fpkt);
6461 					return;
6462 				}
6463 				(void) fc_ulp_error(rval, &msg);
6464 
6465 				fcp_log(CE_NOTE, pptr->port_dip,
6466 				    "!ELS 0x%x failed to d_id=0x%x;"
6467 				    " %s", icmd->ipkt_opcode,
6468 				    ptgt->tgt_d_id, msg);
6469 			} else {
6470 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6471 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6472 				    "fcp_icmd_callback,3: state change "
6473 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6474 				mutex_exit(&pptr->port_mutex);
6475 			}
6476 		}
6477 	} else {
6478 		if (fcp_is_retryable(icmd) &&
6479 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6480 			if (FCP_MUST_RETRY(fpkt)) {
6481 				fcp_queue_ipkt(pptr, fpkt);
6482 				return;
6483 			}
6484 		}
6485 		mutex_enter(&pptr->port_mutex);
6486 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6487 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6488 			mutex_exit(&pptr->port_mutex);
6489 			fcp_print_error(fpkt);
6490 		} else {
6491 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6492 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6493 			    "fcp_icmd_callback,4: state change occurred"
6494 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6495 			mutex_exit(&pptr->port_mutex);
6496 		}
6497 	}
6498 
6499 fail:
6500 	if (free_pkt) {
6501 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6502 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6503 		fcp_icmd_free(pptr, icmd);
6504 	}
6505 }
6506 
6507 
6508 /*
6509  * called internally to send an info cmd using the transport
6510  *
6511  * sends either an INQ or a REPORT_LUN
6512  *
6513  * when the packet is completed fcp_scsi_callback is called
6514  */
6515 static int
6516 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6517     int lcount, int tcount, int cause, uint32_t rscn_count)
6518 {
6519 	int			nodma;
6520 	struct fcp_ipkt		*icmd;
6521 	struct fcp_tgt		*ptgt;
6522 	struct fcp_port		*pptr;
6523 	fc_frame_hdr_t		*hp;
6524 	fc_packet_t		*fpkt;
6525 	struct fcp_cmd		fcp_cmd;
6526 	struct fcp_cmd		*fcmd;
6527 	union scsi_cdb		*scsi_cdb;
6528 
6529 	ASSERT(plun != NULL);
6530 
6531 	ptgt = plun->lun_tgt;
6532 	ASSERT(ptgt != NULL);
6533 
6534 	pptr = ptgt->tgt_port;
6535 	ASSERT(pptr != NULL);
6536 
6537 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6538 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6539 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6540 
6541 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6542 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6543 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6544 	    rscn_count);
6545 
6546 	if (icmd == NULL) {
6547 		return (DDI_FAILURE);
6548 	}
6549 
6550 	fpkt = icmd->ipkt_fpkt;
6551 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6552 	icmd->ipkt_retries = 0;
6553 	icmd->ipkt_opcode = opcode;
6554 	icmd->ipkt_lun = plun;
6555 
6556 	if (nodma) {
6557 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6558 	} else {
6559 		fcmd = &fcp_cmd;
6560 	}
6561 	bzero(fcmd, sizeof (struct fcp_cmd));
6562 
6563 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6564 
6565 	hp = &fpkt->pkt_cmd_fhdr;
6566 
6567 	hp->s_id = pptr->port_id;
6568 	hp->d_id = ptgt->tgt_d_id;
6569 	hp->r_ctl = R_CTL_COMMAND;
6570 	hp->type = FC_TYPE_SCSI_FCP;
6571 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6572 	hp->rsvd = 0;
6573 	hp->seq_id = 0;
6574 	hp->seq_cnt = 0;
6575 	hp->ox_id = 0xffff;
6576 	hp->rx_id = 0xffff;
6577 	hp->ro = 0;
6578 
6579 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6580 
6581 	/*
6582 	 * Request SCSI target for expedited processing
6583 	 */
6584 
6585 	/*
6586 	 * Set up for untagged queuing because we do not
6587 	 * know if the fibre device supports queuing.
6588 	 */
6589 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6590 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6591 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6592 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6593 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6594 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6595 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6596 
6597 	switch (opcode) {
6598 	case SCMD_INQUIRY_PAGE83:
6599 		/*
6600 		 * Prepare to get the Inquiry VPD page 83 information
6601 		 */
6602 		fcmd->fcp_cntl.cntl_read_data = 1;
6603 		fcmd->fcp_cntl.cntl_write_data = 0;
6604 		fcmd->fcp_data_len = alloc_len;
6605 
6606 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6607 		fpkt->pkt_comp = fcp_scsi_callback;
6608 
6609 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6610 		scsi_cdb->g0_addr2 = 0x01;
6611 		scsi_cdb->g0_addr1 = 0x83;
6612 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6613 		break;
6614 
6615 	case SCMD_INQUIRY:
6616 		fcmd->fcp_cntl.cntl_read_data = 1;
6617 		fcmd->fcp_cntl.cntl_write_data = 0;
6618 		fcmd->fcp_data_len = alloc_len;
6619 
6620 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6621 		fpkt->pkt_comp = fcp_scsi_callback;
6622 
6623 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6624 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6625 		break;
6626 
6627 	case SCMD_REPORT_LUN: {
6628 		fc_portid_t	d_id;
6629 		opaque_t	fca_dev;
6630 
6631 		ASSERT(alloc_len >= 16);
6632 
6633 		d_id.priv_lilp_posit = 0;
6634 		d_id.port_id = ptgt->tgt_d_id;
6635 
6636 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6637 
6638 		mutex_enter(&ptgt->tgt_mutex);
6639 		ptgt->tgt_fca_dev = fca_dev;
6640 		mutex_exit(&ptgt->tgt_mutex);
6641 
6642 		fcmd->fcp_cntl.cntl_read_data = 1;
6643 		fcmd->fcp_cntl.cntl_write_data = 0;
6644 		fcmd->fcp_data_len = alloc_len;
6645 
6646 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6647 		fpkt->pkt_comp = fcp_scsi_callback;
6648 
6649 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6650 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6651 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6652 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6653 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6654 		break;
6655 	}
6656 
6657 	default:
6658 		fcp_log(CE_WARN, pptr->port_dip,
6659 		    "!fcp_send_scsi Invalid opcode");
6660 		break;
6661 	}
6662 
6663 	if (!nodma) {
6664 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6665 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6666 	}
6667 
6668 	mutex_enter(&pptr->port_mutex);
6669 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6670 
6671 		mutex_exit(&pptr->port_mutex);
6672 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6673 		    FC_SUCCESS) {
6674 			fcp_icmd_free(pptr, icmd);
6675 			return (DDI_FAILURE);
6676 		}
6677 		return (DDI_SUCCESS);
6678 	} else {
6679 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6680 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6681 		    "fcp_send_scsi,1: state change occurred"
6682 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6683 		mutex_exit(&pptr->port_mutex);
6684 		fcp_icmd_free(pptr, icmd);
6685 		return (DDI_FAILURE);
6686 	}
6687 }
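
/*
 * A worked example of the allocation length packing done in the
 * SCMD_REPORT_LUN case above, using a hypothetical alloc_len of 0x200
 * (512) bytes:
 *
 *	scc5_count0 = 0x200 & 0xff		= 0x00
 *	scc5_count1 = (0x200 >> 8) & 0xff	= 0x02
 *	scc5_count2 = (0x200 >> 16) & 0xff	= 0x00
 *	scc5_count3 = (0x200 >> 24) & 0xff	= 0x00
 */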
6688 
6689 
6690 /*
6691  * called by fcp_scsi_callback to handle the case where
6692  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6693  */
6694 static int
6695 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6696 {
6697 	uchar_t				rqlen;
6698 	int				rval = DDI_FAILURE;
6699 	struct scsi_extended_sense	sense_info, *sense;
6700 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6701 	    fpkt->pkt_ulp_private;
6702 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6703 	struct fcp_port		*pptr = ptgt->tgt_port;
6704 
6705 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6706 
6707 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6708 		/*
6709 		 * SCSI-II Reserve Release support. Some older FC drives return
6710 		 * a Reservation Conflict for the Report Luns command.
6711 		 */
6712 		if (icmd->ipkt_nodma) {
6713 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6714 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6715 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6716 		} else {
6717 			fcp_rsp_t	new_resp;
6718 
6719 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6720 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6721 
6722 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6723 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6724 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6725 
6726 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6727 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6728 		}
6729 
6730 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6731 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6732 
6733 		return (DDI_SUCCESS);
6734 	}
6735 
6736 	sense = &sense_info;
6737 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6738 		/* no need to continue if sense length is not set */
6739 		return (rval);
6740 	}
6741 
6742 	/* casting 64-bit integer to 8-bit */
6743 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6744 	    sizeof (struct scsi_extended_sense));
6745 
6746 	if (rqlen < 14) {
6747 		/* no need to continue if request length isn't long enough */
6748 		return (rval);
6749 	}
6750 
6751 	if (icmd->ipkt_nodma) {
6752 		/*
6753 		 * We can safely use fcp_response_len here since the
6754 		 * only path that calls fcp_check_reportlun,
6755 		 * fcp_scsi_callback, has already called
6756 		 * fcp_validate_fcp_response.
6757 		 */
6758 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6759 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6760 	} else {
6761 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6762 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6763 		    sizeof (struct scsi_extended_sense));
6764 	}
6765 
6766 	if (!FCP_SENSE_NO_LUN(sense)) {
6767 		mutex_enter(&ptgt->tgt_mutex);
6768 		/* clear the flag if any */
6769 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6770 		mutex_exit(&ptgt->tgt_mutex);
6771 	}
6772 
6773 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6774 	    (sense->es_add_code == 0x20)) {
6775 		if (icmd->ipkt_nodma) {
6776 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6777 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6778 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6779 		} else {
6780 			fcp_rsp_t	new_resp;
6781 
6782 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6783 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6784 
6785 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6786 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6787 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6788 
6789 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6790 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6791 		}
6792 
6793 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6794 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6795 
6796 		return (DDI_SUCCESS);
6797 	}
6798 
6799 	/*
6800 	 * This is for the STK library, which returns a check condition
6801 	 * to indicate the device is not ready and manual assistance is needed.
6802 	 * It does so in response to a report lun command when the door is open.
6803 	 */
6804 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6805 		if (icmd->ipkt_nodma) {
6806 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6807 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6808 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6809 		} else {
6810 			fcp_rsp_t	new_resp;
6811 
6812 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6813 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6814 
6815 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6816 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6817 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6818 
6819 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6820 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6821 		}
6822 
6823 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6824 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6825 
6826 		return (DDI_SUCCESS);
6827 	}
6828 
6829 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6830 	    (FCP_SENSE_NO_LUN(sense))) {
6831 		mutex_enter(&ptgt->tgt_mutex);
6832 		if ((FCP_SENSE_NO_LUN(sense)) &&
6833 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6834 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6835 			mutex_exit(&ptgt->tgt_mutex);
6836 			/*
6837 			 * reconfig was triggered by ILLEGAL REQUEST but
6838 			 * got ILLEGAL REQUEST again
6839 			 */
6840 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6841 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6842 			    "!FCP: Unable to obtain Report Lun data"
6843 			    " target=%x", ptgt->tgt_d_id);
6844 		} else {
6845 			if (ptgt->tgt_tid == NULL) {
6846 				timeout_id_t	tid;
6847 				/*
6848 				 * REPORT LUN data has changed.	 Kick off
6849 				 * rediscovery
6850 				 */
6851 				tid = timeout(fcp_reconfigure_luns,
6852 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6853 
6854 				ptgt->tgt_tid = tid;
6855 				ptgt->tgt_state |= FCP_TGT_BUSY;
6856 			}
6857 			if (FCP_SENSE_NO_LUN(sense)) {
6858 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6859 			}
6860 			mutex_exit(&ptgt->tgt_mutex);
6861 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6862 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6863 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6864 				    "!FCP:Report Lun Has Changed"
6865 				    " target=%x", ptgt->tgt_d_id);
6866 			} else if (FCP_SENSE_NO_LUN(sense)) {
6867 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6868 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6869 				    "!FCP:LU Not Supported"
6870 				    " target=%x", ptgt->tgt_d_id);
6871 			}
6872 		}
6873 		rval = DDI_SUCCESS;
6874 	}
6875 
6876 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6877 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6878 	    "D_ID=%x, sense=%x, status=%x",
6879 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6880 	    rsp->fcp_u.fcp_status.scsi_status);
6881 
6882 	return (rval);
6883 }
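
/*
 * A condensed sketch of the conditions handled above; in each of these
 * cases the FCP response is rewritten to STATUS_GOOD and fcp_dummy_lun is
 * substituted for the REPORT LUN data:
 *
 *	scsi_status == STATUS_RESERVATION_CONFLICT
 *	es_key == KEY_ILLEGAL_REQUEST && es_add_code == 0x20
 *	es_key == KEY_NOT_READY && es_add_code == 0x04
 *
 * A sense indicating that the REPORT LUN data has changed or that the
 * logical unit is not supported instead kicks off rediscovery via
 * fcp_reconfigure_luns() (unless one is already pending) and returns
 * DDI_SUCCESS without faking any data.
 */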
6884 
6885 /*
6886  *     Function: fcp_scsi_callback
6887  *
6888  *  Description: This is the callback routine set by fcp_send_scsi() after
6889  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6890  *		 it calls fcp_icmd_alloc().  The SCSI commands completed here
6891  *		 INQUIRY_PAGE83.
6892  *
6893  *     Argument: *fpkt	 FC packet used to convey the command
6894  *
6895  * Return Value: None
6896  */
6897 static void
6898 fcp_scsi_callback(fc_packet_t *fpkt)
6899 {
6900 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6901 	    fpkt->pkt_ulp_private;
6902 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6903 	struct fcp_port	*pptr;
6904 	struct fcp_tgt	*ptgt;
6905 	struct fcp_lun	*plun;
6906 	struct fcp_rsp		response, *rsp;
6907 
6908 	ptgt = icmd->ipkt_tgt;
6909 	pptr = ptgt->tgt_port;
6910 	plun = icmd->ipkt_lun;
6911 
6912 	if (icmd->ipkt_nodma) {
6913 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6914 	} else {
6915 		rsp = &response;
6916 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6917 		    sizeof (struct fcp_rsp));
6918 	}
6919 
6920 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6921 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6922 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6923 	    "status=%x, lun num=%x",
6924 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6925 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6926 
6927 	/*
6928 	 * Pre-init LUN GUID with NWWN if it is not a device that
6929 	 * supports multiple luns and we know it's not page83
6930 	 * compliant.  Although using an NWWN is not lun unique,
6931 	 * we will be fine since there is only one lun behind the target
6932 	 * in this case.
6933 	 */
6934 	if ((plun->lun_guid_size == 0) &&
6935 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6936 	    (fcp_symmetric_device_probe(plun) == 0)) {
6937 
6938 		char ascii_wwn[FC_WWN_SIZE*2+1];
6939 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6940 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6941 	}
6942 
6943 	/*
6944 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6945 	 * when they have more data than what is asked for in the CDB. An overrun
6946 	 * is really when FCP_DL is smaller than the data length in the CDB.
6947 	 * In this case we know that the REPORT LUN command we formed within
6948 	 * this binary has the correct FCP_DL. So this OVERRUN is due to bad device
6949 	 * behavior. In reality this is FC_SUCCESS.
6950 	 */
6951 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6952 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6953 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6954 		fpkt->pkt_state = FC_PKT_SUCCESS;
6955 	}
6956 
6957 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6958 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6959 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6960 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6961 		    ptgt->tgt_d_id);
6962 
6963 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6964 			/*
6965 			 * Inquiry VPD page command on A5K SES devices would
6966 			 * result in data CRC errors.
6967 			 */
6968 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6969 				(void) fcp_handle_page83(fpkt, icmd, 1);
6970 				return;
6971 			}
6972 		}
6973 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6974 		    FCP_MUST_RETRY(fpkt)) {
6975 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6976 			fcp_retry_scsi_cmd(fpkt);
6977 			return;
6978 		}
6979 
6980 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6981 		    FCP_TGT_TRACE_20);
6982 
6983 		mutex_enter(&pptr->port_mutex);
6984 		mutex_enter(&ptgt->tgt_mutex);
6985 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6986 			mutex_exit(&ptgt->tgt_mutex);
6987 			mutex_exit(&pptr->port_mutex);
6988 			fcp_print_error(fpkt);
6989 		} else {
6990 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6991 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6992 			    "fcp_scsi_callback,1: state change occurred"
6993 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6994 			mutex_exit(&ptgt->tgt_mutex);
6995 			mutex_exit(&pptr->port_mutex);
6996 		}
6997 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6998 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6999 		fcp_icmd_free(pptr, icmd);
7000 		return;
7001 	}
7002 
7003 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7004 
7005 	mutex_enter(&pptr->port_mutex);
7006 	mutex_enter(&ptgt->tgt_mutex);
7007 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7008 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7009 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7010 		    "fcp_scsi_callback,2: state change occurred"
7011 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7012 		mutex_exit(&ptgt->tgt_mutex);
7013 		mutex_exit(&pptr->port_mutex);
7014 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7015 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7016 		fcp_icmd_free(pptr, icmd);
7017 		return;
7018 	}
7019 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7020 
7021 	mutex_exit(&ptgt->tgt_mutex);
7022 	mutex_exit(&pptr->port_mutex);
7023 
7024 	if (icmd->ipkt_nodma) {
7025 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7026 		    sizeof (struct fcp_rsp));
7027 	} else {
7028 		bep = &fcp_rsp_err;
7029 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7030 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7031 	}
7032 
7033 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7034 		fcp_retry_scsi_cmd(fpkt);
7035 		return;
7036 	}
7037 
7038 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7039 	    FCP_NO_FAILURE) {
7040 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7041 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7042 		    "rsp_code=0x%x, rsp_len_set=0x%x",
7043 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7044 		fcp_retry_scsi_cmd(fpkt);
7045 		return;
7046 	}
7047 
7048 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7049 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7050 		fcp_queue_ipkt(pptr, fpkt);
7051 		return;
7052 	}
7053 
7054 	/*
7055 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7056 	 * with illegal request, as per the SCSI spec.
7057 	 * Crossbridge is one such device and Daktari's SES node is another.
7058 	 * We ideally want to enumerate these devices as non-mpxio devices.
7059 	 * SES nodes (Daktari only currently) are an exception to this.
7060 	 */
7061 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7062 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7063 
7064 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7065 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
7066 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7067 		    "check condition. May enumerate as non-mpxio device",
7068 		    ptgt->tgt_d_id, plun->lun_type);
7069 
7070 		/*
7071 		 * If we let Daktari's SES be enumerated as a non-mpxio
7072 		 * device, there will be a discrepancy in that the other
7073 		 * internal FC disks will get enumerated as mpxio devices.
7074 		 * Applications like luxadm expect this to be consistent.
7075 		 *
7076 		 * So, we put in a hack here to check if this is an SES device
7077 		 * and handle it here.
7078 		 */
7079 		if (plun->lun_type == DTYPE_ESI) {
7080 			/*
7081 			 * Since pkt_state is actually FC_PKT_SUCCESS
7082 			 * at this stage, we fake a failure here so that
7083 			 * fcp_handle_page83 will create a device path using
7084 			 * the WWN instead of the GUID, which is not there anyway
7085 			 */
7086 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7087 			(void) fcp_handle_page83(fpkt, icmd, 1);
7088 			return;
7089 		}
7090 
7091 		mutex_enter(&ptgt->tgt_mutex);
7092 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7093 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7094 		mutex_exit(&ptgt->tgt_mutex);
7095 
7096 		(void) fcp_call_finish_init(pptr, ptgt,
7097 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7098 		    icmd->ipkt_cause);
7099 		fcp_icmd_free(pptr, icmd);
7100 		return;
7101 	}
7102 
7103 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7104 		int rval = DDI_FAILURE;
7105 
7106 		/*
7107 		 * handle cases where report lun isn't supported
7108 		 * by faking up our own REPORT_LUN response or
7109 		 * UNIT ATTENTION
7110 		 */
7111 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7112 			rval = fcp_check_reportlun(rsp, fpkt);
7113 
7114 			/*
7115 			 * fcp_check_reportlun might have modified the
7116 			 * FCP response. Copy it in again to get an updated
7117 			 * FCP response
7118 			 */
7119 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7120 				rsp = &response;
7121 
7122 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7123 				    fpkt->pkt_resp_acc,
7124 				    sizeof (struct fcp_rsp));
7125 			}
7126 		}
7127 
7128 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7129 			if (rval == DDI_SUCCESS) {
7130 				(void) fcp_call_finish_init(pptr, ptgt,
7131 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7132 				    icmd->ipkt_cause);
7133 				fcp_icmd_free(pptr, icmd);
7134 			} else {
7135 				fcp_retry_scsi_cmd(fpkt);
7136 			}
7137 
7138 			return;
7139 		}
7140 	} else {
7141 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7142 			mutex_enter(&ptgt->tgt_mutex);
7143 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7144 			mutex_exit(&ptgt->tgt_mutex);
7145 		}
7146 	}
7147 
7148 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7149 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7150 		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7151 		    DDI_DMA_SYNC_FORCPU);
7152 	}
7153 
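	/*
	 * Dispatch the successful response to the handler for the internal
	 * command that was sent: INQUIRY, REPORT_LUN or INQUIRY page 0x83.
	 */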
7154 	switch (icmd->ipkt_opcode) {
7155 	case SCMD_INQUIRY:
7156 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7157 		fcp_handle_inquiry(fpkt, icmd);
7158 		break;
7159 
7160 	case SCMD_REPORT_LUN:
7161 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7162 		    FCP_TGT_TRACE_22);
7163 		fcp_handle_reportlun(fpkt, icmd);
7164 		break;
7165 
7166 	case SCMD_INQUIRY_PAGE83:
7167 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7168 		(void) fcp_handle_page83(fpkt, icmd, 0);
7169 		break;
7170 
7171 	default:
7172 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7173 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7174 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7175 		fcp_icmd_free(pptr, icmd);
7176 		break;
7177 	}
7178 }
7179 
7180 
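/*
 *     Function: fcp_retry_scsi_cmd
 *
 *  Description: Retries an internal SCSI command (REPORT_LUN or INQUIRY)
 *		 by requeueing the FC packet, provided retries remain, the
 *		 command is retryable and no target state change occurred.
 *		 Otherwise the discovery accounting is completed and the
 *		 internal packet is freed.
 *
 *     Argument: *fpkt	FC packet used to convey the command.
 *
 * Return Value: None
 */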
7181 static void
7182 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7183 {
7184 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7185 	    fpkt->pkt_ulp_private;
7186 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7187 	struct fcp_port	*pptr = ptgt->tgt_port;
7188 
7189 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7190 	    fcp_is_retryable(icmd)) {
7191 		mutex_enter(&pptr->port_mutex);
7192 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7193 			mutex_exit(&pptr->port_mutex);
7194 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7195 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7196 			    "Retrying %s to %x; state=%x, reason=%x",
7197 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7198 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7199 			    fpkt->pkt_state, fpkt->pkt_reason);
7200 
7201 			fcp_queue_ipkt(pptr, fpkt);
7202 		} else {
7203 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7204 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7205 			    "fcp_retry_scsi_cmd,1: state change occurred"
7206 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7207 			mutex_exit(&pptr->port_mutex);
7208 			(void) fcp_call_finish_init(pptr, ptgt,
7209 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7210 			    icmd->ipkt_cause);
7211 			fcp_icmd_free(pptr, icmd);
7212 		}
7213 	} else {
7214 		fcp_print_error(fpkt);
7215 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7216 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7217 		fcp_icmd_free(pptr, icmd);
7218 	}
7219 }
7220 
7221 /*
7222  *     Function: fcp_handle_page83
7223  *
7224  *  Description: Handles the response to INQUIRY_PAGE83.
7225  *
7226  *     Argument: *fpkt	FC packet used to convey the command.
7227  *		 *icmd	Original fcp_ipkt structure.
7228  *		 ignore_page83_data
7229  *			If 1, this is a special device's page83 response;
7230  *			the device should still be enumerated under mpxio.
7231  *
7232  * Return Value: None
7233  */
7234 static void
7235 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7236     int ignore_page83_data)
7237 {
7238 	struct fcp_port	*pptr;
7239 	struct fcp_lun	*plun;
7240 	struct fcp_tgt	*ptgt;
7241 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7242 	int			fail = 0;
7243 	ddi_devid_t		devid;
7244 	char			*guid = NULL;
7245 	int			ret;
7246 
7247 	ASSERT(icmd != NULL && fpkt != NULL);
7248 
7249 	pptr = icmd->ipkt_port;
7250 	ptgt = icmd->ipkt_tgt;
7251 	plun = icmd->ipkt_lun;
7252 
7253 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7254 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7255 
7256 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7257 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7258 
7259 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7260 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7261 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7262 		    "dtype=0x%x, lun num=%x",
7263 		    pptr->port_instance, ptgt->tgt_d_id,
7264 		    dev_id_page[0], plun->lun_num);
7265 
7266 		ret = ddi_devid_scsi_encode(
7267 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7268 		    NULL,		/* driver name */
7269 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7270 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7271 		    NULL,		/* page 80 data */
7272 		    0,		/* page 80 len */
7273 		    dev_id_page,	/* page 83 data */
7274 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7275 		    &devid);
7276 
7277 		if (ret == DDI_SUCCESS) {
7278 
7279 			guid = ddi_devid_to_guid(devid);
7280 
7281 			if (guid) {
7282 				/*
7283 				 * Check our current guid.  If it's non-null
7284 				 * and it has changed, we need to copy it into
7285 				 * lun_old_guid since we might still need it.
7286 				 */
7287 				if (plun->lun_guid &&
7288 				    strcmp(guid, plun->lun_guid)) {
7289 					unsigned int len;
7290 
7291 					/*
7292 					 * If the guid of the LUN changes,
7293 					 * reconfiguration should be triggered
7294 					 * to reflect the changes.
7295 					 * i.e. we should offline the LUN with
7296 					 * the old guid, and online the LUN with
7297 					 * the new guid.
7298 					 */
7299 					plun->lun_state |= FCP_LUN_CHANGED;
7300 
7301 					if (plun->lun_old_guid) {
7302 						kmem_free(plun->lun_old_guid,
7303 						    plun->lun_old_guid_size);
7304 					}
7305 
7306 					len = plun->lun_guid_size;
7307 					plun->lun_old_guid_size = len;
7308 
7309 					plun->lun_old_guid = kmem_zalloc(len,
7310 					    KM_NOSLEEP);
7311 
7312 					if (plun->lun_old_guid) {
7313 						/*
7314 						 * The alloc was successful,
7315 						 * so do the copy.
7316 						 */
7317 						bcopy(plun->lun_guid,
7318 						    plun->lun_old_guid, len);
7319 					} else {
7320 						fail = 1;
7321 						plun->lun_old_guid_size = 0;
7322 					}
7323 				}
7324 				if (!fail) {
7325 					if (fcp_copy_guid_2_lun_block(
7326 					    plun, guid)) {
7327 						fail = 1;
7328 					}
7329 				}
7330 				ddi_devid_free_guid(guid);
7331 
7332 			} else {
7333 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7334 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7335 				    "fcp_handle_page83: unable to create "
7336 				    "GUID");
7337 
7338 				/* couldn't create good guid from devid */
7339 				fail = 1;
7340 			}
7341 			ddi_devid_free(devid);
7342 
7343 		} else if (ret == DDI_NOT_WELL_FORMED) {
7344 			/* NULL filled data for page 83 */
7345 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7346 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7347 			    "fcp_handle_page83: retry GUID");
7348 
7349 			icmd->ipkt_retries = 0;
7350 			fcp_retry_scsi_cmd(fpkt);
7351 			return;
7352 		} else {
7353 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7354 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7355 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7356 			    ret);
7357 			/*
7358 			 * Since the page83 validation was
7359 			 * introduced late, we are tolerant of
7360 			 * existing devices that were already
7361 			 * found to work under mpxio, like the
7362 			 * A5200's SES device.  Its page83 response
7363 			 * is not standard-compliant, but we still
7364 			 * want it to be enumerated under mpxio.
7365 			 */
7366 			if (fcp_symmetric_device_probe(plun) != 0) {
7367 				fail = 1;
7368 			}
7369 		}
7370 
7371 	} else {
7372 		/* bad packet state */
7373 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7374 
7375 		/*
7376 		 * Some special devices (A5K SES and Daktari's SES devices)
7377 		 * should be enumerated under mpxio,
7378 		 * or "luxadm dis" will fail.
7379 		 */
7380 		if (ignore_page83_data) {
7381 			fail = 0;
7382 		} else {
7383 			fail = 1;
7384 		}
7385 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7386 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7387 		    "!Devid page cmd failed. "
7388 		    "fpkt_state: %x fpkt_reason: %x "
7389 		    "ignore_page83: %d",
7390 		    fpkt->pkt_state, fpkt->pkt_reason,
7391 		    ignore_page83_data);
7392 	}
7393 
7394 	mutex_enter(&pptr->port_mutex);
7395 	mutex_enter(&plun->lun_mutex);
7396 	/*
7397 	 * If lun_cip is not NULL, we must not update lun_mpxio, so as to
7398 	 * avoid a mismatch between lun_cip and lun_mpxio.
7399 	 */
7400 	if (plun->lun_cip == NULL) {
7401 		/*
7402 		 * If we don't have a guid for this lun it's because we were
7403 		 * unable to glean one from the page 83 response.  Set the
7404 		 * control flag to 0 here to make sure that we don't attempt to
7405 		 * enumerate it under mpxio.
7406 		 */
7407 		if (fail || pptr->port_mpxio == 0) {
7408 			plun->lun_mpxio = 0;
7409 		} else {
7410 			plun->lun_mpxio = 1;
7411 		}
7412 	}
7413 	mutex_exit(&plun->lun_mutex);
7414 	mutex_exit(&pptr->port_mutex);
7415 
7416 	mutex_enter(&ptgt->tgt_mutex);
7417 	plun->lun_state &=
7418 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7419 	mutex_exit(&ptgt->tgt_mutex);
7420 
7421 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7422 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7423 
7424 	fcp_icmd_free(pptr, icmd);
7425 }
7426 
7427 /*
7428  *     Function: fcp_handle_inquiry
7429  *
7430  *  Description: Called by fcp_scsi_callback to handle the response to an
7431  *		 INQUIRY request.
7432  *
7433  *     Argument: *fpkt	FC packet used to convey the command.
7434  *		 *icmd	Original fcp_ipkt structure.
7435  *
7436  * Return Value: None
7437  */
7438 static void
7439 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7440 {
7441 	struct fcp_port	*pptr;
7442 	struct fcp_lun	*plun;
7443 	struct fcp_tgt	*ptgt;
7444 	uchar_t		dtype;
7445 	uchar_t		pqual;
7446 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7447 
7448 	ASSERT(icmd != NULL && fpkt != NULL);
7449 
7450 	pptr = icmd->ipkt_port;
7451 	ptgt = icmd->ipkt_tgt;
7452 	plun = icmd->ipkt_lun;
7453 
7454 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7455 	    sizeof (struct scsi_inquiry));
7456 
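	/*
	 * The inquiry dtype byte packs the peripheral qualifier in its top
	 * three bits and the peripheral device type in its low five bits;
	 * split the two apart here.
	 */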
7457 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7458 	pqual = plun->lun_inq.inq_dtype >> 5;
7459 
7460 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7461 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7462 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7463 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7464 	    plun->lun_num, dtype, pqual);
7465 
7466 	if (pqual != 0) {
7467 		/*
7468 		 * Non-zero peripheral qualifier
7469 		 */
7470 		fcp_log(CE_CONT, pptr->port_dip,
7471 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7472 		    "Device type=0x%x Peripheral qual=0x%x\n",
7473 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7474 
7475 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7476 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7477 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7478 		    "Device type=0x%x Peripheral qual=0x%x\n",
7479 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7480 
7481 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7482 
7483 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7484 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7485 		fcp_icmd_free(pptr, icmd);
7486 		return;
7487 	}
7488 
7489 	/*
7490 	 * If the device is already initialized, check the dtype
7491 	 * for a change. If it has changed then update the flags
7492 	 * so that fcp_create_luns() will offline the old device and
7493 	 * create the new device. Refer to bug 4764752.
7494 	 */
7495 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7496 		plun->lun_state |= FCP_LUN_CHANGED;
7497 	}
7498 	plun->lun_type = plun->lun_inq.inq_dtype;
7499 
7500 	/*
7501 	 * This code is setting/initializing the throttling in the FCA
7502 	 * driver.
7503 	 */
7504 	mutex_enter(&pptr->port_mutex);
7505 	if (!pptr->port_notify) {
7506 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7507 			uint32_t cmd = 0;
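			/*
			 * Since cmd starts out as zero, the expression below
			 * reduces to FC_NOTIFY_THROTTLE in the low byte with
			 * FCP_SVE_THROTTLE placed in the next byte.
			 */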
7508 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7509 			    ((cmd & 0xFFFFFF00 >> 8) |
7510 			    FCP_SVE_THROTTLE << 8));
7511 			pptr->port_notify = 1;
7512 			mutex_exit(&pptr->port_mutex);
7513 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7514 			mutex_enter(&pptr->port_mutex);
7515 		}
7516 	}
7517 
7518 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7519 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7520 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7521 		    "fcp_handle_inquiry,1: state change occurred"
7522 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7523 		mutex_exit(&pptr->port_mutex);
7524 
7525 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7526 		(void) fcp_call_finish_init(pptr, ptgt,
7527 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7528 		    icmd->ipkt_cause);
7529 		fcp_icmd_free(pptr, icmd);
7530 		return;
7531 	}
7532 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7533 	mutex_exit(&pptr->port_mutex);
7534 
7535 	/* Retrieve the rscn count (if a valid one exists) */
7536 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7537 		rscn_count = ((fc_ulp_rscn_info_t *)
7538 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7539 	} else {
7540 		rscn_count = FC_INVALID_RSCN_COUNT;
7541 	}
7542 
7543 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7544 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7545 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7546 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7547 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7548 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7549 		(void) fcp_call_finish_init(pptr, ptgt,
7550 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7551 		    icmd->ipkt_cause);
7552 	}
7553 
7554 	/*
7555 	 * The INQUIRY VPD page 0x83 sent just above will uniquely identify
7556 	 * this logical unit; its response is handled by fcp_handle_page83().
7557 	 */
7558 	fcp_icmd_free(pptr, icmd);
7559 }
7560 
7561 /*
7562  *     Function: fcp_handle_reportlun
7563  *
7564  *  Description: Called by fcp_scsi_callback to handle the response to a
7565  *		 REPORT_LUN request.
7566  *
7567  *     Argument: *fpkt	FC packet used to convey the command.
7568  *		 *icmd	Original fcp_ipkt structure.
7569  *
7570  * Return Value: None
7571  */
7572 static void
7573 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7574 {
7575 	int				i;
7576 	int				nluns_claimed;
7577 	int				nluns_bufmax;
7578 	int				len;
7579 	uint16_t			lun_num;
7580 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7581 	struct fcp_port			*pptr;
7582 	struct fcp_tgt			*ptgt;
7583 	struct fcp_lun			*plun;
7584 	struct fcp_reportlun_resp	*report_lun;
7585 
7586 	pptr = icmd->ipkt_port;
7587 	ptgt = icmd->ipkt_tgt;
7588 	len = fpkt->pkt_datalen;
7589 
7590 	if ((len < FCP_LUN_HEADER) ||
7591 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7592 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7593 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7594 		fcp_icmd_free(pptr, icmd);
7595 		return;
7596 	}
7597 
7598 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7599 	    fpkt->pkt_datalen);
7600 
7601 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7602 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7603 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7604 	    pptr->port_instance, ptgt->tgt_d_id);
7605 
7606 	/*
7607 	 * Get the number of LUNs the device claims it has.  The response
7608 	 * header supplies the LUN list length in bytes (8 bytes per LUN).
7609 	 */
7610 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
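	/*
	 * For example, a reported LUN list length of 0x20 bytes describes
	 * four 8-byte LUN entries, so nluns_claimed would be 4 here.
	 */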
7611 
7612 	/*
7613 	 * Get the maximum number of luns the buffer submitted can hold.
7614 	 */
7615 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7616 
7617 	/*
7618 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7619 	 */
7620 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7621 		kmem_free(report_lun, len);
7622 
7623 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7624 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7625 		    ptgt->tgt_d_id);
7626 
7627 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7628 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7629 		fcp_icmd_free(pptr, icmd);
7630 		return;
7631 	}
7632 
7633 	/*
7634 	 * If there are more LUNs than we have allocated memory for,
7635 	 * allocate more space and send down yet another report lun if
7636 	 * the maximum number of attempts hasn't been reached.
7637 	 */
7638 	mutex_enter(&ptgt->tgt_mutex);
7639 
7640 	if ((nluns_claimed > nluns_bufmax) &&
7641 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7642 
7643 		struct fcp_lun *plun;
7644 
7645 		ptgt->tgt_report_lun_cnt++;
7646 		plun = ptgt->tgt_lun;
7647 		ASSERT(plun != NULL);
7648 		mutex_exit(&ptgt->tgt_mutex);
7649 
7650 		kmem_free(report_lun, len);
7651 
7652 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7653 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7654 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7655 		    nluns_claimed, ptgt->tgt_d_id);
7656 
7657 		/* Retrieve the rscn count (if a valid one exists) */
7658 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7659 			rscn_count = ((fc_ulp_rscn_info_t *)
7660 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7661 			    ulp_rscn_count;
7662 		} else {
7663 			rscn_count = FC_INVALID_RSCN_COUNT;
7664 		}
7665 
7666 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7667 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7668 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7669 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7670 			(void) fcp_call_finish_init(pptr, ptgt,
7671 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7672 			    icmd->ipkt_cause);
7673 		}
7674 
7675 		fcp_icmd_free(pptr, icmd);
7676 		return;
7677 	}
7678 
7679 	if (nluns_claimed > nluns_bufmax) {
7680 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7681 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7682 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7683 		    "	 Number of LUNs lost=%x",
7684 		    ptgt->tgt_port_wwn.raw_wwn[0],
7685 		    ptgt->tgt_port_wwn.raw_wwn[1],
7686 		    ptgt->tgt_port_wwn.raw_wwn[2],
7687 		    ptgt->tgt_port_wwn.raw_wwn[3],
7688 		    ptgt->tgt_port_wwn.raw_wwn[4],
7689 		    ptgt->tgt_port_wwn.raw_wwn[5],
7690 		    ptgt->tgt_port_wwn.raw_wwn[6],
7691 		    ptgt->tgt_port_wwn.raw_wwn[7],
7692 		    nluns_claimed - nluns_bufmax);
7693 
7694 		nluns_claimed = nluns_bufmax;
7695 	}
7696 	ptgt->tgt_lun_cnt = nluns_claimed;
7697 
7698 	/*
7699 	 * Identify missing LUNs and print warning messages
7700 	 */
7701 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7702 		int offline;
7703 		int exists = 0;
7704 
7705 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7706 
7707 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7708 			uchar_t		*lun_string;
7709 
7710 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7711 
7712 			switch (lun_string[0] & 0xC0) {
7713 			case FCP_LUN_ADDRESSING:
7714 			case FCP_PD_ADDRESSING:
7715 			case FCP_VOLUME_ADDRESSING:
7716 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7717 				    lun_string[1];
7718 				if (plun->lun_num == lun_num) {
7719 					exists++;
7720 					break;
7721 				}
7722 				break;
7723 
7724 			default:
7725 				break;
7726 			}
7727 		}
7728 
7729 		if (!exists && !offline) {
7730 			mutex_exit(&ptgt->tgt_mutex);
7731 
7732 			mutex_enter(&pptr->port_mutex);
7733 			mutex_enter(&ptgt->tgt_mutex);
7734 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7735 				/*
7736 				 * set disappear flag when device was connected
7737 				 */
7738 				if (!(plun->lun_state &
7739 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7740 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7741 				}
7742 				mutex_exit(&ptgt->tgt_mutex);
7743 				mutex_exit(&pptr->port_mutex);
7744 				if (!(plun->lun_state &
7745 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7746 					fcp_log(CE_NOTE, pptr->port_dip,
7747 					    "!Lun=%x for target=%x disappeared",
7748 					    plun->lun_num, ptgt->tgt_d_id);
7749 				}
7750 				mutex_enter(&ptgt->tgt_mutex);
7751 			} else {
7752 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7753 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7754 				    "fcp_handle_reportlun,1: state change"
7755 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7756 				mutex_exit(&ptgt->tgt_mutex);
7757 				mutex_exit(&pptr->port_mutex);
7758 				kmem_free(report_lun, len);
7759 				(void) fcp_call_finish_init(pptr, ptgt,
7760 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7761 				    icmd->ipkt_cause);
7762 				fcp_icmd_free(pptr, icmd);
7763 				return;
7764 			}
7765 		} else if (exists) {
7766 			/*
7767 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7768 			 * actually exists in REPORT_LUN response
7769 			 */
7770 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7771 				plun->lun_state &=
7772 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7773 			}
7774 			if (offline || plun->lun_num == 0) {
7775 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7776 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7777 					mutex_exit(&ptgt->tgt_mutex);
7778 					fcp_log(CE_NOTE, pptr->port_dip,
7779 					    "!Lun=%x for target=%x reappeared",
7780 					    plun->lun_num, ptgt->tgt_d_id);
7781 					mutex_enter(&ptgt->tgt_mutex);
7782 				}
7783 			}
7784 		}
7785 	}
7786 
7787 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7788 	mutex_exit(&ptgt->tgt_mutex);
7789 
7790 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7791 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7792 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7793 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7794 
7795 	/* scan each lun */
7796 	for (i = 0; i < nluns_claimed; i++) {
7797 		uchar_t	*lun_string;
7798 
7799 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7800 
7801 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7802 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7803 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7804 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7805 		    lun_string[0]);
7806 
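		/*
		 * Each 8-byte LUN entry encodes its addressing method in the
		 * top two bits of byte 0; for the three methods handled here
		 * the LUN number comes from the low six bits of byte 0 and
		 * all of byte 1.  For example, an entry starting 0x00 0x05
		 * (peripheral device addressing) decodes to LUN 5.
		 */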
7807 		switch (lun_string[0] & 0xC0) {
7808 		case FCP_LUN_ADDRESSING:
7809 		case FCP_PD_ADDRESSING:
7810 		case FCP_VOLUME_ADDRESSING:
7811 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7812 
7813 			/* We will skip masked LUNs because of the blacklist. */
7814 			if (fcp_lun_blacklist != NULL) {
7815 				mutex_enter(&ptgt->tgt_mutex);
7816 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7817 				    lun_num) == TRUE) {
7818 					ptgt->tgt_lun_cnt--;
7819 					mutex_exit(&ptgt->tgt_mutex);
7820 					break;
7821 				}
7822 				mutex_exit(&ptgt->tgt_mutex);
7823 			}
7824 
7825 			/* see if this LUN is already allocated */
7826 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7827 				plun = fcp_alloc_lun(ptgt);
7828 				if (plun == NULL) {
7829 					fcp_log(CE_NOTE, pptr->port_dip,
7830 					    "!Lun allocation failed"
7831 					    " target=%x lun=%x",
7832 					    ptgt->tgt_d_id, lun_num);
7833 					break;
7834 				}
7835 			}
7836 
7837 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7838 			/* convert to LUN */
7839 			plun->lun_addr.ent_addr_0 =
7840 			    BE_16(*(uint16_t *)&(lun_string[0]));
7841 			plun->lun_addr.ent_addr_1 =
7842 			    BE_16(*(uint16_t *)&(lun_string[2]));
7843 			plun->lun_addr.ent_addr_2 =
7844 			    BE_16(*(uint16_t *)&(lun_string[4]));
7845 			plun->lun_addr.ent_addr_3 =
7846 			    BE_16(*(uint16_t *)&(lun_string[6]));
7847 
7848 			plun->lun_num = lun_num;
7849 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7850 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7851 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7852 
7853 			/* Retrieve the rscn count (if a valid one exists) */
7854 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7855 				rscn_count = ((fc_ulp_rscn_info_t *)
7856 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7857 				    ulp_rscn_count;
7858 			} else {
7859 				rscn_count = FC_INVALID_RSCN_COUNT;
7860 			}
7861 
7862 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7863 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7864 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7865 				mutex_enter(&pptr->port_mutex);
7866 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7867 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7868 					fcp_log(CE_NOTE, pptr->port_dip,
7869 					    "!failed to send INQUIRY"
7870 					    " target=%x lun=%x",
7871 					    ptgt->tgt_d_id, plun->lun_num);
7872 				} else {
7873 					FCP_TRACE(fcp_logq,
7874 					    pptr->port_instbuf, fcp_trace,
7875 					    FCP_BUF_LEVEL_5, 0,
7876 					    "fcp_handle_reportlun,2: state"
7877 					    " change occurred for D_ID=0x%x",
7878 					    ptgt->tgt_d_id);
7879 				}
7880 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7881 				mutex_exit(&pptr->port_mutex);
7882 			} else {
7883 				continue;
7884 			}
7885 			break;
7886 
7887 		default:
7888 			fcp_log(CE_WARN, NULL,
7889 			    "!Unsupported LUN Addressing method %x "
7890 			    "in response to REPORT_LUN", lun_string[0]);
7891 			break;
7892 		}
7893 
7894 		/*
7895 		 * Each time through this loop we decrement tmp_cnt by one.
7896 		 * Since the loop runs once for each LUN, tmp_cnt should
7897 		 * never drop below zero here.
7898 		 */
7899 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7900 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7901 	}
7902 
7903 	if (i == 0) {
7904 		fcp_log(CE_WARN, pptr->port_dip,
7905 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7906 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7907 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7908 	}
7909 
7910 	kmem_free(report_lun, len);
7911 	fcp_icmd_free(pptr, icmd);
7912 }
7913 
7914 
7915 /*
7916  * called internally to return a LUN given a target and a LUN number
7917  */
7918 static struct fcp_lun *
7919 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7920 {
7921 	struct fcp_lun	*plun;
7922 
7923 	mutex_enter(&ptgt->tgt_mutex);
7924 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7925 		if (plun->lun_num == lun_num) {
7926 			mutex_exit(&ptgt->tgt_mutex);
7927 			return (plun);
7928 		}
7929 	}
7930 	mutex_exit(&ptgt->tgt_mutex);
7931 
7932 	return (NULL);
7933 }
7934 
7935 
7936 /*
7937  * handle finishing one target for fcp_finish_init
7938  *
7939  * return true (non-zero) if we want finish_init to continue with the
7940  * next target
7941  *
7942  * called with the port mutex held
7943  */
7944 /*ARGSUSED*/
7945 static int
7946 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7947     int link_cnt, int tgt_cnt, int cause)
7948 {
7949 	int	rval = 1;
7950 	ASSERT(pptr != NULL);
7951 	ASSERT(ptgt != NULL);
7952 
7953 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7954 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7955 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7956 	    ptgt->tgt_state);
7957 
7958 	ASSERT(mutex_owned(&pptr->port_mutex));
7959 
7960 	if ((pptr->port_link_cnt != link_cnt) ||
7961 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7962 		/*
7963 		 * oh oh -- another link reset or target change
7964 		 * must have occurred while we are in here
7965 		 */
7966 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7967 
7968 		return (0);
7969 	} else {
7970 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7971 	}
7972 
7973 	mutex_enter(&ptgt->tgt_mutex);
7974 
7975 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7976 		/*
7977 		 * tgt is not offline -- is it marked (i.e. needs
7978 		 * to be offlined) ??
7979 		 */
7980 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7981 			/*
7982 			 * this target not offline *and*
7983 			 * marked
7984 			 */
7985 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7986 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7987 			    tgt_cnt, 0, 0);
7988 		} else {
7989 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7990 
7991 			/* create the LUNs */
7992 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7993 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7994 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7995 				    cause);
7996 				ptgt->tgt_device_created = 1;
7997 			} else {
7998 				fcp_update_tgt_state(ptgt, FCP_RESET,
7999 				    FCP_LUN_BUSY);
8000 			}
8001 		}
8002 	}
8003 
8004 	mutex_exit(&ptgt->tgt_mutex);
8005 
8006 	return (rval);
8007 }
8008 
8009 
8010 /*
8011  * this routine is called to finish port initialization
8012  *
8013  * Each port has a "temp" counter -- when a state change happens (e.g.
8014  * port online), the temp count is set to the number of devices in the map.
8015  * Then, as each device gets "discovered", the temp counter is decremented
8016  * by one.  When this count reaches zero we know that all of the devices
8017  * in the map have been discovered (or an error has occurred), so we can
8018  * then finish initialization -- which is done by this routine (well, this
8019  * and fcp_finish_tgt()).
8020  *
8021  * acquires and releases the global mutex
8022  *
8023  * called with the port mutex owned
8024  */
8025 static void
8026 fcp_finish_init(struct fcp_port *pptr)
8027 {
8028 #ifdef	DEBUG
8029 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8030 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8031 	    FCP_STACK_DEPTH);
8032 #endif /* DEBUG */
8033 
8034 	ASSERT(mutex_owned(&pptr->port_mutex));
8035 
8036 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8037 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8038 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8039 
8040 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
8041 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
8042 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8043 		pptr->port_state &= ~FCP_STATE_ONLINING;
8044 		pptr->port_state |= FCP_STATE_ONLINE;
8045 	}
8046 
8047 	/* Wake up threads waiting on config done */
8048 	cv_broadcast(&pptr->port_config_cv);
8049 }
8050 
8051 
8052 /*
8053  * called from fcp_finish_init to create the LUNs for a target
8054  *
8055  * called with the port mutex owned
8056  */
8057 static void
8058 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8059 {
8060 	struct fcp_lun	*plun;
8061 	struct fcp_port	*pptr;
8062 	child_info_t		*cip = NULL;
8063 
8064 	ASSERT(ptgt != NULL);
8065 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8066 
8067 	pptr = ptgt->tgt_port;
8068 
8069 	ASSERT(pptr != NULL);
8070 
8071 	/* scan all LUNs for this target */
8072 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8073 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8074 			continue;
8075 		}
8076 
8077 		if (plun->lun_state & FCP_LUN_MARK) {
8078 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8079 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8080 			    "fcp_create_luns: offlining marked LUN!");
8081 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8082 			continue;
8083 		}
8084 
8085 		plun->lun_state &= ~FCP_LUN_BUSY;
8086 
8087 		/*
8088 		 * There are conditions in which the FCP_LUN_INIT flag is
8089 		 * cleared but we still have a valid plun->lun_cip. To cover
8090 		 * that case, also CLEAR_BUSY whenever we have a valid lun_cip.
8091 		 */
8092 		if (plun->lun_mpxio && plun->lun_cip &&
8093 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8094 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8095 		    0, 0))) {
8096 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8097 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8098 			    "fcp_create_luns: enable lun %p failed!",
8099 			    plun);
8100 		}
8101 
8102 		if (plun->lun_state & FCP_LUN_INIT &&
8103 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8104 			continue;
8105 		}
8106 
8107 		if (cause == FCP_CAUSE_USER_CREATE) {
8108 			continue;
8109 		}
8110 
8111 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8112 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8113 		    "create_luns: passing ONLINE elem to HP thread");
8114 
8115 		/*
8116 		 * If lun has changed, prepare for offlining the old path.
8117 		 * Do not offline the old path right now, since it may be
8118 		 * still opened.
8119 		 */
8120 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8121 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8122 		}
8123 
8124 		/* pass an ONLINE element to the hotplug thread */
8125 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8126 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8127 
8128 			/*
8129 			 * We cannot attach synchronously (i.e. pass
8130 			 * NDI_ONLINE_ATTACH) here, as we might be
8131 			 * coming from an interrupt or callback
8132 			 * thread.
8133 			 */
8134 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8135 			    link_cnt, tgt_cnt, 0, 0)) {
8136 				fcp_log(CE_CONT, pptr->port_dip,
8137 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8138 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8139 			}
8140 		}
8141 	}
8142 }
8143 
8144 
8145 /*
8146  * function to online/offline devices
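 *
 * Services the online/offline requests queued to the hotplug thread for a
 * LUN (including MPxIO path set/clear busy requests), handling both the
 * MPxIO (mdi) and non-MPxIO (ndi) cases, and running devfs_clean() on the
 * child before an offline.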
8147  */
8148 static int
8149 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8150     int online, int lcount, int tcount, int flags)
8151 {
8152 	int			rval = NDI_FAILURE;
8153 	int			circ;
8154 	child_info_t		*ccip;
8155 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8156 	int			is_mpxio = pptr->port_mpxio;
8157 	dev_info_t		*cdip, *pdip;
8158 	char			*devname;
8159 
8160 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8161 		/*
8162 		 * By the time this event gets serviced, lun_cip and lun_mpxio
8163 		 * have changed, so the event is stale and must be discarded.
8164 		 */
8165 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8166 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8167 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8168 		return (rval);
8169 	}
8170 
8171 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8172 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8173 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8174 	    "flags=%x mpxio=%x\n",
8175 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8176 	    plun->lun_mpxio);
8177 
8178 	/*
8179 	 * lun_mpxio needs checking here because we can end up in a race
8180 	 * condition where this task was dispatched while lun_mpxio was
8181 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8182 	 * enable MPXIO for the LUN, was unable to, and therefore cleared
8183 	 * the flag. We rely on the serialization of the tasks here. We
8184 	 * return NDI_SUCCESS so callers continue without reporting spurious
8185 	 * errors and still think we're an MPXIO LUN.
8186 	 */
8187 
8188 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8189 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8190 		if (plun->lun_mpxio) {
8191 			rval = fcp_update_mpxio_path(plun, cip, online);
8192 		} else {
8193 			rval = NDI_SUCCESS;
8194 		}
8195 		return (rval);
8196 	}
8197 
8198 	/*
8199 	 * Call devfs_clean() explicitly because ndi_devi_offline() does
8200 	 * not run devfs_clean() if the parent lock is held.
8201 	 */
8202 	ASSERT(!servicing_interrupt());
8203 	if (online == FCP_OFFLINE) {
8204 		if (plun->lun_mpxio == 0) {
8205 			if (plun->lun_cip == cip) {
8206 				cdip = DIP(plun->lun_cip);
8207 			} else {
8208 				cdip = DIP(cip);
8209 			}
8210 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8211 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8212 		} else if ((plun->lun_cip != cip) && cip) {
8213 			/*
8214 			 * This means a DTYPE/GUID change; we shall get the
8215 			 * dip of the old cip instead of the current lun_cip.
8216 			 */
8217 			cdip = mdi_pi_get_client(PIP(cip));
8218 		}
8219 		if (cdip) {
8220 			if (i_ddi_devi_attached(cdip)) {
8221 				pdip = ddi_get_parent(cdip);
8222 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8223 				ndi_devi_enter(pdip, &circ);
8224 				(void) ddi_deviname(cdip, devname);
8225 				ndi_devi_exit(pdip, circ);
8226 				/*
8227 				 * Release parent lock before calling
8228 				 * devfs_clean().
8229 				 */
8230 				rval = devfs_clean(pdip, devname + 1,
8231 				    DV_CLEAN_FORCE);
8232 				kmem_free(devname, MAXNAMELEN + 1);
8233 				/*
8234 				 * Return if devfs_clean() fails for
8235 				 * non-MPXIO case.
8236 				 * For MPXIO case, another path could be
8237 				 * offlined.
8238 				 */
8239 				if (rval && plun->lun_mpxio == 0) {
8240 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8241 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8242 					    "fcp_trigger_lun: devfs_clean "
8243 					    "failed rval=%x  dip=%p",
8244 					    rval, pdip);
8245 					return (NDI_FAILURE);
8246 				}
8247 			}
8248 		}
8249 	}
8250 
8251 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8252 		return (NDI_FAILURE);
8253 	}
8254 
8255 	if (is_mpxio) {
8256 		mdi_devi_enter(pptr->port_dip, &circ);
8257 	} else {
8258 		ndi_devi_enter(pptr->port_dip, &circ);
8259 	}
8260 
8261 	mutex_enter(&pptr->port_mutex);
8262 	mutex_enter(&plun->lun_mutex);
8263 
8264 	if (online == FCP_ONLINE) {
8265 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8266 		if (ccip == NULL) {
8267 			goto fail;
8268 		}
8269 	} else {
8270 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8271 			goto fail;
8272 		}
8273 		ccip = cip;
8274 	}
8275 
8276 	if (online == FCP_ONLINE) {
8277 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8278 		    &circ);
8279 		fc_ulp_log_device_event(pptr->port_fp_handle,
8280 		    FC_ULP_DEVICE_ONLINE);
8281 	} else {
8282 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8283 		    &circ);
8284 		fc_ulp_log_device_event(pptr->port_fp_handle,
8285 		    FC_ULP_DEVICE_OFFLINE);
8286 	}
8287 
8288 fail:	mutex_exit(&plun->lun_mutex);
8289 	mutex_exit(&pptr->port_mutex);
8290 
8291 	if (is_mpxio) {
8292 		mdi_devi_exit(pptr->port_dip, circ);
8293 	} else {
8294 		ndi_devi_exit(pptr->port_dip, circ);
8295 	}
8296 
8297 	fc_ulp_idle_port(pptr->port_fp_handle);
8298 
8299 	return (rval);
8300 }
8301 
8302 
8303 /*
8304  * take a target offline by taking all of its LUNs offline
8305  */
8306 /*ARGSUSED*/
8307 static int
8308 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8309     int link_cnt, int tgt_cnt, int nowait, int flags)
8310 {
8311 	struct fcp_tgt_elem	*elem;
8312 
8313 	ASSERT(mutex_owned(&pptr->port_mutex));
8314 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8315 
8316 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8317 
8318 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8319 	    ptgt->tgt_change_cnt)) {
8320 		mutex_exit(&ptgt->tgt_mutex);
8321 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8322 		mutex_enter(&ptgt->tgt_mutex);
8323 
8324 		return (0);
8325 	}
8326 
8327 	ptgt->tgt_pd_handle = NULL;
8328 	mutex_exit(&ptgt->tgt_mutex);
8329 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8330 	mutex_enter(&ptgt->tgt_mutex);
8331 
8332 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8333 
8334 	if (ptgt->tgt_tcap &&
8335 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8336 		elem->flags = flags;
8337 		elem->time = fcp_watchdog_time;
8338 		if (nowait == 0) {
8339 			elem->time += fcp_offline_delay;
8340 		}
8341 		elem->ptgt = ptgt;
8342 		elem->link_cnt = link_cnt;
8343 		elem->tgt_cnt = tgt_cnt;
8344 		elem->next = pptr->port_offline_tgts;
8345 		pptr->port_offline_tgts = elem;
8346 	} else {
8347 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8348 	}
8349 
8350 	return (1);
8351 }
8352 
8353 
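/*
 * Offline a target immediately: re-enable relogin for its port WWN, mark
 * the target offline and offline all of its LUNs.
 *
 * called with both the port mutex and the target mutex held
 */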
8354 static void
8355 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8356     int link_cnt, int tgt_cnt, int flags)
8357 {
8358 	ASSERT(mutex_owned(&pptr->port_mutex));
8359 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8360 
8361 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8362 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8363 	ptgt->tgt_pd_handle = NULL;
8364 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8365 }
8366 
8367 
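/*
 * Offline every LUN of the given target that is not already offline.
 *
 * called with both the port mutex and the target mutex held
 */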
8368 static void
8369 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8370     int flags)
8371 {
8372 	struct	fcp_lun	*plun;
8373 
8374 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8375 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8376 
8377 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8378 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8379 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8380 		}
8381 	}
8382 }
8383 
8384 
8385 /*
8386  * take a LUN offline
8387  *
8388  * enters and leaves with the target mutex held, temporarily releasing it
8389  *
8390  * allocates memory in non-sleep mode
8391  */
8392 static void
8393 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8394     int nowait, int flags)
8395 {
8396 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8397 	struct fcp_lun_elem	*elem;
8398 
8399 	ASSERT(plun != NULL);
8400 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8401 
8402 	if (nowait) {
8403 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8404 		return;
8405 	}
8406 
8407 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8408 		elem->flags = flags;
8409 		elem->time = fcp_watchdog_time;
8410 		if (nowait == 0) {
8411 			elem->time += fcp_offline_delay;
8412 		}
8413 		elem->plun = plun;
8414 		elem->link_cnt = link_cnt;
8415 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8416 		elem->next = pptr->port_offline_luns;
8417 		pptr->port_offline_luns = elem;
8418 	} else {
8419 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8420 	}
8421 }
8422 
8423 
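/*
 * Prepare a LUN for offlining: abort any queued commands destined for it
 * and, for an MPxIO LUN, clear the path busy state and disable the path.
 *
 * called with the target mutex held; drops and reacquires it internally
 */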
8424 static void
8425 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8426 {
8427 	struct fcp_pkt	*head = NULL;
8428 
8429 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8430 
8431 	mutex_exit(&LUN_TGT->tgt_mutex);
8432 
8433 	head = fcp_scan_commands(plun);
8434 	if (head != NULL) {
8435 		fcp_abort_commands(head, LUN_PORT);
8436 	}
8437 
8438 	mutex_enter(&LUN_TGT->tgt_mutex);
8439 
8440 	if (plun->lun_cip && plun->lun_mpxio) {
8441 		/*
8442 		 * Inform MPxIO that the LUN busy state is being cleared
8443 		 */
8444 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8445 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8446 		    0, 0)) {
8447 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8448 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8449 			    LUN_TGT->tgt_d_id, plun->lun_num);
8450 		}
8451 		/*
8452 		 * Inform MPxIO that the lun is now marked for offline
8453 		 */
8454 		mutex_exit(&LUN_TGT->tgt_mutex);
8455 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8456 		mutex_enter(&LUN_TGT->tgt_mutex);
8457 	}
8458 }
8459 
8460 static void
8461 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8462     int flags)
8463 {
8464 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8465 
8466 	mutex_exit(&LUN_TGT->tgt_mutex);
8467 	fcp_update_offline_flags(plun);
8468 	mutex_enter(&LUN_TGT->tgt_mutex);
8469 
8470 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8471 
8472 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8473 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8474 	    "offline_lun: passing OFFLINE elem to HP thread");
8475 
8476 	if (plun->lun_cip) {
8477 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8478 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8479 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8480 		    LUN_TGT->tgt_trace);
8481 
8482 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8483 		    link_cnt, tgt_cnt, flags, 0)) {
8484 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8485 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8486 			    LUN_TGT->tgt_d_id, plun->lun_num);
8487 		}
8488 	}
8489 }
8490 
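/*
 * Walk the port's list of deferred LUN offline requests and offline those
 * whose delay has expired, unless a link or target state change has made
 * the request stale.
 *
 * called with the port mutex held
 */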
8491 static void
8492 fcp_scan_offline_luns(struct fcp_port *pptr)
8493 {
8494 	struct fcp_lun_elem	*elem;
8495 	struct fcp_lun_elem	*prev;
8496 	struct fcp_lun_elem	*next;
8497 
8498 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8499 
8500 	prev = NULL;
8501 	elem = pptr->port_offline_luns;
8502 	while (elem) {
8503 		next = elem->next;
8504 		if (elem->time <= fcp_watchdog_time) {
8505 			int			changed = 1;
8506 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8507 
8508 			mutex_enter(&ptgt->tgt_mutex);
8509 			if (pptr->port_link_cnt == elem->link_cnt &&
8510 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8511 				changed = 0;
8512 			}
8513 
8514 			if (!changed &&
8515 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8516 				fcp_offline_lun_now(elem->plun,
8517 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8518 			}
8519 			mutex_exit(&ptgt->tgt_mutex);
8520 
8521 			kmem_free(elem, sizeof (*elem));
8522 
8523 			if (prev) {
8524 				prev->next = next;
8525 			} else {
8526 				pptr->port_offline_luns = next;
8527 			}
8528 		} else {
8529 			prev = elem;
8530 		}
8531 		elem = next;
8532 	}
8533 }
8534 
8535 
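/*
 * Walk the port's list of deferred target offline requests and offline
 * those whose delay has expired and that have not been made outdated by a
 * later link or target state change.
 *
 * called with the port mutex held
 */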
8536 static void
8537 fcp_scan_offline_tgts(struct fcp_port *pptr)
8538 {
8539 	struct fcp_tgt_elem	*elem;
8540 	struct fcp_tgt_elem	*prev;
8541 	struct fcp_tgt_elem	*next;
8542 
8543 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8544 
8545 	prev = NULL;
8546 	elem = pptr->port_offline_tgts;
8547 	while (elem) {
8548 		next = elem->next;
8549 		if (elem->time <= fcp_watchdog_time) {
8550 			int		outdated = 1;
8551 			struct fcp_tgt	*ptgt = elem->ptgt;
8552 
8553 			mutex_enter(&ptgt->tgt_mutex);
8554 
8555 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8556 				/* No change on tgt since elem was created. */
8557 				outdated = 0;
8558 			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8559 			    pptr->port_link_cnt == elem->link_cnt + 1 &&
8560 			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8561 				/*
8562 				 * Exactly one thing happened to the target
8563 				 * in between: the local port went offline.
8564 				 * For fp the remote port is already gone so
8565 				 * it will not tell us again to offline the
8566 				 * target. We must offline it now.
8567 				 */
8568 				outdated = 0;
8569 			}
8570 
8571 			if (!outdated && !(ptgt->tgt_state &
8572 			    FCP_TGT_OFFLINE)) {
8573 				fcp_offline_target_now(pptr,
8574 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8575 				    elem->flags);
8576 			}
8577 
8578 			mutex_exit(&ptgt->tgt_mutex);
8579 
8580 			kmem_free(elem, sizeof (*elem));
8581 
8582 			if (prev) {
8583 				prev->next = next;
8584 			} else {
8585 				pptr->port_offline_tgts = next;
8586 			}
8587 		} else {
8588 			prev = elem;
8589 		}
8590 		elem = next;
8591 	}
8592 }
8593 
8594 
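/*
 * Mark a LUN offline in its state flags and, if an initialized child node
 * exists, deliver the FCAL remove event to it through the NDI event
 * framework.
 */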
8595 static void
8596 fcp_update_offline_flags(struct fcp_lun *plun)
8597 {
8598 	struct fcp_port	*pptr = LUN_PORT;
8599 	ASSERT(plun != NULL);
8600 
8601 	mutex_enter(&LUN_TGT->tgt_mutex);
8602 	plun->lun_state |= FCP_LUN_OFFLINE;
8603 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8604 
8605 	mutex_enter(&plun->lun_mutex);
8606 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8607 		dev_info_t *cdip = NULL;
8608 
8609 		mutex_exit(&LUN_TGT->tgt_mutex);
8610 
8611 		if (plun->lun_mpxio == 0) {
8612 			cdip = DIP(plun->lun_cip);
8613 		} else if (plun->lun_cip) {
8614 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8615 		}
8616 
8617 		mutex_exit(&plun->lun_mutex);
8618 		if (cdip) {
8619 			(void) ndi_event_retrieve_cookie(
8620 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8621 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8622 			(void) ndi_event_run_callbacks(
8623 			    pptr->port_ndi_event_hdl, cdip,
8624 			    fcp_remove_eid, NULL);
8625 		}
8626 	} else {
8627 		mutex_exit(&plun->lun_mutex);
8628 		mutex_exit(&LUN_TGT->tgt_mutex);
8629 	}
8630 }
8631 
8632 
8633 /*
8634  * Scan all of the command pkts for this port, moving pkts that
8635  * match our LUN onto our own list (headed by "head")
8636  */
8637 static struct fcp_pkt *
8638 fcp_scan_commands(struct fcp_lun *plun)
8639 {
8640 	struct fcp_port	*pptr = LUN_PORT;
8641 
8642 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8643 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8644 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8645 
8646 	struct fcp_pkt	*head = NULL;	/* head of our list */
8647 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8648 
8649 	int			cmds_found = 0;
8650 
8651 	mutex_enter(&pptr->port_pkt_mutex);
8652 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8653 		struct fcp_lun *tlun =
8654 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8655 
8656 		ncmd = cmd->cmd_next;	/* set next command */
8657 
8658 		/*
8659 		 * if this pkt is for a different LUN  or the
8660 		 * command is sent down, skip it.
8661 		 */
8662 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8663 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8664 			pcmd = cmd;
8665 			continue;
8666 		}
8667 		cmds_found++;
8668 		if (pcmd != NULL) {
8669 			ASSERT(pptr->port_pkt_head != cmd);
8670 			pcmd->cmd_next = cmd->cmd_next;
8671 		} else {
8672 			ASSERT(cmd == pptr->port_pkt_head);
8673 			pptr->port_pkt_head = cmd->cmd_next;
8674 		}
8675 
8676 		if (cmd == pptr->port_pkt_tail) {
8677 			pptr->port_pkt_tail = pcmd;
8678 			if (pcmd) {
8679 				pcmd->cmd_next = NULL;
8680 			}
8681 		}
8682 
8683 		if (head == NULL) {
8684 			head = tail = cmd;
8685 		} else {
8686 			ASSERT(tail != NULL);
8687 
8688 			tail->cmd_next = cmd;
8689 			tail = cmd;
8690 		}
8691 		cmd->cmd_next = NULL;
8692 	}
8693 	mutex_exit(&pptr->port_pkt_mutex);
8694 
8695 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8696 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8697 	    "scan commands: %d cmd(s) found", cmds_found);
8698 
8699 	return (head);
8700 }
8701 
8702 
8703 /*
8704  * Abort all the commands in the command queue
8705  */
8706 static void
8707 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8708 {
8709 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8710 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8711 
8712 	ASSERT(mutex_owned(&pptr->port_mutex));
8713 
8714 	/* scan through the pkts and invalidate them */
8715 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8716 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8717 
8718 		ncmd = cmd->cmd_next;
8719 		ASSERT(pkt != NULL);
8720 
8721 		/*
8722 		 * The lun is going to be marked offline. Indicate to
8723 		 * the target driver that it should not requeue or retry
8724 		 * this command, as the device will be offlined soon.
8725 		 */
8726 		pkt->pkt_reason = CMD_DEV_GONE;
8727 		pkt->pkt_statistics = 0;
8728 		pkt->pkt_state = 0;
8729 
8730 		/* reset cmd flags/state */
8731 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8732 		cmd->cmd_state = FCP_PKT_IDLE;
8733 
8734 		/*
8735 		 * ensure we have a packet completion routine,
8736 		 * then call it.
8737 		 */
8738 		ASSERT(pkt->pkt_comp != NULL);
8739 
8740 		mutex_exit(&pptr->port_mutex);
8741 		fcp_post_callback(cmd);
8742 		mutex_enter(&pptr->port_mutex);
8743 	}
8744 }
8745 
8746 
8747 /*
8748  * the pkt_comp callback for command packets
8749  */
8750 static void
8751 fcp_cmd_callback(fc_packet_t *fpkt)
8752 {
8753 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8754 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8755 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8756 
8757 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8758 
8759 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8760 		cmn_err(CE_PANIC, "Packet already completed %p",
8761 		    (void *)cmd);
8762 	}
8763 
8764 	/*
8765 	 * The watch thread should be freeing the packet; ignore it here.
8766 	 */
8767 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8768 		fcp_log(CE_CONT, pptr->port_dip,
8769 		    "!FCP: Pkt completed while aborting\n");
8770 		return;
8771 	}
8772 	cmd->cmd_state = FCP_PKT_IDLE;
8773 
8774 	fcp_complete_pkt(fpkt);
8775 
8776 #ifdef	DEBUG
8777 	mutex_enter(&pptr->port_pkt_mutex);
8778 	pptr->port_npkts--;
8779 	mutex_exit(&pptr->port_pkt_mutex);
8780 #endif /* DEBUG */
8781 
8782 	fcp_post_callback(cmd);
8783 }
8784 
8785 
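/*
 * Translate the FC transport and FCP response status of a completed command
 * into the scsi_pkt state, reason, residual and (when sense data is present)
 * auto request-sense fields expected by SCSA target drivers.
 */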
8786 static void
8787 fcp_complete_pkt(fc_packet_t *fpkt)
8788 {
8789 	int			error = 0;
8790 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8791 	    fpkt->pkt_ulp_private;
8792 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8793 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8794 	struct fcp_lun	*plun;
8795 	struct fcp_tgt	*ptgt;
8796 	struct fcp_rsp		*rsp;
8797 	struct scsi_address	save;
8798 
8799 #ifdef	DEBUG
8800 	save = pkt->pkt_address;
8801 #endif /* DEBUG */
8802 
8803 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8804 
8805 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8806 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8807 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8808 			    sizeof (struct fcp_rsp));
8809 		}
8810 
8811 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8812 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8813 
8814 		pkt->pkt_resid = 0;
8815 
8816 		if (fpkt->pkt_datalen) {
8817 			pkt->pkt_state |= STATE_XFERRED_DATA;
8818 			if (fpkt->pkt_data_resid) {
8819 				error++;
8820 			}
8821 		}
8822 
8823 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8824 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8825 			/*
8826 			 * If the command came back with a non-good status
8827 			 * but neither a valid FCP response nor sense data
8828 			 * was set, clear the data-transfer state and report
8829 			 * a full residual so the command will be retried.
8830 			 */
8831 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8832 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8833 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8834 				pkt->pkt_resid = cmd->cmd_dmacount;
8835 			}
8836 		}
8837 
8838 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8839 			return;
8840 		}
8841 
8842 		plun = ADDR2LUN(&pkt->pkt_address);
8843 		ptgt = plun->lun_tgt;
8844 		ASSERT(ptgt != NULL);
8845 
8846 		/*
8847 		 * Update the transfer resid, if appropriate
8848 		 */
8849 		if (rsp->fcp_u.fcp_status.resid_over ||
8850 		    rsp->fcp_u.fcp_status.resid_under) {
8851 			pkt->pkt_resid = rsp->fcp_resid;
8852 		}
8853 
8854 		/*
8855 		 * First see if we got an FCP protocol error.
8856 		 */
8857 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8858 			struct fcp_rsp_info	*bep;
8859 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8860 			    sizeof (struct fcp_rsp));
8861 
8862 			if (fcp_validate_fcp_response(rsp, pptr) !=
8863 			    FC_SUCCESS) {
8864 				pkt->pkt_reason = CMD_CMPLT;
8865 				*(pkt->pkt_scbp) = STATUS_CHECK;
8866 
8867 				fcp_log(CE_WARN, pptr->port_dip,
8868 				    "!SCSI command to d_id=0x%x lun=0x%x"
8869 				    " failed, Bad FCP response values:"
8870 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8871 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8872 				    ptgt->tgt_d_id, plun->lun_num,
8873 				    rsp->reserved_0, rsp->reserved_1,
8874 				    rsp->fcp_u.fcp_status.reserved_0,
8875 				    rsp->fcp_u.fcp_status.reserved_1,
8876 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8877 
8878 				return;
8879 			}
8880 
8881 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8882 				FCP_CP_IN(fpkt->pkt_resp +
8883 				    sizeof (struct fcp_rsp), bep,
8884 				    fpkt->pkt_resp_acc,
8885 				    sizeof (struct fcp_rsp_info));
8886 			}
8887 
8888 			if (bep->rsp_code != FCP_NO_FAILURE) {
8889 				child_info_t	*cip;
8890 
8891 				pkt->pkt_reason = CMD_TRAN_ERR;
8892 
8893 				mutex_enter(&plun->lun_mutex);
8894 				cip = plun->lun_cip;
8895 				mutex_exit(&plun->lun_mutex);
8896 
8897 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8898 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8899 				    "FCP response error on cmd=%p"
8900 				    " target=0x%x, cip=%p", cmd,
8901 				    ptgt->tgt_d_id, cip);
8902 			}
8903 		}
8904 
8905 		/*
8906 		 * See if we got a SCSI error with sense data
8907 		 */
8908 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8909 			uchar_t				rqlen;
8910 			caddr_t				sense_from;
8911 			child_info_t			*cip;
8912 			timeout_id_t			tid;
8913 			struct scsi_arq_status		*arq;
8914 			struct scsi_extended_sense	*sense_to;
8915 
8916 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8917 			sense_to = &arq->sts_sensedata;
8918 
8919 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8920 			    sizeof (struct scsi_extended_sense));
8921 
8922 			sense_from = (caddr_t)fpkt->pkt_resp +
8923 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8924 
8925 			if (fcp_validate_fcp_response(rsp, pptr) !=
8926 			    FC_SUCCESS) {
8927 				pkt->pkt_reason = CMD_CMPLT;
8928 				*(pkt->pkt_scbp) = STATUS_CHECK;
8929 
8930 				fcp_log(CE_WARN, pptr->port_dip,
8931 				    "!SCSI command to d_id=0x%x lun=0x%x"
8932 				    " failed, Bad FCP response values:"
8933 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8934 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8935 				    ptgt->tgt_d_id, plun->lun_num,
8936 				    rsp->reserved_0, rsp->reserved_1,
8937 				    rsp->fcp_u.fcp_status.reserved_0,
8938 				    rsp->fcp_u.fcp_status.reserved_1,
8939 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8940 
8941 				return;
8942 			}
8943 
8944 			/*
8945 			 * copy in sense information
8946 			 */
8947 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8948 				FCP_CP_IN(sense_from, sense_to,
8949 				    fpkt->pkt_resp_acc, rqlen);
8950 			} else {
8951 				bcopy(sense_from, sense_to, rqlen);
8952 			}
8953 
8954 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8955 			    (FCP_SENSE_NO_LUN(sense_to))) {
8956 				mutex_enter(&ptgt->tgt_mutex);
8957 				if (ptgt->tgt_tid == NULL) {
8958 					/*
8959 					 * Kick off rediscovery
8960 					 */
8961 					tid = timeout(fcp_reconfigure_luns,
8962 					    (caddr_t)ptgt, drv_usectohz(1));
8963 
8964 					ptgt->tgt_tid = tid;
8965 					ptgt->tgt_state |= FCP_TGT_BUSY;
8966 				}
8967 				mutex_exit(&ptgt->tgt_mutex);
8968 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8969 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8970 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8971 					    "!FCP: Report Lun Has Changed"
8972 					    " target=%x", ptgt->tgt_d_id);
8973 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8974 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8975 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8976 					    "!FCP: LU Not Supported"
8977 					    " target=%x", ptgt->tgt_d_id);
8978 				}
8979 			}
8980 			ASSERT(pkt->pkt_scbp != NULL);
8981 
8982 			pkt->pkt_state |= STATE_ARQ_DONE;
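			/*
			 * Descriptive note: FCP returns sense data inline in
			 * the response IU, so the code below synthesizes a
			 * successful auto-request-sense (ARQ) completion for
			 * the target driver: the emulated request-sense packet
			 * is marked GOOD, error free, and fully transferred,
			 * with the sense bytes already copied in above.
			 */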
8983 
8984 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8985 
8986 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8987 			arq->sts_rqpkt_reason = 0;
8988 			arq->sts_rqpkt_statistics = 0;
8989 
8990 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8991 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8992 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8993 			    STATE_XFERRED_DATA;
8994 
8995 			mutex_enter(&plun->lun_mutex);
8996 			cip = plun->lun_cip;
8997 			mutex_exit(&plun->lun_mutex);
8998 
8999 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9000 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9001 			    "SCSI Check condition on cmd=%p target=0x%x"
9002 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
9003 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
9004 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
9005 			    rsp->fcp_u.fcp_status.scsi_status,
9006 			    sense_to->es_key, sense_to->es_add_code,
9007 			    sense_to->es_qual_code);
9008 		}
9009 	} else {
9010 		plun = ADDR2LUN(&pkt->pkt_address);
9011 		ptgt = plun->lun_tgt;
9012 		ASSERT(ptgt != NULL);
9013 
9014 		/*
9015 		 * Work harder to translate errors into ones the target driver
9016 		 * understands. Note with despair that the target drivers don't
9017 		 * decode pkt_state and pkt_reason exhaustively; they resort to
9018 		 * using the big hammer most often, which may not get fixed in
9019 		 * the lifetime of this driver.
9020 		 */
9021 		pkt->pkt_state = 0;
9022 		pkt->pkt_statistics = 0;
9023 
9024 		switch (fpkt->pkt_state) {
9025 		case FC_PKT_TRAN_ERROR:
9026 			switch (fpkt->pkt_reason) {
9027 			case FC_REASON_OVERRUN:
9028 				pkt->pkt_reason = CMD_CMD_OVR;
9029 				pkt->pkt_statistics |= STAT_ABORTED;
9030 				break;
9031 
9032 			case FC_REASON_XCHG_BSY: {
9033 				caddr_t ptr;
9034 
9035 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9036 
9037 				ptr = (caddr_t)pkt->pkt_scbp;
9038 				if (ptr) {
9039 					*ptr = STATUS_BUSY;
9040 				}
9041 				break;
9042 			}
9043 
9044 			case FC_REASON_ABORTED:
9045 				pkt->pkt_reason = CMD_TRAN_ERR;
9046 				pkt->pkt_statistics |= STAT_ABORTED;
9047 				break;
9048 
9049 			case FC_REASON_ABORT_FAILED:
9050 				pkt->pkt_reason = CMD_ABORT_FAIL;
9051 				break;
9052 
9053 			case FC_REASON_NO_SEQ_INIT:
9054 			case FC_REASON_CRC_ERROR:
9055 				pkt->pkt_reason = CMD_TRAN_ERR;
9056 				pkt->pkt_statistics |= STAT_ABORTED;
9057 				break;
9058 			default:
9059 				pkt->pkt_reason = CMD_TRAN_ERR;
9060 				break;
9061 			}
9062 			break;
9063 
9064 		case FC_PKT_PORT_OFFLINE: {
9065 			dev_info_t	*cdip = NULL;
9066 			caddr_t		ptr;
9067 
9068 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9069 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9070 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
9071 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9072 				    ptgt->tgt_d_id);
9073 			}
9074 
9075 			mutex_enter(&plun->lun_mutex);
9076 			if (plun->lun_mpxio == 0) {
9077 				cdip = DIP(plun->lun_cip);
9078 			} else if (plun->lun_cip) {
9079 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9080 			}
9081 
9082 			mutex_exit(&plun->lun_mutex);
9083 
9084 			if (cdip) {
9085 				(void) ndi_event_retrieve_cookie(
9086 				    pptr->port_ndi_event_hdl, cdip,
9087 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9088 				    NDI_EVENT_NOPASS);
9089 				(void) ndi_event_run_callbacks(
9090 				    pptr->port_ndi_event_hdl, cdip,
9091 				    fcp_remove_eid, NULL);
9092 			}
9093 
9094 			/*
9095 			 * If the link goes off-line for a LIP,
9096 			 * this will cause an error in the ST, SG,
9097 			 * and SGEN drivers. By setting BUSY we
9098 			 * give those drivers a chance to retry
9099 			 * before they give up on the job. ST will
9100 			 * remember how many times it has retried.
9101 			 */
9102 
9103 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9104 			    (plun->lun_type == DTYPE_CHANGER)) {
9105 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9106 				ptr = (caddr_t)pkt->pkt_scbp;
9107 				if (ptr) {
9108 					*ptr = STATUS_BUSY;
9109 				}
9110 			} else {
9111 				pkt->pkt_reason = CMD_TRAN_ERR;
9112 				pkt->pkt_statistics |= STAT_BUS_RESET;
9113 			}
9114 			break;
9115 		}
9116 
9117 		case FC_PKT_TRAN_BSY:
9118 			/*
9119 			 * Use the ssd Qfull handling here.
9120 			 */
9121 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9122 			pkt->pkt_state = STATE_GOT_BUS;
9123 			break;
9124 
9125 		case FC_PKT_TIMEOUT:
9126 			pkt->pkt_reason = CMD_TIMEOUT;
9127 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9128 				pkt->pkt_statistics |= STAT_TIMEOUT;
9129 			} else {
9130 				pkt->pkt_statistics |= STAT_ABORTED;
9131 			}
9132 			break;
9133 
9134 		case FC_PKT_LOCAL_RJT:
9135 			switch (fpkt->pkt_reason) {
9136 			case FC_REASON_OFFLINE: {
9137 				dev_info_t	*cdip = NULL;
9138 
9139 				mutex_enter(&plun->lun_mutex);
9140 				if (plun->lun_mpxio == 0) {
9141 					cdip = DIP(plun->lun_cip);
9142 				} else if (plun->lun_cip) {
9143 					cdip = mdi_pi_get_client(
9144 					    PIP(plun->lun_cip));
9145 				}
9146 				mutex_exit(&plun->lun_mutex);
9147 
9148 				if (cdip) {
9149 					(void) ndi_event_retrieve_cookie(
9150 					    pptr->port_ndi_event_hdl, cdip,
9151 					    FCAL_REMOVE_EVENT,
9152 					    &fcp_remove_eid,
9153 					    NDI_EVENT_NOPASS);
9154 					(void) ndi_event_run_callbacks(
9155 					    pptr->port_ndi_event_hdl,
9156 					    cdip, fcp_remove_eid, NULL);
9157 				}
9158 
9159 				pkt->pkt_reason = CMD_TRAN_ERR;
9160 				pkt->pkt_statistics |= STAT_BUS_RESET;
9161 
9162 				break;
9163 			}
9164 
9165 			case FC_REASON_NOMEM:
9166 			case FC_REASON_QFULL: {
9167 				caddr_t ptr;
9168 
9169 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9170 				ptr = (caddr_t)pkt->pkt_scbp;
9171 				if (ptr) {
9172 					*ptr = STATUS_BUSY;
9173 				}
9174 				break;
9175 			}
9176 
9177 			case FC_REASON_DMA_ERROR:
9178 				pkt->pkt_reason = CMD_DMA_DERR;
9179 				pkt->pkt_statistics |= STAT_ABORTED;
9180 				break;
9181 
9182 			case FC_REASON_CRC_ERROR:
9183 			case FC_REASON_UNDERRUN: {
9184 				uchar_t		status;
9185 				/*
9186 				 * Workaround for bug 4240945:
9187 				 * the IB on the A5k doesn't set the underrun
9188 				 * bit in the FCP status when it transfers
9189 				 * less than the requested amount of data.
9190 				 * Work around the ses problem to keep luxadm
9191 				 * happy until the IB firmware is fixed.
9192 				 */
9193 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9194 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9195 					    fpkt->pkt_resp_acc,
9196 					    sizeof (struct fcp_rsp));
9197 				}
9198 				status = rsp->fcp_u.fcp_status.scsi_status;
9199 				if (((plun->lun_type & DTYPE_MASK) ==
9200 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9201 					pkt->pkt_reason = CMD_CMPLT;
9202 					*pkt->pkt_scbp = status;
9203 					pkt->pkt_resid = 0;
9204 				} else {
9205 					pkt->pkt_reason = CMD_TRAN_ERR;
9206 					pkt->pkt_statistics |= STAT_ABORTED;
9207 				}
9208 				break;
9209 			}
9210 
9211 			case FC_REASON_NO_CONNECTION:
9212 			case FC_REASON_UNSUPPORTED:
9213 			case FC_REASON_ILLEGAL_REQ:
9214 			case FC_REASON_BAD_SID:
9215 			case FC_REASON_DIAG_BUSY:
9216 			case FC_REASON_FCAL_OPN_FAIL:
9217 			case FC_REASON_BAD_XID:
9218 			default:
9219 				pkt->pkt_reason = CMD_TRAN_ERR;
9220 				pkt->pkt_statistics |= STAT_ABORTED;
9221 				break;
9222 
9223 			}
9224 			break;
9225 
9226 		case FC_PKT_NPORT_RJT:
9227 		case FC_PKT_FABRIC_RJT:
9228 		case FC_PKT_NPORT_BSY:
9229 		case FC_PKT_FABRIC_BSY:
9230 		default:
9231 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9232 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9233 			    "FC Status 0x%x, reason 0x%x",
9234 			    fpkt->pkt_state, fpkt->pkt_reason);
9235 			pkt->pkt_reason = CMD_TRAN_ERR;
9236 			pkt->pkt_statistics |= STAT_ABORTED;
9237 			break;
9238 		}
9239 
9240 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9241 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9242 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9243 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9244 		    fpkt->pkt_reason);
9245 	}
9246 
9247 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9248 }
9249 
9250 
9251 static int
9252 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9253 {
9254 	if (rsp->reserved_0 || rsp->reserved_1 ||
9255 	    rsp->fcp_u.fcp_status.reserved_0 ||
9256 	    rsp->fcp_u.fcp_status.reserved_1) {
9257 		/*
9258 		 * These reserved fields should ideally be zero. FCP-2 does say
9259 		 * that the recipient need not check for reserved fields to be
9260 		 * zero. If they are not zero, we will not make a fuss about it
9261 		 * - just log it (in debug to both trace buffer and messages
9262 		 * file and to trace buffer only in non-debug) and move on.
9263 		 *
9264 		 * Non-zero reserved fields were seen with minnows.
9265 		 *
9266 		 * qlc takes care of some of this but we cannot assume that all
9267 		 * FCAs will do so.
9268 		 */
9269 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9270 		    FCP_BUF_LEVEL_5, 0,
9271 		    "Got fcp response packet with non-zero reserved fields "
9272 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9273 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9274 		    rsp->reserved_0, rsp->reserved_1,
9275 		    rsp->fcp_u.fcp_status.reserved_0,
9276 		    rsp->fcp_u.fcp_status.reserved_1);
9277 	}
9278 
9279 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9280 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9281 		return (FC_FAILURE);
9282 	}
9283 
9284 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9285 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9286 	    sizeof (struct fcp_rsp))) {
9287 		return (FC_FAILURE);
9288 	}
9289 
9290 	return (FC_SUCCESS);
9291 }
9292 
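/*
 * For reference, a sketch of the response IU layout that the length checks in
 * fcp_validate_fcp_response() enforce (derived from the code above, not an
 * authoritative statement of the FCP-2 wire format):
 *
 *	fpkt->pkt_resp
 *	+---------------------------+
 *	| struct fcp_rsp            |	fixed header
 *	+---------------------------+
 *	| response info             |	fcp_response_len bytes (rsp_len_set)
 *	+---------------------------+
 *	| sense data                |	fcp_sense_len bytes (sense_len_set)
 *	+---------------------------+
 *
 * When both lengths are present, sizeof (struct fcp_rsp) + fcp_response_len +
 * fcp_sense_len must not exceed FCP_MAX_RSP_IU_SIZE, which is what the two
 * checks amount to.  This also matches the way the completion path above
 * computes sense_from as pkt_resp + sizeof (struct fcp_rsp) +
 * fcp_response_len.
 */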
9293 
9294 /*
9295  * This is called when there is a change in the device state. The case we're
9296  * handling here is: if the d_id does not match, offline this tgt and online
9297  * a new tgt with the new d_id.	 Called from fcp_handle_devices with
9298  * port_mutex held.
9299  */
9300 static int
9301 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9302     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9303 {
9304 	ASSERT(mutex_owned(&pptr->port_mutex));
9305 
9306 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9307 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9308 	    "Starting fcp_device_changed...");
9309 
9310 	/*
9311 	 * The two cases in which fcp_device_changed is called are when the
9312 	 * device changes either its d_id or its hard address.
9313 	 */
9314 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9315 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9316 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9317 
9318 		/* offline this target */
9319 		mutex_enter(&ptgt->tgt_mutex);
9320 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9321 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9322 			    0, 1, NDI_DEVI_REMOVE);
9323 		}
9324 		mutex_exit(&ptgt->tgt_mutex);
9325 
9326 		fcp_log(CE_NOTE, pptr->port_dip,
9327 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9328 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9329 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9330 		    map_entry->map_hard_addr.hard_addr);
9331 	}
9332 
9333 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9334 	    link_cnt, tgt_cnt, cause));
9335 }
9336 
9337 /*
9338  *     Function: fcp_alloc_lun
9339  *
9340  *  Description: Creates a new lun structure and adds it to the list
9341  *		 of luns of the target.
9342  *
9343  *     Argument: ptgt		Target the lun will belong to.
9344  *
9345  * Return Value: NULL		Failed
9346  *		 Not NULL	Succeeded
9347  *
9348  *	Context: Kernel context
9349  */
9350 static struct fcp_lun *
9351 fcp_alloc_lun(struct fcp_tgt *ptgt)
9352 {
9353 	struct fcp_lun *plun;
9354 
9355 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9356 	if (plun != NULL) {
9357 		/*
9358 		 * Initialize the mutex before putting in the target list
9359 		 * especially before releasing the target mutex.
9360 		 */
9361 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9362 		plun->lun_tgt = ptgt;
9363 
9364 		mutex_enter(&ptgt->tgt_mutex);
9365 		plun->lun_next = ptgt->tgt_lun;
9366 		ptgt->tgt_lun = plun;
9367 		plun->lun_old_guid = NULL;
9368 		plun->lun_old_guid_size = 0;
9369 		mutex_exit(&ptgt->tgt_mutex);
9370 	}
9371 
9372 	return (plun);
9373 }
9374 
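/*
 * Illustrative sketch (not part of the driver): fcp_alloc_lun() allocates
 * with KM_NOSLEEP, so callers must be prepared for a NULL return, e.g.:
 *
 *	struct fcp_lun	*plun;
 *
 *	if ((plun = fcp_alloc_lun(ptgt)) == NULL) {
 *		fcp_log(CE_WARN, pptr->port_dip,
 *		    "!fcp: unable to allocate lun structure");
 *		... handle the allocation failure (caller specific) ...
 *	}
 *
 * On success the new lun is already linked onto ptgt->tgt_lun.
 */
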
9375 /*
9376  *     Function: fcp_dealloc_lun
9377  *
9378  *  Description: Frees the LUN structure passed by the caller.
9379  *
9380  *     Argument: plun		LUN structure to free.
9381  *
9382  * Return Value: None
9383  *
9384  *	Context: Kernel context.
9385  */
9386 static void
9387 fcp_dealloc_lun(struct fcp_lun *plun)
9388 {
9389 	mutex_enter(&plun->lun_mutex);
9390 	if (plun->lun_cip) {
9391 		fcp_remove_child(plun);
9392 	}
9393 	mutex_exit(&plun->lun_mutex);
9394 
9395 	mutex_destroy(&plun->lun_mutex);
9396 	if (plun->lun_guid) {
9397 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9398 	}
9399 	if (plun->lun_old_guid) {
9400 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9401 	}
9402 	kmem_free(plun, sizeof (*plun));
9403 }
9404 
9405 /*
9406  *     Function: fcp_alloc_tgt
9407  *
9408  *  Description: Creates a new target structure and adds it to the port
9409  *		 hash list.
9410  *
9411  *     Argument: pptr		fcp port structure
9412  *		 *map_entry	entry describing the target to create
9413  *		 link_cnt	Link state change counter
9414  *
9415  * Return Value: NULL		Failed
9416  *		 Not NULL	Succeeded
9417  *
9418  *	Context: Kernel context.
9419  */
9420 static struct fcp_tgt *
9421 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9422 {
9423 	int			hash;
9424 	uchar_t			*wwn;
9425 	struct fcp_tgt	*ptgt;
9426 
9427 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9428 	if (ptgt != NULL) {
9429 		mutex_enter(&pptr->port_mutex);
9430 		if (link_cnt != pptr->port_link_cnt) {
9431 			/*
9432 			 * oh oh -- another link reset
9433 			 * in progress -- give up
9434 			 */
9435 			mutex_exit(&pptr->port_mutex);
9436 			kmem_free(ptgt, sizeof (*ptgt));
9437 			ptgt = NULL;
9438 		} else {
9439 			/*
9440 			 * initialize the mutex before putting in the port
9441 			 * wwn list, especially before releasing the port
9442 			 * mutex.
9443 			 */
9444 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9445 
9446 			/* add new target entry to the port's hash list */
9447 			wwn = (uchar_t *)&map_entry->map_pwwn;
9448 			hash = FCP_HASH(wwn);
9449 
9450 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9451 			pptr->port_tgt_hash_table[hash] = ptgt;
9452 
9453 			/* save cross-ptr */
9454 			ptgt->tgt_port = pptr;
9455 
9456 			ptgt->tgt_change_cnt = 1;
9457 
9458 			/* initialize the target manual_config_only flag */
9459 			if (fcp_enable_auto_configuration) {
9460 				ptgt->tgt_manual_config_only = 0;
9461 			} else {
9462 				ptgt->tgt_manual_config_only = 1;
9463 			}
9464 
9465 			mutex_exit(&pptr->port_mutex);
9466 		}
9467 	}
9468 
9469 	return (ptgt);
9470 }
9471 
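/*
 * Illustrative sketch (not part of the driver): the hash insertion above is
 * keyed on the remote port WWN, so a lookup by WWN walks the same chain,
 * roughly as follows (the tgt_port_wwn field name is an assumption here;
 * the real lookup lives elsewhere in this file):
 *
 *	hash = FCP_HASH(wwn);
 *	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
 *	    ptgt = ptgt->tgt_next) {
 *		if (bcmp(wwn, &ptgt->tgt_port_wwn, FC_WWN_SIZE) == 0) {
 *			break;
 *		}
 *	}
 */
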
9472 /*
9473  *     Function: fcp_dealloc_tgt
9474  *
9475  *  Description: Frees the target structure passed by the caller.
9476  *
9477  *     Argument: ptgt		Target structure to free.
9478  *
9479  * Return Value: None
9480  *
9481  *	Context: Kernel context.
9482  */
9483 static void
9484 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9485 {
9486 	mutex_destroy(&ptgt->tgt_mutex);
9487 	kmem_free(ptgt, sizeof (*ptgt));
9488 }
9489 
9490 
9491 /*
9492  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9493  *
9494  *	Device discovery commands are not retried forever, as
9495  *	that would have repercussions on other devices that need to
9496  *	be submitted to the hotplug thread. After a quick glance
9497  *	at the SCSI-3 spec, it was found that the spec doesn't
9498  *	mandate a forever retry; rather, it recommends a delayed retry.
9499  *
9500  *	Since the Photon IB is single threaded, STATUS_BUSY is common
9501  *	in a 4+ initiator environment. Make sure the total time
9502  *	spent on retries (including the command timeout) does not
9503  *	exceed 60 seconds.
9504  */
9505 static void
9506 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9507 {
9508 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9509 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9510 
9511 	mutex_enter(&pptr->port_mutex);
9512 	mutex_enter(&ptgt->tgt_mutex);
9513 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9514 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9515 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9516 		    "fcp_queue_ipkt,1: state change occurred"
9517 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9518 		mutex_exit(&ptgt->tgt_mutex);
9519 		mutex_exit(&pptr->port_mutex);
9520 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9521 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9522 		fcp_icmd_free(pptr, icmd);
9523 		return;
9524 	}
9525 	mutex_exit(&ptgt->tgt_mutex);
9526 
9527 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9528 
9529 	if (pptr->port_ipkt_list != NULL) {
9530 		/* add pkt to front of doubly-linked list */
9531 		pptr->port_ipkt_list->ipkt_prev = icmd;
9532 		icmd->ipkt_next = pptr->port_ipkt_list;
9533 		pptr->port_ipkt_list = icmd;
9534 		icmd->ipkt_prev = NULL;
9535 	} else {
9536 		/* this is the first/only pkt on the list */
9537 		pptr->port_ipkt_list = icmd;
9538 		icmd->ipkt_next = NULL;
9539 		icmd->ipkt_prev = NULL;
9540 	}
9541 	mutex_exit(&pptr->port_mutex);
9542 }
9543 
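/*
 * A worked note on the retry pacing above (a reading of the code, not of any
 * spec): ipkt_restart is set to fcp_watchdog_time plus the number of retries
 * already attempted, so successive requeues of the same packet are re-issued
 * after roughly 0, 1, 2, ... watchdog ticks.  Together with the per-command
 * timeout, this keeps the total time spent retrying a discovery command
 * bounded (see the 60 second note in the block comment above).
 */
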
9544 /*
9545  *     Function: fcp_transport
9546  *
9547  *  Description: This function submits the Fibre Channel packet to the transort
9548  *  Description: This function submits the Fibre Channel packet to the transport
9549  *		 fails the submission, the treatment depends on the value of
9550  *		 the variable internal.
9551  *
9552  *     Argument: port_handle	fp/fctl port handle.
9553  *		 *fpkt		Packet to submit to the transport layer.
9554  *		 internal	Not zero when it's an internal packet.
9555  *
9556  * Return Value: FC_TRAN_BUSY
9557  *		 FC_STATEC_BUSY
9558  *		 FC_OFFLINE
9559  *		 FC_LOGINREQ
9560  *		 FC_DEVICE_BUSY
9561  *		 FC_SUCCESS
9562  */
9563 static int
9564 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9565 {
9566 	int	rval;
9567 
9568 	rval = fc_ulp_transport(port_handle, fpkt);
9569 	if (rval == FC_SUCCESS) {
9570 		return (rval);
9571 	}
9572 
9573 	/*
9574 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9575 	 * a command. If the underlying modules see that there is a state
9576 	 * change, or if a port is OFFLINE, then that state change hasn't
9577 	 * reached FCP yet, so re-queue the command for deferred
9578 	 * submission.
9579 	 */
9580 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9581 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9582 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9583 		/*
9584 		 * Defer packet re-submission. A life hang is possible on
9585 		 * internal commands if the port driver sends FC_STATEC_BUSY
9586 		 * forever, but that shouldn't happen in a good environment.
9587 		 * Limiting re-transport for internal commands is probably a
9588 		 * good idea.
9589 		 * A race condition can happen when a port sees a barrage of
9590 		 * link transitions from offline to online. If the FCTL has
9591 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9592 		 * internal commands should be queued to do the discovery.
9593 		 * The race condition is when an online comes, FCP starts
9594 		 * its internal discovery and the link goes offline. It is
9595 		 * possible that the statec_callback has not reached FCP
9596 		 * and FCP is carrying on with its internal discovery.
9597 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9598 		 * that the link has gone offline. At this point FCP should
9599 		 * drop all the internal commands and wait for the
9600 		 * statec_callback. That is facilitated by incrementing
9601 		 * port_link_cnt.
9602 		 *
9603 		 * For external commands, the (FC) pkt_timeout is decremented
9604 		 * by the queue delay added by our driver. Care is taken to
9605 		 * ensure that it doesn't become zero (zero means no timeout).
9606 		 * If the time expires right inside the driver queue itself,
9607 		 * the watch thread will return it to the original caller
9608 		 * indicating that the command has timed out.
9609 		 */
9610 		if (internal) {
9611 			char			*op;
9612 			struct fcp_ipkt	*icmd;
9613 
9614 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9615 			switch (icmd->ipkt_opcode) {
9616 			case SCMD_REPORT_LUN:
9617 				op = "REPORT LUN";
9618 				break;
9619 
9620 			case SCMD_INQUIRY:
9621 				op = "INQUIRY";
9622 				break;
9623 
9624 			case SCMD_INQUIRY_PAGE83:
9625 				op = "INQUIRY-83";
9626 				break;
9627 
9628 			default:
9629 				op = "Internal SCSI COMMAND";
9630 				break;
9631 			}
9632 
9633 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9634 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9635 				rval = FC_SUCCESS;
9636 			}
9637 		} else {
9638 			struct fcp_pkt *cmd;
9639 			struct fcp_port *pptr;
9640 
9641 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9642 			cmd->cmd_state = FCP_PKT_IDLE;
9643 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9644 
9645 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9646 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9647 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9648 				    "fcp_transport: xport busy for pkt %p",
9649 				    cmd->cmd_pkt);
9650 				rval = FC_TRAN_BUSY;
9651 			} else {
9652 				fcp_queue_pkt(pptr, cmd);
9653 				rval = FC_SUCCESS;
9654 			}
9655 		}
9656 	}
9657 
9658 	return (rval);
9659 }
9660 
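/*
 * Illustrative sketch (not part of the driver): a typical internal-command
 * caller only has to clean up when fcp_transport() itself gives up, since
 * the deferred/retry handling has already been attempted inside, e.g.:
 *
 *	if (fcp_transport(pptr->port_fp_handle, fpkt, 1) != FC_SUCCESS) {
 *		... tear down the internal packet (caller specific) ...
 *	}
 *
 * External commands see FC_TRAN_BUSY only for FLAG_NOQUEUE packets; the
 * other retryable transport errors are re-queued internally and reported
 * back as FC_SUCCESS.
 */
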
9661 /*VARARGS3*/
9662 static void
9663 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9664 {
9665 	char		buf[256];
9666 	va_list		ap;
9667 
9668 	if (dip == NULL) {
9669 		dip = fcp_global_dip;
9670 	}
9671 
9672 	va_start(ap, fmt);
9673 	(void) vsprintf(buf, fmt, ap);
9674 	va_end(ap);
9675 
9676 	scsi_log(dip, "fcp", level, buf);
9677 }
9678 
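/*
 * Note on usage: fcp_log() hands the formatted message to scsi_log(), which
 * follows the cmn_err() first-character conventions, so a leading '!' in the
 * format string (as used throughout this file) directs the message to the
 * system log only rather than to the console.  For example:
 *
 *	fcp_log(CE_WARN, pptr->port_dip,
 *	    "!fcp%d: scsi_hba_tran_alloc failed", instance);
 */
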
9679 /*
9680  * This function retries the NS registration of the FC4 type.
9681  * It assumes that port_mutex is held.
9682  * The function does nothing if the topology is not fabric, so the
9683  * topology has to be set before this function can be called.
9684  */
9685 static void
9686 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9687 {
9688 	int	rval;
9689 
9690 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9691 
9692 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9693 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9694 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9695 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9696 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9697 		}
9698 		return;
9699 	}
9700 	mutex_exit(&pptr->port_mutex);
9701 	rval = fcp_do_ns_registry(pptr, s_id);
9702 	mutex_enter(&pptr->port_mutex);
9703 
9704 	if (rval == 0) {
9705 		/* Registry successful. Reset flag */
9706 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9707 	}
9708 }
9709 
9710 /*
9711  * This function registers the ULP with the switch by calling the transport i/f.
9712  */
9713 static int
9714 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9715 {
9716 	fc_ns_cmd_t		ns_cmd;
9717 	ns_rfc_type_t		rfc;
9718 	uint32_t		types[8];
9719 
9720 	/*
9721 	 * Prepare the Name server structure to
9722 	 * register with the transport in case of
9723 	 * Fabric configuration.
9724 	 */
9725 	bzero(&rfc, sizeof (rfc));
9726 	bzero(types, sizeof (types));
9727 
9728 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9729 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9730 
9731 	rfc.rfc_port_id.port_id = s_id;
9732 	bcopy(types, rfc.rfc_types, sizeof (types));
9733 
9734 	ns_cmd.ns_flags = 0;
9735 	ns_cmd.ns_cmd = NS_RFT_ID;
9736 	ns_cmd.ns_req_len = sizeof (rfc);
9737 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9738 	ns_cmd.ns_resp_len = 0;
9739 	ns_cmd.ns_resp_payload = NULL;
9740 
9741 	/*
9742 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9743 	 */
9744 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9745 		fcp_log(CE_WARN, pptr->port_dip,
9746 		    "!ns_registry: failed name server registration");
9747 		return (1);
9748 	}
9749 
9750 	return (0);
9751 }
9752 
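/*
 * Illustrative note (a reading of the code below, not of the FC-GS wire
 * format): the RFT_ID payload carries a bitmap of the FC-4 types supported
 * by the port.  The FC4_TYPE_WORD_POS()/FC4_TYPE_BIT_POS() macros select the
 * 32-bit word and the bit within that word for a given FC-4 type, so
 * registering an additional type would simply be:
 *
 *	types[FC4_TYPE_WORD_POS(type)] |= (1 << FC4_TYPE_BIT_POS(type));
 *
 * This driver only ever registers FC_TYPE_SCSI_FCP.
 */
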
9753 /*
9754  *     Function: fcp_handle_port_attach
9755  *
9756  *  Description: This function is called from fcp_port_attach() to attach a
9757  *		 new port. This routine does the following:
9758  *
9759  *		1) Allocates an fcp_port structure and initializes it.
9760  *		2) Tries to register the new FC-4 (FCP) capability with the name
9761  *		   server.
9762  *		3) Kicks off the enumeration of the targets/luns visible
9763  *		   through this new port.  That is done by calling
9764  *		   fcp_statec_callback() if the port is online.
9765  *
9766  *     Argument: ulph		fp/fctl port handle.
9767  *		 *pinfo		Port information.
9768  *		 s_id		Port ID.
9769  *		 instance	Device instance number for the local port
9770  *				(returned by ddi_get_instance()).
9771  *
9772  * Return Value: DDI_SUCCESS
9773  *		 DDI_FAILURE
9774  *
9775  *	Context: User and Kernel context.
9776  */
9777 /*ARGSUSED*/
9778 int
9779 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9780     uint32_t s_id, int instance)
9781 {
9782 	int			res = DDI_FAILURE;
9783 	scsi_hba_tran_t		*tran;
9784 	int			mutex_initted = FALSE;
9785 	int			hba_attached = FALSE;
9786 	int			soft_state_linked = FALSE;
9787 	int			event_bind = FALSE;
9788 	struct fcp_port		*pptr;
9789 	fc_portmap_t		*tmp_list = NULL;
9790 	uint32_t		max_cnt, alloc_cnt;
9791 	uchar_t			*boot_wwn = NULL;
9792 	uint_t			nbytes;
9793 	int			manual_cfg;
9794 
9795 	/*
9796 	 * this port instance is attaching for the first time (or after
9797 	 * having been detached before)
9798 	 */
9799 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9800 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9801 
9802 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9803 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed; "
9804 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9805 		    instance);
9806 		return (res);
9807 	}
9808 
9809 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9810 		/* this shouldn't happen */
9811 		ddi_soft_state_free(fcp_softstate, instance);
9812 		cmn_err(CE_WARN, "fcp: bad soft state");
9813 		return (res);
9814 	}
9815 
9816 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9817 
9818 	/*
9819 	 * Make a copy of ulp_port_info as fctl allocates
9820 	 * a temp struct.
9821 	 */
9822 	(void) fcp_cp_pinfo(pptr, pinfo);
9823 
9824 	/*
9825 	 * Check for manual_configuration_only property.
9826 	 * Enable manual configuration if the property is
9827 	 * set to 1, otherwise disable manual configuration.
9828 	 */
9829 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9830 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9831 	    MANUAL_CFG_ONLY,
9832 	    -1)) != -1) {
9833 		if (manual_cfg == 1) {
9834 			char	*pathname;
9835 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9836 			(void) ddi_pathname(pptr->port_dip, pathname);
9837 			cmn_err(CE_NOTE,
9838 			    "%s (%s%d) %s is enabled via %s.conf.",
9839 			    pathname,
9840 			    ddi_driver_name(pptr->port_dip),
9841 			    ddi_get_instance(pptr->port_dip),
9842 			    MANUAL_CFG_ONLY,
9843 			    ddi_driver_name(pptr->port_dip));
9844 			fcp_enable_auto_configuration = 0;
9845 			kmem_free(pathname, MAXPATHLEN);
9846 		}
9847 	}
9848 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9849 	pptr->port_link_cnt = 1;
9850 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9851 	pptr->port_id = s_id;
9852 	pptr->port_instance = instance;
9853 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9854 	pptr->port_state = FCP_STATE_INIT;
9855 	if (pinfo->port_acc_attr == NULL) {
9856 		/*
9857 		 * The corresponding FCA doesn't support DMA at all
9858 		 */
9859 		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9860 	}
9861 
9862 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9863 
9864 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9865 		/*
9866 		 * If the FCA supports DMA in the SCSI data phase, we need to
9867 		 * preallocate dma cookies, so stash the cookie size
9868 		 */
9869 		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9870 		    pptr->port_data_dma_attr.dma_attr_sgllen;
9871 	}
9872 
9873 	/*
9874 	 * The two mutexes of fcp_port are initialized.	 The variable
9875 	 * mutex_initted is incremented to remember that fact.	That variable
9876 	 * is checked when the routine fails and the mutexes have to be
9877 	 * destroyed.
9878 	 */
9879 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9880 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9881 	mutex_initted++;
9882 
9883 	/*
9884 	 * The SCSI tran structure is allocated and initialized now.
9885 	 */
9886 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9887 		fcp_log(CE_WARN, pptr->port_dip,
9888 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9889 		goto fail;
9890 	}
9891 
9892 	/* link in the transport structure then fill it in */
9893 	pptr->port_tran = tran;
9894 	tran->tran_hba_private		= pptr;
9895 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9896 	tran->tran_tgt_probe		= NULL;
9897 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9898 	tran->tran_start		= fcp_scsi_start;
9899 	tran->tran_reset		= fcp_scsi_reset;
9900 	tran->tran_abort		= fcp_scsi_abort;
9901 	tran->tran_getcap		= fcp_scsi_getcap;
9902 	tran->tran_setcap		= fcp_scsi_setcap;
9903 	tran->tran_init_pkt		= NULL;
9904 	tran->tran_destroy_pkt		= NULL;
9905 	tran->tran_dmafree		= NULL;
9906 	tran->tran_sync_pkt		= NULL;
9907 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9908 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9909 	tran->tran_get_name		= fcp_scsi_get_name;
9910 	tran->tran_clear_aca		= NULL;
9911 	tran->tran_clear_task_set	= NULL;
9912 	tran->tran_terminate_task	= NULL;
9913 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9914 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9915 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9916 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9917 	tran->tran_quiesce		= NULL;
9918 	tran->tran_unquiesce		= NULL;
9919 	tran->tran_bus_reset		= NULL;
9920 	tran->tran_bus_config		= fcp_scsi_bus_config;
9921 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9922 	tran->tran_bus_power		= NULL;
9923 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9924 
9925 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9926 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9927 	tran->tran_setup_pkt		= fcp_pkt_setup;
9928 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9929 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9930 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9931 	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9932 		/*
9933 		 * If the FCA doesn't support DMA, then we use different vectors
9934 		 * to minimize the effects on the DMA code flow path.
9935 		 */
9936 		tran->tran_start	   = fcp_pseudo_start;
9937 		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
9938 		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
9939 		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
9940 		tran->tran_dmafree	   = fcp_pseudo_dmafree;
9941 		tran->tran_setup_pkt	   = NULL;
9942 		tran->tran_teardown_pkt	   = NULL;
9943 		tran->tran_pkt_constructor = NULL;
9944 		tran->tran_pkt_destructor  = NULL;
9945 		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
9946 	}
9947 
9948 	/*
9949 	 * Allocate an ndi event handle
9950 	 */
9951 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9952 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9953 
9954 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9955 	    sizeof (fcp_ndi_event_defs));
9956 
9957 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9958 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9959 
9960 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9961 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9962 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9963 
9964 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9965 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9966 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9967 		goto fail;
9968 	}
9969 	event_bind++;	/* Checked in fail case */
9970 
9971 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9972 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9973 	    != DDI_SUCCESS) {
9974 		fcp_log(CE_WARN, pptr->port_dip,
9975 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9976 		goto fail;
9977 	}
9978 	hba_attached++;	/* Checked in fail case */
9979 
9980 	pptr->port_mpxio = 0;
9981 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9982 	    MDI_SUCCESS) {
9983 		pptr->port_mpxio++;
9984 	}
9985 
9986 	/*
9987 	 * The following code puts the new port structure in the global
9988 	 * list of ports and, if it is the first port to attach, it starts
9989 	 * the fcp_watchdog_tick.
9990 	 *
9991 	 * Why put this new port in the global list before we are done
9992 	 * attaching it?  We are actually making the structure globally known
9993 	 * before we are done attaching it.  The reason for that is the code
9994 	 * that follows.  At this point the resources to handle the port are
9995 	 * allocated.  This function is now going to do the following:
9996 	 *
9997 	 *   1) It is going to try to register with the name server advertising
9998 	 *	the new FCP capability of the port.
9999 	 *   2) It is going to play the role of the fp/fctl layer by building
10000 	 *	a list of worldwide names reachable through this port and call
10001 	 *	itself on fcp_statec_callback().  That requires the port to
10002 	 *	be part of the global list.
10003 	 */
10004 	mutex_enter(&fcp_global_mutex);
10005 	if (fcp_port_head == NULL) {
10006 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
10007 	}
10008 	pptr->port_next = fcp_port_head;
10009 	fcp_port_head = pptr;
10010 	soft_state_linked++;
10011 
10012 	if (fcp_watchdog_init++ == 0) {
10013 		fcp_watchdog_tick = fcp_watchdog_timeout *
10014 		    drv_usectohz(1000000);
10015 		fcp_watchdog_id = timeout(fcp_watch, NULL,
10016 		    fcp_watchdog_tick);
10017 	}
10018 	mutex_exit(&fcp_global_mutex);
10019 
10020 	/*
10021 	 * Here an attempt is made to register the new FCP capability with
10022 	 * the name server.  That is done by sending an RFT_ID to the name
10023 	 * server.  It is done synchronously.  The function
10024 	 * fcp_do_ns_registry() doesn't return until the name server has
10025 	 * responded.  On failure, just ignore it for now; it will get retried
10026 	 * during state change callbacks.  We'll set a flag to record the failure.
10027 	 */
10028 	if (fcp_do_ns_registry(pptr, s_id)) {
10029 		mutex_enter(&pptr->port_mutex);
10030 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10031 		mutex_exit(&pptr->port_mutex);
10032 	} else {
10033 		mutex_enter(&pptr->port_mutex);
10034 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10035 		mutex_exit(&pptr->port_mutex);
10036 	}
10037 
10038 	/*
10039 	 * Lookup for boot WWN property
10040 	 */
10041 	if (modrootloaded != 1) {
10042 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10043 		    ddi_get_parent(pinfo->port_dip),
10044 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10045 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10046 		    (nbytes == FC_WWN_SIZE)) {
10047 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10048 		}
10049 		if (boot_wwn) {
10050 			ddi_prop_free(boot_wwn);
10051 		}
10052 	}
10053 
10054 	/*
10055 	 * Handle various topologies and link states.
10056 	 */
10057 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10058 	case FC_STATE_OFFLINE:
10059 
10060 		/*
10061 		 * we're attaching a port where the link is offline
10062 		 *
10063 		 * Wait for ONLINE, at which time a state
10064 		 * change will cause a statec_callback
10065 		 *
10066 		 * in the meantime, do not do anything
10067 		 */
10068 		res = DDI_SUCCESS;
10069 		pptr->port_state |= FCP_STATE_OFFLINE;
10070 		break;
10071 
10072 	case FC_STATE_ONLINE: {
10073 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
10074 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10075 			res = DDI_SUCCESS;
10076 			break;
10077 		}
10078 		/*
10079 		 * discover devices and create nodes (a private
10080 		 * loop or point-to-point)
10081 		 */
10082 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10083 
10084 		/*
10085 		 * At this point we are going to build a list of all the ports
10086 		 * that	can be reached through this local port.	 It looks like
10087 		 * we cannot handle more than FCP_MAX_DEVICES per local port
10088 		 * (128).
10089 		 */
10090 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10091 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10092 		    KM_NOSLEEP)) == NULL) {
10093 			fcp_log(CE_WARN, pptr->port_dip,
10094 			    "!fcp%d: failed to allocate portmap",
10095 			    instance);
10096 			goto fail;
10097 		}
10098 
10099 		/*
10100 		 * fc_ulp_getportmap() is going to provide us with the list of
10101 		 * remote ports in the buffer we just allocated.  The way the
10102 		 * list is going to be retrieved depends on the topology.
10103 		 * However, if we are connected to a Fabric, a name server
10104 		 * request may be sent to get the list of FCP capable ports.
10105 		 * It should be noted that in that case the request is
10106 		 * synchronous.	 This means we are stuck here until the name
10107 		 * server replies.  A lot of things can change during that time,
10108 		 * including, possibly, being called on
10109 		 * fcp_statec_callback() for different reasons. I'm not sure
10110 		 * the code can handle that.
10111 		 */
10112 		max_cnt = FCP_MAX_DEVICES;
10113 		alloc_cnt = FCP_MAX_DEVICES;
10114 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10115 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10116 		    FC_SUCCESS) {
10117 			caddr_t msg;
10118 
10119 			(void) fc_ulp_error(res, &msg);
10120 
10121 			/*
10122 			 * this just means the transport is
10123 			 * busy, perhaps building a portmap; so,
10124 			 * for now, succeed this port attach.
10125 			 * When the transport has a new map,
10126 			 * it'll send us a state change then.
10127 			 */
10128 			fcp_log(CE_WARN, pptr->port_dip,
10129 			    "!failed to get port map : %s", msg);
10130 
10131 			res = DDI_SUCCESS;
10132 			break;	/* go return result */
10133 		}
10134 		if (max_cnt > alloc_cnt) {
10135 			alloc_cnt = max_cnt;
10136 		}
10137 
10138 		/*
10139 		 * We are now going to call fcp_statec_callback() ourselves.
10140 		 * By issuing this call we are trying to kick off the enumera-
10141 		 * tion process.
10142 		 */
10143 		/*
10144 		 * let the state change callback do the SCSI device
10145 		 * discovery and create the devinfos
10146 		 */
10147 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10148 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10149 		    max_cnt, pptr->port_id);
10150 
10151 		res = DDI_SUCCESS;
10152 		break;
10153 	}
10154 
10155 	default:
10156 		/* unknown port state */
10157 		fcp_log(CE_WARN, pptr->port_dip,
10158 		    "!fcp%d: invalid port state at attach=0x%x",
10159 		    instance, pptr->port_phys_state);
10160 
10161 		mutex_enter(&pptr->port_mutex);
10162 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10163 		mutex_exit(&pptr->port_mutex);
10164 
10165 		res = DDI_SUCCESS;
10166 		break;
10167 	}
10168 
10169 	/* free temp list if used */
10170 	if (tmp_list != NULL) {
10171 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10172 	}
10173 
10174 	/* note the attach time */
10175 	pptr->port_attach_time = ddi_get_lbolt64();
10176 
10177 	/* all done */
10178 	return (res);
10179 
10180 	/* a failure we have to clean up after */
10181 fail:
10182 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10183 
10184 	if (soft_state_linked) {
10185 		/* remove this fcp_port from the linked list */
10186 		(void) fcp_soft_state_unlink(pptr);
10187 	}
10188 
10189 	/* unbind and free event set */
10190 	if (pptr->port_ndi_event_hdl) {
10191 		if (event_bind) {
10192 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10193 			    &pptr->port_ndi_events, NDI_SLEEP);
10194 		}
10195 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10196 	}
10197 
10198 	if (pptr->port_ndi_event_defs) {
10199 		(void) kmem_free(pptr->port_ndi_event_defs,
10200 		    sizeof (fcp_ndi_event_defs));
10201 	}
10202 
10203 	/*
10204 	 * Clean up mpxio stuff
10205 	 */
10206 	if (pptr->port_mpxio) {
10207 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10208 		pptr->port_mpxio--;
10209 	}
10210 
10211 	/* undo SCSI HBA setup */
10212 	if (hba_attached) {
10213 		(void) scsi_hba_detach(pptr->port_dip);
10214 	}
10215 	if (pptr->port_tran != NULL) {
10216 		scsi_hba_tran_free(pptr->port_tran);
10217 	}
10218 
10219 	mutex_enter(&fcp_global_mutex);
10220 
10221 	/*
10222 	 * We check soft_state_linked, because it is incremented right before
10223 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10224 	 * soft_state_linked is still FALSE, we do not want to decrement
10225 	 * fcp_watchdog_init or possibly call untimeout.
10226 	 */
10227 
10228 	if (soft_state_linked) {
10229 		if (--fcp_watchdog_init == 0) {
10230 			timeout_id_t	tid = fcp_watchdog_id;
10231 
10232 			mutex_exit(&fcp_global_mutex);
10233 			(void) untimeout(tid);
10234 		} else {
10235 			mutex_exit(&fcp_global_mutex);
10236 		}
10237 	} else {
10238 		mutex_exit(&fcp_global_mutex);
10239 	}
10240 
10241 	if (mutex_initted) {
10242 		mutex_destroy(&pptr->port_mutex);
10243 		mutex_destroy(&pptr->port_pkt_mutex);
10244 	}
10245 
10246 	if (tmp_list != NULL) {
10247 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10248 	}
10249 
10250 	/* this makes pptr invalid */
10251 	ddi_soft_state_free(fcp_softstate, instance);
10252 
10253 	return (DDI_FAILURE);
10254 }
10255 
10256 
10257 static int
10258 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10259 {
10260 	int count = 0;
10261 
10262 	mutex_enter(&pptr->port_mutex);
10263 
10264 	/*
10265 	 * if the port is powered down or suspended, nothing else
10266 	 * to do; just return.
10267 	 */
10268 	if (flag != FCP_STATE_DETACHING) {
10269 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10270 		    FCP_STATE_SUSPENDED)) {
10271 			pptr->port_state |= flag;
10272 			mutex_exit(&pptr->port_mutex);
10273 			return (FC_SUCCESS);
10274 		}
10275 	}
10276 
10277 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10278 		mutex_exit(&pptr->port_mutex);
10279 		return (FC_FAILURE);
10280 	}
10281 
10282 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10283 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10284 	    "fcp_handle_port_detach: port is detaching");
10285 
10286 	pptr->port_state |= flag;
10287 
10288 	/*
10289 	 * Wait for any ongoing reconfig/ipkt to complete, which
10290 	 * ensures that the freeing of targets/luns is safe.
10291 	 * No more references to this port should happen from statec/ioctl
10292 	 * after that, as it was removed from the global port list.
10293 	 */
10294 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10295 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10296 		/*
10297 		 * Let's give sufficient time for reconfig/ipkt to complete
10298 		 * (one second per iteration, at most FCP_ICMD_DEADLINE times).
10299 		 */
10300 		if (count++ >= FCP_ICMD_DEADLINE) {
10301 			break;
10302 		}
10303 		mutex_exit(&pptr->port_mutex);
10304 		delay(drv_usectohz(1000000));
10305 		mutex_enter(&pptr->port_mutex);
10306 	}
10307 
10308 	/*
10309 	 * if the driver is still busy then fail to
10310 	 * suspend/power down.
10311 	 */
10312 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10313 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10314 		pptr->port_state &= ~flag;
10315 		mutex_exit(&pptr->port_mutex);
10316 		return (FC_FAILURE);
10317 	}
10318 
10319 	if (flag == FCP_STATE_DETACHING) {
10320 		pptr = fcp_soft_state_unlink(pptr);
10321 		ASSERT(pptr != NULL);
10322 	}
10323 
10324 	pptr->port_link_cnt++;
10325 	pptr->port_state |= FCP_STATE_OFFLINE;
10326 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10327 
10328 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10329 	    FCP_CAUSE_LINK_DOWN);
10330 	mutex_exit(&pptr->port_mutex);
10331 
10332 	/* kill watch dog timer if we're the last */
10333 	mutex_enter(&fcp_global_mutex);
10334 	if (--fcp_watchdog_init == 0) {
10335 		timeout_id_t	tid = fcp_watchdog_id;
10336 		mutex_exit(&fcp_global_mutex);
10337 		(void) untimeout(tid);
10338 	} else {
10339 		mutex_exit(&fcp_global_mutex);
10340 	}
10341 
10342 	/* clean up the port structures */
10343 	if (flag == FCP_STATE_DETACHING) {
10344 		fcp_cleanup_port(pptr, instance);
10345 	}
10346 
10347 	return (FC_SUCCESS);
10348 }
10349 
10350 
10351 static void
10352 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10353 {
10354 	ASSERT(pptr != NULL);
10355 
10356 	/* unbind and free event set */
10357 	if (pptr->port_ndi_event_hdl) {
10358 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10359 		    &pptr->port_ndi_events, NDI_SLEEP);
10360 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10361 	}
10362 
10363 	if (pptr->port_ndi_event_defs) {
10364 		(void) kmem_free(pptr->port_ndi_event_defs,
10365 		    sizeof (fcp_ndi_event_defs));
10366 	}
10367 
10368 	/* free the lun/target structures and devinfos */
10369 	fcp_free_targets(pptr);
10370 
10371 	/*
10372 	 * Clean up mpxio stuff
10373 	 */
10374 	if (pptr->port_mpxio) {
10375 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10376 		pptr->port_mpxio--;
10377 	}
10378 
10379 	/* clean up SCSA stuff */
10380 	(void) scsi_hba_detach(pptr->port_dip);
10381 	if (pptr->port_tran != NULL) {
10382 		scsi_hba_tran_free(pptr->port_tran);
10383 	}
10384 
10385 #ifdef	KSTATS_CODE
10386 	/* clean up kstats */
10387 	if (pptr->fcp_ksp != NULL) {
10388 		kstat_delete(pptr->fcp_ksp);
10389 	}
10390 #endif
10391 
10392 	/* clean up soft state mutexes/condition variables */
10393 	mutex_destroy(&pptr->port_mutex);
10394 	mutex_destroy(&pptr->port_pkt_mutex);
10395 
10396 	/* all done with soft state */
10397 	ddi_soft_state_free(fcp_softstate, instance);
10398 }
10399 
10400 /*
10401  *     Function: fcp_kmem_cache_constructor
10402  *
10403  *  Description: This function allocates and initializes the resources required
10404  *		 to build a scsi_pkt structure for the target driver.  The result
10405  *		 of the allocation and initialization will be cached in the
10406  *		 memory cache.	As DMA resources may be allocated here, that
10407  *		 means DMA resources will be tied up in the cache manager.
10408  *		 This is a tradeoff that has been made for performance reasons.
10409  *
10410  *     Argument: *buf		Memory to preinitialize.
10411  *		 *arg		FCP port structure (fcp_port).
10412  *		 kmflags	Value passed to kmem_cache_alloc() and
10413  *				propagated to the constructor.
10414  *
10415  * Return Value: 0	Allocation/Initialization was successful.
10416  *		 -1	Allocation or Initialization failed.
10417  *
10418  *
10419  * If the returned value is 0, the buffer is initialized like this:
10420  *
10421  *		    +================================+
10422  *	     +----> |	      struct scsi_pkt	     |
10423  *	     |	    |				     |
10424  *	     | +--- | pkt_ha_private		     |
10425  *	     | |    |				     |
10426  *	     | |    +================================+
10427  *	     | |
10428  *	     | |    +================================+
10429  *	     | +--> |	    struct fcp_pkt	     | <---------+
10430  *	     |	    |				     |		 |
10431  *	     +----- | cmd_pkt			     |		 |
10432  *		    |			  cmd_fp_pkt | ---+	 |
10433  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10434  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10435  *	  |    |    |--------------------------------|	  |	 |
10436  *	  |    |    |	      struct fc_packet	     | <--+	 |
10437  *	  |    |    |				     |		 |
10438  *	  |    |    |		     pkt_ulp_private | ----------+
10439  *	  |    |    |		     pkt_fca_private | -----+
10440  *	  |    |    |		     pkt_data_cookie | ---+ |
10441  *	  |    |    | pkt_cmdlen		     |	  | |
10442  *	  |    |(a) | pkt_rsplen		     |	  | |
10443  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10444  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10445  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10446  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10447  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10448  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10449  *		    +================================+	  | |  |   |   |    |
10450  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10451  *		    |				     |	    |  |   |   |    |
10452  *		    +================================+	    |  |   |   |    |
10453  *		    |	      fca_private	     | <----+  |   |   |    |
10454  *		    |				     |	       |   |   |    |
10455  *		    +================================+	       |   |   |    |
10456  *							       |   |   |    |
10457  *							       |   |   |    |
10458  *		    +================================+	 (d)   |   |   |    |
10459  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10460  *		    |				     |		   |   |    |
10461  *		    +================================+		   |   |    |
10462  *								   |   |    |
10463  *		    +================================+	 (d)	   |   |    |
10464  *		    |		fcp_resp	     | <-----------+   |    |
10465  *		    |	(DMA resources associated)   |		       |    |
10466  *		    +================================+		       |    |
10467  *								       |    |
10468  *								       |    |
10469  *								       |    |
10470  *		    +================================+	 (c)	       |    |
10471  *		    |	     fcp_cmd cookies	     | <---------------+    |
10472  *		    |				     |			    |
10473  *		    +================================+			    |
10474  *									    |
10475  *		    +================================+	 (c)		    |
10476  *		    |		 fcp_cmd	     | <--------------------+
10477  *		    |	(DMA resources associated)   |
10478  *		    +================================+
10479  *
10480  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10481  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10482  * (c) Only if DMA is used for the FCP_CMD buffer.
10483  * (d) Only if DMA is used for the FCP_RESP buffer
10484  */
10485 static int
10486 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10487     int kmflags)
10488 {
10489 	struct fcp_pkt	*cmd;
10490 	struct fcp_port	*pptr;
10491 	fc_packet_t	*fpkt;
10492 
10493 	pptr = (struct fcp_port *)tran->tran_hba_private;
10494 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10495 	bzero(cmd, tran->tran_hba_len);
10496 
10497 	cmd->cmd_pkt = pkt;
10498 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10499 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10500 	cmd->cmd_fp_pkt = fpkt;
10501 
10502 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10503 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10504 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10505 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10506 
10507 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10508 	    sizeof (struct fcp_pkt));
10509 
10510 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10511 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10512 
10513 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10514 		/*
10515 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10516 		 * fcp_resp.  The transfer of information will be done by
10517 		 * bcopy.
10518 		 * The naming of the flag (which is actually a value) is
10519 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10520 		 * DMA" but rather "NO DMA".
10521 		 */
10522 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10523 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10524 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10525 	} else {
10526 		/*
10527 		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
10528 		 * buffer.  A buffer is allocated for each one using the
10529 		 * ddi_dma_* interfaces.
10530 		 */
10531 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10532 			return (-1);
10533 		}
10534 	}
10535 
10536 	return (0);
10537 }
10538 
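/*
 * A worked view of the pointer arithmetic in the constructor above (derived
 * from the code, not from any interface contract): the pkt_ha_private area
 * of the scsi_pkt is carved up as
 *
 *	cmd						struct fcp_pkt
 *	cmd + sizeof (struct fcp_pkt)			DMA cookie array
 *							(port_dmacookie_sz bytes)
 *	cmd + sizeof (struct fcp_pkt) + port_dmacookie_sz	FCA private area
 *
 * which is why tran_hba_len was set in fcp_handle_port_attach() to
 * port_priv_pkt_len + sizeof (struct fcp_pkt) + port_dmacookie_sz.
 */
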
10539 /*
10540  *     Function: fcp_kmem_cache_destructor
10541  *
10542  *  Description: Called by the destructor of the cache managed by SCSA.
10543  *		 All the resources pre-allocated in fcp_kmem_cache_constructor
10544  *		 and the data pre-initialized there are freed
10545  *		 and uninitialized here.
10546  *
10547  *     Argument: *buf		Memory to uninitialize.
10548  *		 *arg		FCP port structure (fcp_port).
10549  *
10550  * Return Value: None
10551  *
10552  *	Context: kernel
10553  */
10554 static void
10555 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10556 {
10557 	struct fcp_pkt	*cmd;
10558 	struct fcp_port	*pptr;
10559 
10560 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10561 	cmd = pkt->pkt_ha_private;
10562 
10563 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10564 		/*
10565 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10566 		 * buffer and DMA resources allocated to do so are released.
10567 		 */
10568 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10569 	}
10570 }
10571 
10572 /*
10573  *     Function: fcp_alloc_cmd_resp
10574  *
10575  *  Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10576  *		 will be DMAed by the HBA.  The buffer is allocated applying
10577  *		 the DMA requirements for the HBA.  The buffers allocated will
10578  *		 also be bound.	 DMA resources are allocated in the process.
10579  *		 They will be released by fcp_free_cmd_resp().
10580  *
10581  *     Argument: *pptr	FCP port.
10582  *		 *fpkt	fc packet for which the cmd and resp packet should be
10583  *			allocated.
10584  *		 flags	Allocation flags.
10585  *
10586  * Return Value: FC_FAILURE
10587  *		 FC_SUCCESS
10588  *
10589  *	Context: User or Kernel context only if flags == KM_SLEEP.
10590  *		 Interrupt context if the KM_SLEEP is not specified.
10591  */
10592 static int
10593 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10594 {
10595 	int			rval;
10596 	int			cmd_len;
10597 	int			resp_len;
10598 	ulong_t			real_len;
10599 	int			(*cb) (caddr_t);
10600 	ddi_dma_cookie_t	pkt_cookie;
10601 	ddi_dma_cookie_t	*cp;
10602 	uint32_t		cnt;
10603 
10604 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10605 
10606 	cmd_len = fpkt->pkt_cmdlen;
10607 	resp_len = fpkt->pkt_rsplen;
10608 
10609 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10610 
10611 	/* Allocation of a DMA handle used in subsequent calls. */
10612 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10613 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10614 		return (FC_FAILURE);
10615 	}
10616 
10617 	/* A buffer is allocated that satisfies the DMA requirements. */
10618 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10619 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10620 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10621 
10622 	if (rval != DDI_SUCCESS) {
10623 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10624 		return (FC_FAILURE);
10625 	}
10626 
10627 	if (real_len < cmd_len) {
10628 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10629 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10630 		return (FC_FAILURE);
10631 	}
10632 
10633 	/* The buffer allocated is DMA bound. */
10634 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10635 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10636 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10637 
10638 	if (rval != DDI_DMA_MAPPED) {
10639 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10640 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10641 		return (FC_FAILURE);
10642 	}
10643 
10644 	if (fpkt->pkt_cmd_cookie_cnt >
10645 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10646 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10647 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10648 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10649 		return (FC_FAILURE);
10650 	}
10651 
10652 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10653 
10654 	/*
10655 	 * The buffer where the scatter/gather list is going to be built is
10656 	 * allocated.
10657 	 */
10658 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10659 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10660 	    KM_NOSLEEP);
10661 
10662 	if (cp == NULL) {
10663 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10664 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10665 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10666 		return (FC_FAILURE);
10667 	}
10668 
10669 	/*
10670 	 * The scatter/gather list for the buffer we just allocated is built
10671 	 * here.
10672 	 */
10673 	*cp = pkt_cookie;
10674 	cp++;
10675 
10676 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10677 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10678 		    &pkt_cookie);
10679 		*cp = pkt_cookie;
10680 	}
10681 
10682 	ASSERT(fpkt->pkt_resp_dma == NULL);
10683 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10684 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10685 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10686 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10687 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10688 		return (FC_FAILURE);
10689 	}
10690 
10691 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10692 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10693 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10694 	    &fpkt->pkt_resp_acc);
10695 
10696 	if (rval != DDI_SUCCESS) {
10697 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10698 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10699 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10700 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10701 		kmem_free(fpkt->pkt_cmd_cookie,
10702 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10703 		return (FC_FAILURE);
10704 	}
10705 
10706 	if (real_len < resp_len) {
10707 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10708 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10709 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10710 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10711 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10712 		kmem_free(fpkt->pkt_cmd_cookie,
10713 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10714 		return (FC_FAILURE);
10715 	}
10716 
10717 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10718 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10719 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10720 
10721 	if (rval != DDI_DMA_MAPPED) {
10722 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10723 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10724 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10725 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10726 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10727 		kmem_free(fpkt->pkt_cmd_cookie,
10728 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10729 		return (FC_FAILURE);
10730 	}
10731 
10732 	if (fpkt->pkt_resp_cookie_cnt >
10733 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10734 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10735 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10736 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10737 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10738 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10739 		kmem_free(fpkt->pkt_cmd_cookie,
10740 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10741 		return (FC_FAILURE);
10742 	}
10743 
10744 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10745 
10746 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10747 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10748 	    KM_NOSLEEP);
10749 
10750 	if (cp == NULL) {
10751 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10752 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10753 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10754 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10755 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10756 		kmem_free(fpkt->pkt_cmd_cookie,
10757 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10758 		return (FC_FAILURE);
10759 	}
10760 
10761 	*cp = pkt_cookie;
10762 	cp++;
10763 
10764 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10765 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10766 		    &pkt_cookie);
10767 		*cp = pkt_cookie;
10768 	}
10769 
10770 	return (FC_SUCCESS);
10771 }
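
/*
 * Each failure path above repeats the same unwind sequence (unbind the
 * handle, free the memory, free the handle, free the cookie array).  The
 * fragment below is a hedged, illustrative sketch of the same allocation
 * pattern using a single cleanup ladder; it is not part of the driver and
 * the local names and labels are invented for the example.
 *
 *	if (ddi_dma_alloc_handle(dip, attrp, cb, NULL, &hdl) != DDI_SUCCESS)
 *		return (FC_FAILURE);
 *	if (ddi_dma_mem_alloc(hdl, len, accattrp, DDI_DMA_CONSISTENT,
 *	    cb, NULL, &kaddr, &real_len, &acc) != DDI_SUCCESS)
 *		goto fail_handle;
 *	if (ddi_dma_addr_bind_handle(hdl, NULL, kaddr, real_len,
 *	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL, &cookie,
 *	    &ccount) != DDI_DMA_MAPPED)
 *		goto fail_mem;
 *	return (FC_SUCCESS);
 *
 * fail_mem:
 *	ddi_dma_mem_free(&acc);
 * fail_handle:
 *	ddi_dma_free_handle(&hdl);
 *	return (FC_FAILURE);
 */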
10772 
10773 /*
10774  *     Function: fcp_free_cmd_resp
10775  *
10776  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10777  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10778  *		 associated with them.	That includes the DMA resources and the
10779  *		 buffer allocated for the cookies of each one of them.
10780  *
10781  *     Argument: *pptr		FCP port context.
10782  *		 *fpkt		fc packet containing the cmd and resp packet
10783  *				to be released.
10784  *
10785  * Return Value: None
10786  *
10787  *	Context: Interrupt, User and Kernel context.
10788  */
10789 /* ARGSUSED */
10790 static void
10791 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10792 {
10793 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10794 
10795 	if (fpkt->pkt_resp_dma) {
10796 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10797 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10798 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10799 	}
10800 
10801 	if (fpkt->pkt_resp_cookie) {
10802 		kmem_free(fpkt->pkt_resp_cookie,
10803 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10804 		fpkt->pkt_resp_cookie = NULL;
10805 	}
10806 
10807 	if (fpkt->pkt_cmd_dma) {
10808 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10809 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10810 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10811 	}
10812 
10813 	if (fpkt->pkt_cmd_cookie) {
10814 		kmem_free(fpkt->pkt_cmd_cookie,
10815 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10816 		fpkt->pkt_cmd_cookie = NULL;
10817 	}
10818 }
10819 
10820 
10821 /*
10822  * called by the transport to do our own target initialization
10823  *
10824  * can acquire and release the global mutex
10825  */
10826 /* ARGSUSED */
10827 static int
10828 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10829     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10830 {
10831 	uchar_t			*bytes;
10832 	uint_t			nbytes;
10833 	uint16_t		lun_num;
10834 	struct fcp_tgt	*ptgt;
10835 	struct fcp_lun	*plun;
10836 	struct fcp_port	*pptr = (struct fcp_port *)
10837 	    hba_tran->tran_hba_private;
10838 
10839 	ASSERT(pptr != NULL);
10840 
10841 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10842 	    FCP_BUF_LEVEL_8, 0,
10843 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10844 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10845 
10846 	/* get our port WWN property */
10847 	bytes = NULL;
10848 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10849 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10850 	    (nbytes != FC_WWN_SIZE)) {
10851 		/* no port WWN property */
10852 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10853 		    FCP_BUF_LEVEL_8, 0,
10854 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10855 		    " for %s (instance %d): bytes=%p nbytes=%x",
10856 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10857 		    nbytes);
10858 
10859 		if (bytes != NULL) {
10860 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10861 		}
10862 
10863 		return (DDI_NOT_WELL_FORMED);
10864 	}
10865 	ASSERT(bytes != NULL);
10866 
10867 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10868 	    LUN_PROP, 0xFFFF);
10869 	if (lun_num == 0xFFFF) {
10870 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10871 		    FCP_BUF_LEVEL_8, 0,
10872 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10873 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10874 		    ddi_get_instance(tgt_dip));
10875 
10876 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10877 		return (DDI_NOT_WELL_FORMED);
10878 	}
10879 
10880 	mutex_enter(&pptr->port_mutex);
10881 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10882 		mutex_exit(&pptr->port_mutex);
10883 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10884 		    FCP_BUF_LEVEL_8, 0,
10885 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10886 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10887 		    ddi_get_instance(tgt_dip));
10888 
10889 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10890 		return (DDI_FAILURE);
10891 	}
10892 
10893 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10894 	    FC_WWN_SIZE) == 0);
10895 	ASSERT(plun->lun_num == lun_num);
10896 
10897 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10898 
10899 	ptgt = plun->lun_tgt;
10900 
10901 	mutex_enter(&ptgt->tgt_mutex);
10902 	plun->lun_tgt_count++;
10903 	scsi_device_hba_private_set(sd, plun);
10904 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10905 	plun->lun_sd = sd;
10906 	mutex_exit(&ptgt->tgt_mutex);
10907 	mutex_exit(&pptr->port_mutex);
10908 
10909 	return (DDI_SUCCESS);
10910 }
10911 
10912 /*ARGSUSED*/
10913 static int
10914 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10915     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10916 {
10917 	uchar_t			*bytes;
10918 	uint_t			nbytes;
10919 	uint16_t		lun_num;
10920 	struct fcp_tgt	*ptgt;
10921 	struct fcp_lun	*plun;
10922 	struct fcp_port	*pptr = (struct fcp_port *)
10923 	    hba_tran->tran_hba_private;
10924 	child_info_t		*cip;
10925 
10926 	ASSERT(pptr != NULL);
10927 
10928 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10929 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10930 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10931 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10932 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10933 
10934 	cip = (child_info_t *)sd->sd_pathinfo;
10935 	if (cip == NULL) {
10936 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10937 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10938 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10939 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10940 		    ddi_get_instance(tgt_dip));
10941 
10942 		return (DDI_NOT_WELL_FORMED);
10943 	}
10944 
10945 	/* get our port WWN property */
10946 	bytes = NULL;
10947 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10948 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10949 	    (nbytes != FC_WWN_SIZE)) {
10950 		if (bytes) {
10951 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10952 		}
10953 		return (DDI_NOT_WELL_FORMED);
10954 	}
10955 
10956 	ASSERT(bytes != NULL);
10957 
10958 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10959 	    LUN_PROP, 0xFFFF);
10960 	if (lun_num == 0xFFFF) {
10961 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10962 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10963 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10964 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10965 		    ddi_get_instance(tgt_dip));
10966 
10967 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10968 		return (DDI_NOT_WELL_FORMED);
10969 	}
10970 
10971 	mutex_enter(&pptr->port_mutex);
10972 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10973 		mutex_exit(&pptr->port_mutex);
10974 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10975 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10976 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10977 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10978 		    ddi_get_instance(tgt_dip));
10979 
10980 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10981 		return (DDI_FAILURE);
10982 	}
10983 
10984 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10985 	    FC_WWN_SIZE) == 0);
10986 	ASSERT(plun->lun_num == lun_num);
10987 
10988 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10989 
10990 	ptgt = plun->lun_tgt;
10991 
10992 	mutex_enter(&ptgt->tgt_mutex);
10993 	plun->lun_tgt_count++;
10994 	scsi_device_hba_private_set(sd, plun);
10995 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10996 	plun->lun_sd = sd;
10997 	mutex_exit(&ptgt->tgt_mutex);
10998 	mutex_exit(&pptr->port_mutex);
10999 
11000 	return (DDI_SUCCESS);
11001 }
11002 
11003 
11004 /*
11005  * called by the transport to do our own target initialization
11006  *
11007  * can acquire and release the global mutex
11008  */
11009 /* ARGSUSED */
11010 static int
11011 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11012     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11013 {
11014 	struct fcp_port	*pptr = (struct fcp_port *)
11015 	    hba_tran->tran_hba_private;
11016 	int			rval;
11017 
11018 	ASSERT(pptr != NULL);
11019 
11020 	/*
11021 	 * Child node is getting initialized.  Look at the mpxio component
11022 	 * type on the child device to see if this device is mpxio managed
11023 	 * or not.
11024 	 */
11025 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11026 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11027 	} else {
11028 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11029 	}
11030 
11031 	return (rval);
11032 }
11033 
11034 
11035 /* ARGSUSED */
11036 static void
11037 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11038     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11039 {
11040 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
11041 	struct fcp_tgt	*ptgt;
11042 
11043 	if (plun == NULL) {
11044 		return;
11045 	}
11046 
11047 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11048 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
11049 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11050 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11051 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11052 	ptgt = plun->lun_tgt;
11053 
11054 	ASSERT(ptgt != NULL);
11055 
11056 	mutex_enter(&ptgt->tgt_mutex);
11057 	ASSERT(plun->lun_tgt_count > 0);
11058 
11059 	if (--plun->lun_tgt_count == 0) {
11060 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11061 	}
11062 	plun->lun_sd = NULL;
11063 	mutex_exit(&ptgt->tgt_mutex);
11064 }
11065 
11066 /*
11067  *     Function: fcp_scsi_start
11068  *
11069  *  Description: This function is called by the target driver to request a
11070  *		 command to be sent.
11071  *
11072  *     Argument: *ap		SCSI address of the device.
11073  *		 *pkt		SCSI packet containing the cmd to send.
11074  *
11075  * Return Value: TRAN_ACCEPT
11076  *		 TRAN_BUSY
11077  *		 TRAN_BADPKT
11078  *		 TRAN_FATAL_ERROR
11079  */
11080 static int
11081 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11082 {
11083 	struct fcp_port	*pptr = ADDR2FCP(ap);
11084 	struct fcp_lun	*plun = ADDR2LUN(ap);
11085 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
11086 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11087 	int			rval;
11088 
11089 	/* ensure command isn't already issued */
11090 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11091 
11092 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11093 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
11094 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11095 
11096 	/*
11097 	 * It is strange that we enter the fcp_port mutex and the target
11098 	 * mutex to check the lun state (which has a mutex of its own).
11099 	 */
11100 	mutex_enter(&pptr->port_mutex);
11101 	mutex_enter(&ptgt->tgt_mutex);
11102 
11103 	/*
11104 	 * If the device is offline and is not in the process of coming
11105 	 * online, fail the request.
11106 	 */
11107 
11108 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11109 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
11110 		mutex_exit(&ptgt->tgt_mutex);
11111 		mutex_exit(&pptr->port_mutex);
11112 
11113 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11114 			pkt->pkt_reason = CMD_DEV_GONE;
11115 		}
11116 
11117 		return (TRAN_FATAL_ERROR);
11118 	}
11119 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11120 
11121 	/*
11122 	 * If we are suspended, the kernel is trying to dump, so don't
11123 	 * block, fail or defer requests - send them down right away.
11124 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11125 	 * assume we have been suspended.  There is hardware such as
11126 	 * the v880 that doesn't do PM.	 Thus, the check for
11127 	 * ddi_in_panic.
11128 	 *
11129 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11130 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11131 	 * either the device will have gone away or changed and we can fail
11132 	 * the request, or we can proceed if the device didn't change.
11133 	 *
11134 	 * If the pd in the target or the packet is NULL it's probably
11135 	 * because the device has gone away, we allow the request to be
11136 	 * put on the internal queue here in case the device comes back within
11137 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11138 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11139 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL.  pkt_pd
11140 	 * packet initialization.
11141 	 */
11142 
11143 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11144 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11145 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11146 	    (ptgt->tgt_pd_handle == NULL) ||
11147 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11148 		/*
11149 		 * If ((LUN is busy AND
11150 		 *	the port is not suspended AND
11151 		 *	The system is not in panic state) OR
11152 		 *	(The port is coming up))
11153 		 *
11154 		 * We check to see if any of the flags FLAG_NOINTR or
11155 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11156 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11157 		 */
11158 		mutex_exit(&ptgt->tgt_mutex);
11159 		mutex_exit(&pptr->port_mutex);
11160 
11161 		/* see if using interrupts is allowed (so queueing'll work) */
11162 		if (pkt->pkt_flags & FLAG_NOINTR) {
11163 			pkt->pkt_resid = 0;
11164 			return (TRAN_BUSY);
11165 		}
11166 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11167 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11168 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11169 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11170 			return (TRAN_BUSY);
11171 		}
11172 #ifdef	DEBUG
11173 		mutex_enter(&pptr->port_pkt_mutex);
11174 		pptr->port_npkts++;
11175 		mutex_exit(&pptr->port_pkt_mutex);
11176 #endif /* DEBUG */
11177 
11178 		/* go queue up the pkt for later */
11179 		fcp_queue_pkt(pptr, cmd);
11180 		return (TRAN_ACCEPT);
11181 	}
11182 	cmd->cmd_state = FCP_PKT_ISSUED;
11183 
11184 	mutex_exit(&ptgt->tgt_mutex);
11185 	mutex_exit(&pptr->port_mutex);
11186 
11187 	/*
11188 	 * Now that we released the mutexes, what was protected by them can
11189 	 * change.
11190 	 */
11191 
11192 	/*
11193 	 * If there is a reconfiguration in progress, wait for it to complete.
11194 	 */
11195 	fcp_reconfig_wait(pptr);
11196 
11197 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11198 	    pkt->pkt_time : 0;
11199 
11200 	/* prepare the packet */
11201 
11202 	fcp_prepare_pkt(pptr, cmd, plun);
11203 
11204 	if (cmd->cmd_pkt->pkt_time) {
11205 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11206 	} else {
11207 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11208 	}
11209 
11210 	/*
11211 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11212 	 * have to do polled I/O
11213 	 */
11214 	if (pkt->pkt_flags & FLAG_NOINTR) {
11215 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11216 		return (fcp_dopoll(pptr, cmd));
11217 	}
11218 
11219 #ifdef	DEBUG
11220 	mutex_enter(&pptr->port_pkt_mutex);
11221 	pptr->port_npkts++;
11222 	mutex_exit(&pptr->port_pkt_mutex);
11223 #endif /* DEBUG */
11224 
11225 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11226 	if (rval == FC_SUCCESS) {
11227 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11228 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11229 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11230 		return (TRAN_ACCEPT);
11231 	}
11232 
11233 	cmd->cmd_state = FCP_PKT_IDLE;
11234 
11235 #ifdef	DEBUG
11236 	mutex_enter(&pptr->port_pkt_mutex);
11237 	pptr->port_npkts--;
11238 	mutex_exit(&pptr->port_pkt_mutex);
11239 #endif /* DEBUG */
11240 
11241 	/*
11242 	 * For lack of clearer definitions, choose
11243 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11244 	 */
11245 
11246 	if (rval == FC_TRAN_BUSY) {
11247 		pkt->pkt_resid = 0;
11248 		rval = TRAN_BUSY;
11249 	} else {
11250 		mutex_enter(&ptgt->tgt_mutex);
11251 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11252 			child_info_t	*cip;
11253 
11254 			mutex_enter(&plun->lun_mutex);
11255 			cip = plun->lun_cip;
11256 			mutex_exit(&plun->lun_mutex);
11257 
11258 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11259 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11260 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11261 			    plun->lun_tgt->tgt_d_id, rval, cip);
11262 
11263 			rval = TRAN_FATAL_ERROR;
11264 		} else {
11265 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11266 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11267 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11268 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11269 				    pkt);
11270 				rval = TRAN_BUSY;
11271 			} else {
11272 				rval = TRAN_ACCEPT;
11273 				fcp_queue_pkt(pptr, cmd);
11274 			}
11275 		}
11276 		mutex_exit(&ptgt->tgt_mutex);
11277 	}
11278 
11279 	return (rval);
11280 }
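
/*
 * For reference, a hedged sketch of how a caller reaches the polled path
 * handled above: a target driver that cannot take interrupts (for example
 * while the system is dumping) sets FLAG_NOINTR on the packet before
 * calling scsi_transport(9F), which SCSA routes to this tran_start entry
 * point.  The variable names are illustrative.
 *
 *	pkt->pkt_flags |= FLAG_NOINTR;
 *	if (scsi_transport(pkt) != TRAN_ACCEPT)
 *		... handle TRAN_BUSY or TRAN_FATAL_ERROR ...
 */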
11281 
11282 /*
11283  * called by the transport to abort a packet
11284  */
11285 /*ARGSUSED*/
11286 static int
11287 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11288 {
11289 	int tgt_cnt;
11290 	struct fcp_port		*pptr = ADDR2FCP(ap);
11291 	struct fcp_lun	*plun = ADDR2LUN(ap);
11292 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11293 
11294 	if (pkt == NULL) {
11295 		if (ptgt) {
11296 			mutex_enter(&ptgt->tgt_mutex);
11297 			tgt_cnt = ptgt->tgt_change_cnt;
11298 			mutex_exit(&ptgt->tgt_mutex);
11299 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11300 			return (TRUE);
11301 		}
11302 	}
11303 	return (FALSE);
11304 }
11305 
11306 
11307 /*
11308  * Perform reset
11309  */
11310 int
11311 fcp_scsi_reset(struct scsi_address *ap, int level)
11312 {
11313 	int			rval = 0;
11314 	struct fcp_port		*pptr = ADDR2FCP(ap);
11315 	struct fcp_lun	*plun = ADDR2LUN(ap);
11316 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11317 
11318 	if (level == RESET_ALL) {
11319 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11320 			rval = 1;
11321 		}
11322 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11323 		/*
11324 		 * If we are in the middle of discovery, return
11325 		 * SUCCESS as this target will be rediscovered
11326 		 * anyway
11327 		 */
11328 		mutex_enter(&ptgt->tgt_mutex);
11329 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11330 			mutex_exit(&ptgt->tgt_mutex);
11331 			return (1);
11332 		}
11333 		mutex_exit(&ptgt->tgt_mutex);
11334 
11335 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11336 			rval = 1;
11337 		}
11338 	}
11339 	return (rval);
11340 }
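
/*
 * A hedged consumer-side example: target drivers reach fcp_scsi_reset()
 * through scsi_reset(9F).  Here a LUN-level reset is attempted first and a
 * target-level reset is used as a fallback; the "un" soft state name is
 * illustrative.
 *
 *	if (scsi_reset(&un->un_sd->sd_address, RESET_LUN) == 0) {
 *		(void) scsi_reset(&un->un_sd->sd_address, RESET_TARGET);
 *	}
 */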
11341 
11342 
11343 /*
11344  * called by the framework to get a SCSI capability
11345  */
11346 static int
11347 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11348 {
11349 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11350 }
11351 
11352 
11353 /*
11354  * called by the framework to set a SCSI capability
11355  */
11356 static int
11357 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11358 {
11359 	return (fcp_commoncap(ap, cap, value, whom, 1));
11360 }
11361 
11362 /*
11363  *     Function: fcp_pkt_setup
11364  *
11365  *  Description: This function sets up the scsi_pkt structure passed by the
11366  *		 caller. This function assumes fcp_pkt_constructor has been
11367  *		 called previously for the packet passed by the caller.	 If
11368  *		 successful this call will have the following results:
11369  *
11370  *		   - The resources needed that will be constant throughout
11371  *		     the whole transaction are allocated.
11372  *		   - The fields that will be constant throughout the whole
11373  *		     transaction are initialized.
11374  *		   - The scsi packet will be linked to the LUN structure
11375  *		     addressed by the transaction.
11376  *
11377  *     Argument:
11378  *		 *pkt		Pointer to a scsi_pkt structure.
11379  *		 callback	Resource callback (SLEEP_FUNC to wait).
11380  *		 arg		Argument for the callback (unused here).
11381  *
11382  * Return Value: 0	Success
11383  *		 !0	Failure
11384  *
11385  *	Context: Kernel context or interrupt context
11386  */
11387 /* ARGSUSED */
11388 static int
11389 fcp_pkt_setup(struct scsi_pkt *pkt,
11390     int (*callback)(caddr_t arg),
11391     caddr_t arg)
11392 {
11393 	struct fcp_pkt	*cmd;
11394 	struct fcp_port	*pptr;
11395 	struct fcp_lun	*plun;
11396 	struct fcp_tgt	*ptgt;
11397 	int		kf;
11398 	fc_packet_t	*fpkt;
11399 	fc_frame_hdr_t	*hp;
11400 
11401 	pptr = ADDR2FCP(&pkt->pkt_address);
11402 	plun = ADDR2LUN(&pkt->pkt_address);
11403 	ptgt = plun->lun_tgt;
11404 
11405 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11406 	fpkt = cmd->cmd_fp_pkt;
11407 
11408 	/*
11409 	 * this request is for dma allocation only
11410 	 */
11411 	/*
11412 	 * First step of fcp_scsi_init_pkt: pkt allocation
11413 	 * We determine if the caller is willing to wait for the
11414 	 * resources.
11415 	 */
11416 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11417 
11418 	/*
11419 	 * Selective zeroing of the pkt.
11420 	 */
11421 	cmd->cmd_back = NULL;
11422 	cmd->cmd_next = NULL;
11423 
11424 	/*
11425 	 * Zero out fcp command
11426 	 */
11427 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11428 
11429 	cmd->cmd_state = FCP_PKT_IDLE;
11430 
11431 	fpkt = cmd->cmd_fp_pkt;
11432 	fpkt->pkt_data_acc = NULL;
11433 
11434 	/*
11435 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11436 	 * could be destroyed, so we need to fail pkt_setup.
11437 	 */
11438 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11439 		return (-1);
11440 	}
11441 
11442 	mutex_enter(&ptgt->tgt_mutex);
11443 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11444 
11445 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11446 	    != FC_SUCCESS) {
11447 		mutex_exit(&ptgt->tgt_mutex);
11448 		return (-1);
11449 	}
11450 
11451 	mutex_exit(&ptgt->tgt_mutex);
11452 
11453 	/* Fill in the Fibre Channel frame header */
11454 	hp = &fpkt->pkt_cmd_fhdr;
11455 	hp->r_ctl = R_CTL_COMMAND;
11456 	hp->rsvd = 0;
11457 	hp->type = FC_TYPE_SCSI_FCP;
11458 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11459 	hp->seq_id = 0;
11460 	hp->df_ctl  = 0;
11461 	hp->seq_cnt = 0;
11462 	hp->ox_id = 0xffff;
11463 	hp->rx_id = 0xffff;
11464 	hp->ro = 0;
11465 
11466 	/*
11467 	 * A doubly linked list (cmd_forw, cmd_back) is built
11468 	 * out of every allocated packet on a per-lun basis
11469 	 *
11470 	 * The packets are maintained in the list so as to satisfy
11471 	 * scsi_abort() requests. At present (which is unlikely to
11472 	 * change in the future) nobody performs a real scsi_abort
11473 	 * in the SCSI target drivers (as they don't keep the packets
11474 	 * after doing scsi_transport - so they don't know how to
11475 	 * abort a packet other than sending a NULL to abort all
11476 	 * outstanding packets)
11477 	 */
11478 	mutex_enter(&plun->lun_mutex);
11479 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11480 		plun->lun_pkt_head->cmd_back = cmd;
11481 	} else {
11482 		plun->lun_pkt_tail = cmd;
11483 	}
11484 	plun->lun_pkt_head = cmd;
11485 	mutex_exit(&plun->lun_mutex);
11486 	return (0);
11487 }
11488 
11489 /*
11490  *     Function: fcp_pkt_teardown
11491  *
11492  *  Description: This function releases a scsi_pkt structure and all the
11493  *		 resources attached to it.
11494  *
11495  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11496  *
11497  * Return Value: None
11498  *
11499  *	Context: User, Kernel or Interrupt context.
11500  */
11501 static void
11502 fcp_pkt_teardown(struct scsi_pkt *pkt)
11503 {
11504 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11505 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11506 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11507 
11508 	/*
11509 	 * Remove the packet from the per-lun list
11510 	 */
11511 	mutex_enter(&plun->lun_mutex);
11512 	if (cmd->cmd_back) {
11513 		ASSERT(cmd != plun->lun_pkt_head);
11514 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11515 	} else {
11516 		ASSERT(cmd == plun->lun_pkt_head);
11517 		plun->lun_pkt_head = cmd->cmd_forw;
11518 	}
11519 
11520 	if (cmd->cmd_forw) {
11521 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11522 	} else {
11523 		ASSERT(cmd == plun->lun_pkt_tail);
11524 		plun->lun_pkt_tail = cmd->cmd_back;
11525 	}
11526 
11527 	mutex_exit(&plun->lun_mutex);
11528 
11529 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11530 }
11531 
11532 /*
11533  * Routine for reset notification setup, to register or cancel.
11534  * This function is called by SCSA
11535  */
11536 /*ARGSUSED*/
11537 static int
11538 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11539     void (*callback)(caddr_t), caddr_t arg)
11540 {
11541 	struct fcp_port *pptr = ADDR2FCP(ap);
11542 
11543 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11544 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11545 }
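
/*
 * A hedged example of the consumer side: a target driver registers for the
 * notification implemented above through scsi_reset_notify(9F), which SCSA
 * routes to this tran_reset_notify entry point.  The callback and soft
 * state names are illustrative.
 *
 *	(void) scsi_reset_notify(&devp->sd_address, SCSI_RESET_NOTIFY,
 *	    my_reset_cb, (caddr_t)un);
 *	...
 *	(void) scsi_reset_notify(&devp->sd_address, SCSI_RESET_CANCEL,
 *	    my_reset_cb, (caddr_t)un);
 */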
11546 
11547 
11548 static int
11549 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11550     ddi_eventcookie_t *event_cookiep)
11551 {
11552 	struct fcp_port *pptr = fcp_dip2port(dip);
11553 
11554 	if (pptr == NULL) {
11555 		return (DDI_FAILURE);
11556 	}
11557 
11558 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11559 	    event_cookiep, NDI_EVENT_NOPASS));
11560 }
11561 
11562 
11563 static int
11564 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11565     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11566     ddi_callback_id_t *cb_id)
11567 {
11568 	struct fcp_port *pptr = fcp_dip2port(dip);
11569 
11570 	if (pptr == NULL) {
11571 		return (DDI_FAILURE);
11572 	}
11573 
11574 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11575 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11576 }
11577 
11578 
11579 static int
11580 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11581 {
11582 
11583 	struct fcp_port *pptr = fcp_dip2port(dip);
11584 
11585 	if (pptr == NULL) {
11586 		return (DDI_FAILURE);
11587 	}
11588 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11589 }
11590 
11591 
11592 /*
11593  * called by the transport to post an event
11594  */
11595 static int
11596 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11597     ddi_eventcookie_t eventid, void *impldata)
11598 {
11599 	struct fcp_port *pptr = fcp_dip2port(dip);
11600 
11601 	if (pptr == NULL) {
11602 		return (DDI_FAILURE);
11603 	}
11604 
11605 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11606 	    eventid, impldata));
11607 }
11608 
11609 
11610 /*
11611  * In Fibre Channel a target in many cases has a one-to-one relation
11612  * with a port identifier (which is also known as D_ID, and as AL_PA
11613  * in private Loop).  On Fibre Channel-to-SCSI bridge boxes a target reset
11614  * will most likely result in resetting all LUNs (which means a reset will
11615  * occur on all the SCSI devices connected at the other end of the bridge).
11616  * That is a favorite topic for discussion; one can debate it as hotly as
11617  * one likes and come up with a solution that is arguably best to one's
11618  * own satisfaction.
11619  *
11620  * To stay on track and not digress much, here are the problems stated
11621  * briefly:
11622  *
11623  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but
11624  *	target drivers use RESET_TARGET even if their instance is on a
11625  *	LUN.  Doesn't that sound a bit broken?
11626  *
11627  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11628  *	control fields of an FCP_CMND structure. It should have been
11629  *	fixed right there, giving flexibility to the initiators to
11630  *	minimize havoc that could be caused by resetting a target.
11631  */
11632 static int
11633 fcp_reset_target(struct scsi_address *ap, int level)
11634 {
11635 	int			rval = FC_FAILURE;
11636 	char			lun_id[25];
11637 	struct fcp_port		*pptr = ADDR2FCP(ap);
11638 	struct fcp_lun	*plun = ADDR2LUN(ap);
11639 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11640 	struct scsi_pkt		*pkt;
11641 	struct fcp_pkt	*cmd;
11642 	struct fcp_rsp		*rsp;
11643 	uint32_t		tgt_cnt;
11644 	struct fcp_rsp_info	*rsp_info;
11645 	struct fcp_reset_elem	*p;
11646 	int			bval;
11647 
11648 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11649 	    KM_NOSLEEP)) == NULL) {
11650 		return (rval);
11651 	}
11652 
11653 	mutex_enter(&ptgt->tgt_mutex);
11654 	if (level == RESET_TARGET) {
11655 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11656 			mutex_exit(&ptgt->tgt_mutex);
11657 			kmem_free(p, sizeof (struct fcp_reset_elem));
11658 			return (rval);
11659 		}
11660 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11661 		(void) strcpy(lun_id, " ");
11662 	} else {
11663 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11664 			mutex_exit(&ptgt->tgt_mutex);
11665 			kmem_free(p, sizeof (struct fcp_reset_elem));
11666 			return (rval);
11667 		}
11668 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11669 
11670 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11671 	}
11672 	tgt_cnt = ptgt->tgt_change_cnt;
11673 
11674 	mutex_exit(&ptgt->tgt_mutex);
11675 
11676 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11677 	    0, 0, NULL, 0)) == NULL) {
11678 		kmem_free(p, sizeof (struct fcp_reset_elem));
11679 		mutex_enter(&ptgt->tgt_mutex);
11680 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11681 		mutex_exit(&ptgt->tgt_mutex);
11682 		return (rval);
11683 	}
11684 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11685 
11686 	/* fill in cmd part of packet */
11687 	cmd = PKT2CMD(pkt);
11688 	if (level == RESET_TARGET) {
11689 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11690 	} else {
11691 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11692 	}
11693 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11694 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11695 
11696 	/* prepare a packet for transport */
11697 	fcp_prepare_pkt(pptr, cmd, plun);
11698 
11699 	if (cmd->cmd_pkt->pkt_time) {
11700 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11701 	} else {
11702 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11703 	}
11704 
11705 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11706 	bval = fcp_dopoll(pptr, cmd);
11707 	fc_ulp_idle_port(pptr->port_fp_handle);
11708 
11709 	/* submit the packet */
11710 	if (bval == TRAN_ACCEPT) {
11711 		int error = 3;
11712 
11713 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11714 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11715 		    sizeof (struct fcp_rsp));
11716 
11717 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11718 			if (fcp_validate_fcp_response(rsp, pptr) ==
11719 			    FC_SUCCESS) {
11720 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11721 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11722 					    sizeof (struct fcp_rsp), rsp_info,
11723 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11724 					    sizeof (struct fcp_rsp_info));
11725 				}
11726 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11727 					rval = FC_SUCCESS;
11728 					error = 0;
11729 				} else {
11730 					error = 1;
11731 				}
11732 			} else {
11733 				error = 2;
11734 			}
11735 		}
11736 
11737 		switch (error) {
11738 		case 0:
11739 			fcp_log(CE_WARN, pptr->port_dip,
11740 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11741 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11742 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11743 			break;
11744 
11745 		case 1:
11746 			fcp_log(CE_WARN, pptr->port_dip,
11747 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11748 			    " response code=%x",
11749 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11750 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11751 			    rsp_info->rsp_code);
11752 			break;
11753 
11754 		case 2:
11755 			fcp_log(CE_WARN, pptr->port_dip,
11756 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11757 			    " Bad FCP response values: rsvd1=%x,"
11758 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11759 			    " rsplen=%x, senselen=%x",
11760 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11761 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11762 			    rsp->reserved_0, rsp->reserved_1,
11763 			    rsp->fcp_u.fcp_status.reserved_0,
11764 			    rsp->fcp_u.fcp_status.reserved_1,
11765 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11766 			break;
11767 
11768 		default:
11769 			fcp_log(CE_WARN, pptr->port_dip,
11770 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11771 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11772 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11773 			break;
11774 		}
11775 	}
11776 	scsi_destroy_pkt(pkt);
11777 
11778 	if (rval == FC_FAILURE) {
11779 		mutex_enter(&ptgt->tgt_mutex);
11780 		if (level == RESET_TARGET) {
11781 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11782 		} else {
11783 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11784 		}
11785 		mutex_exit(&ptgt->tgt_mutex);
11786 		kmem_free(p, sizeof (struct fcp_reset_elem));
11787 		return (rval);
11788 	}
11789 
11790 	mutex_enter(&pptr->port_mutex);
11791 	if (level == RESET_TARGET) {
11792 		p->lun = NULL;
11793 	} else {
11794 		p->lun = plun;
11795 	}
11796 	p->tgt = ptgt;
11799 	p->tgt_cnt = tgt_cnt;
11800 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11801 	p->next = pptr->port_reset_list;
11802 	pptr->port_reset_list = p;
11803 
11804 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11805 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11806 	    "Notify ssd of the reset to reinstate the reservations");
11807 
11808 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11809 	    &pptr->port_reset_notify_listf);
11810 
11811 	mutex_exit(&pptr->port_mutex);
11812 
11813 	return (rval);
11814 }
11815 
11816 
11817 /*
11818  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11819  * SCSI capabilities
11820  */
11821 /* ARGSUSED */
11822 static int
11823 fcp_commoncap(struct scsi_address *ap, char *cap,
11824     int val, int tgtonly, int doset)
11825 {
11826 	struct fcp_port		*pptr = ADDR2FCP(ap);
11827 	struct fcp_lun	*plun = ADDR2LUN(ap);
11828 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11829 	int			cidx;
11830 	int			rval = FALSE;
11831 
11832 	if (cap == (char *)0) {
11833 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11834 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11835 		    "fcp_commoncap: invalid arg");
11836 		return (rval);
11837 	}
11838 
11839 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11840 		return (UNDEFINED);
11841 	}
11842 
11843 	/*
11844 	 * Process setcap request.
11845 	 */
11846 	if (doset) {
11847 		/*
11848 		 * At present, we can only set binary (0/1) values
11849 		 */
11850 		switch (cidx) {
11851 		case SCSI_CAP_ARQ:
11852 			if (val == 0) {
11853 				rval = FALSE;
11854 			} else {
11855 				rval = TRUE;
11856 			}
11857 			break;
11858 
11859 		case SCSI_CAP_LUN_RESET:
11860 			if (val) {
11861 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11862 			} else {
11863 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11864 			}
11865 			rval = TRUE;
11866 			break;
11867 
11868 		case SCSI_CAP_SECTOR_SIZE:
11869 			rval = TRUE;
11870 			break;
11871 		default:
11872 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11873 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11874 			    "fcp_setcap: unsupported %d", cidx);
11875 			rval = UNDEFINED;
11876 			break;
11877 		}
11878 
11879 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11880 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11881 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11882 		    "0x%x/0x%x/0x%x/%d",
11883 		    cap, val, tgtonly, doset, rval);
11884 
11885 	} else {
11886 		/*
11887 		 * Process getcap request.
11888 		 */
11889 		switch (cidx) {
11890 		case SCSI_CAP_DMA_MAX:
11891 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11892 
11893 			/*
11894 			 * An adjustment is needed here: qlc reports a 64-bit
11895 			 * value while st expects an int, so we adjust it here
11896 			 * since nobody wants to touch this elsewhere.  That
11897 			 * still leaves a maximum single block length of 2GB,
11898 			 * which should last.
11899 			 */
11900 
11901 			if (rval == -1) {
11902 				rval = MAX_INT_DMA;
11903 			}
11904 
11905 			break;
11906 
11907 		case SCSI_CAP_INITIATOR_ID:
11908 			rval = pptr->port_id;
11909 			break;
11910 
11911 		case SCSI_CAP_ARQ:
11912 		case SCSI_CAP_RESET_NOTIFICATION:
11913 		case SCSI_CAP_TAGGED_QING:
11914 			rval = TRUE;
11915 			break;
11916 
11917 		case SCSI_CAP_SCSI_VERSION:
11918 			rval = 3;
11919 			break;
11920 
11921 		case SCSI_CAP_INTERCONNECT_TYPE:
11922 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11923 			    (ptgt->tgt_hard_addr == 0)) {
11924 				rval = INTERCONNECT_FABRIC;
11925 			} else {
11926 				rval = INTERCONNECT_FIBRE;
11927 			}
11928 			break;
11929 
11930 		case SCSI_CAP_LUN_RESET:
11931 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11932 			    TRUE : FALSE;
11933 			break;
11934 
11935 		default:
11936 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11937 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11938 			    "fcp_getcap: unsupported %d", cidx);
11939 			rval = UNDEFINED;
11940 			break;
11941 		}
11942 
11943 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11944 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11945 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11946 		    "0x%x/0x%x/0x%x/%d",
11947 		    cap, val, tgtonly, doset, rval);
11948 	}
11949 
11950 	return (rval);
11951 }
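
/*
 * A hedged usage sketch: target drivers reach fcp_commoncap() through
 * scsi_ifgetcap(9F) and scsi_ifsetcap(9F).  For example, enabling and then
 * checking the lun-reset capability handled above (the "un" soft state
 * field is illustrative):
 *
 *	if (scsi_ifsetcap(ap, "lun-reset", 1, 1) == 1) {
 *		un->un_lun_reset_enabled =
 *		    (scsi_ifgetcap(ap, "lun-reset", 1) == 1);
 *	}
 */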
11952 
11953 /*
11954  * called by the transport to get the port-wwn and lun
11955  * properties of this device, and to create a "name" based on them
11956  *
11957  * these properties don't exist on sun4m
11958  *
11959  * return 1 for success else return 0
11960  */
11961 /* ARGSUSED */
11962 static int
11963 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11964 {
11965 	int			i;
11966 	int			*lun;
11967 	int			numChars;
11968 	uint_t			nlun;
11969 	uint_t			count;
11970 	uint_t			nbytes;
11971 	uchar_t			*bytes;
11972 	uint16_t		lun_num;
11973 	uint32_t		tgt_id;
11974 	char			**conf_wwn;
11975 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11976 	uchar_t			barray[FC_WWN_SIZE];
11977 	dev_info_t		*tgt_dip;
11978 	struct fcp_tgt	*ptgt;
11979 	struct fcp_port	*pptr;
11980 	struct fcp_lun	*plun;
11981 
11982 	ASSERT(sd != NULL);
11983 	ASSERT(name != NULL);
11984 
11985 	tgt_dip = sd->sd_dev;
11986 	pptr = ddi_get_soft_state(fcp_softstate,
11987 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11988 	if (pptr == NULL) {
11989 		return (0);
11990 	}
11991 
11992 	ASSERT(tgt_dip != NULL);
11993 
11994 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11995 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11996 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11997 		name[0] = '\0';
11998 		return (0);
11999 	}
12000 
12001 	if (nlun == 0) {
12002 		ddi_prop_free(lun);
12003 		return (0);
12004 	}
12005 
12006 	lun_num = lun[0];
12007 	ddi_prop_free(lun);
12008 
12009 	/*
12010 	 * Look up the .conf WWN property
12011 	 */
12012 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12013 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12014 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12015 		ASSERT(count >= 1);
12016 
12017 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12018 		ddi_prop_free(conf_wwn);
12019 		mutex_enter(&pptr->port_mutex);
12020 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12021 			mutex_exit(&pptr->port_mutex);
12022 			return (0);
12023 		}
12024 		ptgt = plun->lun_tgt;
12025 		mutex_exit(&pptr->port_mutex);
12026 
12027 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12028 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12029 
12030 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12031 		    ptgt->tgt_hard_addr != 0) {
12032 			tgt_id = (uint32_t)fcp_alpa_to_switch[
12033 			    ptgt->tgt_hard_addr];
12034 		} else {
12035 			tgt_id = ptgt->tgt_d_id;
12036 		}
12037 
12038 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12039 		    TARGET_PROP, tgt_id);
12040 	}
12041 
12042 	/* get our port-wwn property */
12043 	bytes = NULL;
12044 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12045 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12046 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12047 		if (bytes != NULL) {
12048 			ddi_prop_free(bytes);
12049 		}
12050 		return (0);
12051 	}
12052 
12053 	for (i = 0; i < FC_WWN_SIZE; i++) {
12054 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12055 	}
12056 
12057 	/* Stick in the address of the form "wWWN,LUN" */
12058 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12059 
12060 	ASSERT(numChars < len);
12061 	if (numChars >= len) {
12062 		fcp_log(CE_WARN, pptr->port_dip,
12063 		    "!fcp_scsi_get_name: "
12064 		    "name parameter length too small, it needs to be %d",
12065 		    numChars+1);
12066 	}
12067 
12068 	ddi_prop_free(bytes);
12069 
12070 	return (1);
12071 }
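
/*
 * For example, LUN 2 behind a target whose port WWN is 21000020370b9f5b
 * would get the unit address "w21000020370b9f5b,2" (the WWN value is
 * illustrative).
 */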
12072 
12073 
12074 /*
12075  * called by the transport to get the SCSI target id value, returning
12076  * it in "name"
12077  *
12078  * this isn't needed/used on sun4m
12079  *
12080  * return 1 for success else return 0
12081  */
12082 /* ARGSUSED */
12083 static int
12084 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12085 {
12086 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
12087 	struct fcp_tgt	*ptgt;
12088 	int    numChars;
12089 
12090 	if (plun == NULL) {
12091 		return (0);
12092 	}
12093 
12094 	if ((ptgt = plun->lun_tgt) == NULL) {
12095 		return (0);
12096 	}
12097 
12098 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12099 
12100 	ASSERT(numChars < len);
12101 	if (numChars >= len) {
12102 		fcp_log(CE_WARN, NULL,
12103 		    "!fcp_scsi_get_bus_addr: "
12104 		    "name parameter length too small, it needs to be %d",
12105 		    numChars+1);
12106 	}
12107 
12108 	return (1);
12109 }
12110 
12111 
12112 /*
12113  * called internally to reset the link where the specified port lives
12114  */
12115 static int
12116 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12117 {
12118 	la_wwn_t		wwn;
12119 	struct fcp_lun	*plun;
12120 	struct fcp_tgt	*ptgt;
12121 
12122 	/* disable restart of lip if we're suspended */
12123 	mutex_enter(&pptr->port_mutex);
12124 
12125 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12126 	    FCP_STATE_POWER_DOWN)) {
12127 		mutex_exit(&pptr->port_mutex);
12128 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12129 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12130 		    "fcp_linkreset, fcp%d: link reset "
12131 		    "disabled due to DDI_SUSPEND",
12132 		    ddi_get_instance(pptr->port_dip));
12133 		return (FC_FAILURE);
12134 	}
12135 
12136 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12137 		mutex_exit(&pptr->port_mutex);
12138 		return (FC_SUCCESS);
12139 	}
12140 
12141 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12142 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12143 
12144 	/*
12145 	 * If ap == NULL assume local link reset.
12146 	 */
12147 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12148 		plun = ADDR2LUN(ap);
12149 		ptgt = plun->lun_tgt;
12150 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12151 	} else {
12152 		bzero((caddr_t)&wwn, sizeof (wwn));
12153 	}
12154 	mutex_exit(&pptr->port_mutex);
12155 
12156 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12157 }
12158 
12159 
12160 /*
12161  * called from fcp_port_attach() to resume a port
12162  * return DDI_* success/failure status
12163  * acquires and releases the global mutex
12164  * acquires and releases the port mutex
12165  */
12166 /*ARGSUSED*/
12167 
12168 static int
12169 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12170     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12171 {
12172 	int			res = DDI_FAILURE; /* default result */
12173 	struct fcp_port	*pptr;		/* port state ptr */
12174 	uint32_t		alloc_cnt;
12175 	uint32_t		max_cnt;
12176 	fc_portmap_t		*tmp_list = NULL;
12177 
12178 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12179 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12180 	    instance);
12181 
12182 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12183 		cmn_err(CE_WARN, "fcp: bad soft state");
12184 		return (res);
12185 	}
12186 
12187 	mutex_enter(&pptr->port_mutex);
12188 	switch (cmd) {
12189 	case FC_CMD_RESUME:
12190 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12191 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12192 		break;
12193 
12194 	case FC_CMD_POWER_UP:
12195 		/*
12196 		 * If the port is DDI_SUSPENded, defer rediscovery
12197 		 * until DDI_RESUME occurs
12198 		 */
12199 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12200 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12201 			mutex_exit(&pptr->port_mutex);
12202 			return (DDI_SUCCESS);
12203 		}
12204 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12205 	}
12206 	pptr->port_id = s_id;
12207 	pptr->port_state = FCP_STATE_INIT;
12208 	mutex_exit(&pptr->port_mutex);
12209 
12210 	/*
12211 	 * Make a copy of ulp_port_info as fctl allocates
12212 	 * a temp struct.
12213 	 */
12214 	(void) fcp_cp_pinfo(pptr, pinfo);
12215 
12216 	mutex_enter(&fcp_global_mutex);
12217 	if (fcp_watchdog_init++ == 0) {
12218 		fcp_watchdog_tick = fcp_watchdog_timeout *
12219 		    drv_usectohz(1000000);
12220 		fcp_watchdog_id = timeout(fcp_watch,
12221 		    NULL, fcp_watchdog_tick);
12222 	}
12223 	mutex_exit(&fcp_global_mutex);
12224 
12225 	/*
12226 	 * Handle various topologies and link states.
12227 	 */
12228 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12229 	case FC_STATE_OFFLINE:
12230 		/*
12231 		 * Wait for ONLINE, at which time a state
12232 		 * change will cause a statec_callback
12233 		 */
12234 		res = DDI_SUCCESS;
12235 		break;
12236 
12237 	case FC_STATE_ONLINE:
12238 
12239 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12240 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12241 			res = DDI_SUCCESS;
12242 			break;
12243 		}
12244 
12245 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12246 		    !fcp_enable_auto_configuration) {
12247 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12248 			if (tmp_list == NULL) {
12249 				if (!alloc_cnt) {
12250 					res = DDI_SUCCESS;
12251 				}
12252 				break;
12253 			}
12254 			max_cnt = alloc_cnt;
12255 		} else {
12256 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12257 
12258 			alloc_cnt = FCP_MAX_DEVICES;
12259 
12260 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12261 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12262 			    KM_NOSLEEP)) == NULL) {
12263 				fcp_log(CE_WARN, pptr->port_dip,
12264 				    "!fcp%d: failed to allocate portmap",
12265 				    instance);
12266 				break;
12267 			}
12268 
12269 			max_cnt = alloc_cnt;
12270 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12271 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12272 			    FC_SUCCESS) {
12273 				caddr_t msg;
12274 
12275 				(void) fc_ulp_error(res, &msg);
12276 
12277 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12278 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12279 				    "resume failed getportmap: reason=0x%x",
12280 				    res);
12281 
12282 				fcp_log(CE_WARN, pptr->port_dip,
12283 				    "!failed to get port map : %s", msg);
12284 				break;
12285 			}
12286 			if (max_cnt > alloc_cnt) {
12287 				alloc_cnt = max_cnt;
12288 			}
12289 		}
12290 
12291 		/*
12292 		 * do the SCSI device discovery and create
12293 		 * the devinfos
12294 		 */
12295 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12296 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12297 		    max_cnt, pptr->port_id);
12298 
12299 		res = DDI_SUCCESS;
12300 		break;
12301 
12302 	default:
12303 		fcp_log(CE_WARN, pptr->port_dip,
12304 		    "!fcp%d: invalid port state at attach=0x%x",
12305 		    instance, pptr->port_phys_state);
12306 
12307 		mutex_enter(&pptr->port_mutex);
12308 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12309 		mutex_exit(&pptr->port_mutex);
12310 		res = DDI_SUCCESS;
12311 
12312 		break;
12313 	}
12314 
12315 	if (tmp_list != NULL) {
12316 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12317 	}
12318 
12319 	return (res);
12320 }
12321 
12322 
12323 static void
12324 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12325 {
12326 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12327 	pptr->port_dip = pinfo->port_dip;
12328 	pptr->port_fp_handle = pinfo->port_handle;
12329 	if (pinfo->port_acc_attr != NULL) {
12330 		/*
12331 		 * FCA supports DMA
12332 		 */
12333 		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12334 		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12335 		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12336 		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12337 	}
12338 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12339 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12340 	pptr->port_phys_state = pinfo->port_state;
12341 	pptr->port_topology = pinfo->port_flags;
12342 	pptr->port_reset_action = pinfo->port_reset_action;
12343 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12344 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12345 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12346 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12347 
12348 	/* Clear FMA caps to avoid fm-capability ereport */
12349 	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12350 		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12351 	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12352 		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12353 	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12354 		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12355 }
12356 
12357 /*
12358  * If the element's wait field is set to 1, then
12359  * another thread is waiting for the operation to complete.  Once
12360  * it is complete, the waiting thread is signaled and the element is
12361  * freed by the waiting thread.  If the element's wait field is set to 0,
12362  * the element is freed here.
12363  */
12364 static void
12365 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12366 {
12367 	ASSERT(elem != NULL);
12368 	mutex_enter(&elem->mutex);
12369 	elem->result = result;
12370 	if (elem->wait) {
12371 		elem->wait = 0;
12372 		cv_signal(&elem->cv);
12373 		mutex_exit(&elem->mutex);
12374 	} else {
12375 		mutex_exit(&elem->mutex);
12376 		cv_destroy(&elem->cv);
12377 		mutex_destroy(&elem->mutex);
12378 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12379 	}
12380 }
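
/*
 * A hedged sketch of the waiting side of the protocol described above (the
 * allocation and dispatch live elsewhere in this driver; the names below
 * are illustrative):
 *
 *	elem->wait = 1;
 *	... dispatch elem to the hotplug taskq, which runs fcp_hp_task ...
 *	mutex_enter(&elem->mutex);
 *	while (elem->wait) {
 *		cv_wait(&elem->cv, &elem->mutex);
 *	}
 *	result = elem->result;
 *	mutex_exit(&elem->mutex);
 *	cv_destroy(&elem->cv);
 *	mutex_destroy(&elem->mutex);
 *	kmem_free(elem, sizeof (struct fcp_hp_elem));
 */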
12381 
12382 /*
12383  * This function is invoked from the taskq thread to allocate
12384  * devinfo nodes and to online/offline them.
12385  */
12386 static void
12387 fcp_hp_task(void *arg)
12388 {
12389 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12390 	struct fcp_lun	*plun = elem->lun;
12391 	struct fcp_port		*pptr = elem->port;
12392 	int			result;
12393 
12394 	ASSERT(elem->what == FCP_ONLINE ||
12395 	    elem->what == FCP_OFFLINE ||
12396 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12397 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12398 
12399 	mutex_enter(&pptr->port_mutex);
12400 	mutex_enter(&plun->lun_mutex);
12401 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12402 	    plun->lun_event_count != elem->event_cnt) ||
12403 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12404 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12405 		mutex_exit(&plun->lun_mutex);
12406 		mutex_exit(&pptr->port_mutex);
12407 		fcp_process_elem(elem, NDI_FAILURE);
12408 		return;
12409 	}
12410 	mutex_exit(&plun->lun_mutex);
12411 	mutex_exit(&pptr->port_mutex);
12412 
12413 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12414 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12415 	fcp_process_elem(elem, result);
12416 }
12417 
12418 
12419 static child_info_t *
12420 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12421     int tcount)
12422 {
12423 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12424 
12425 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12426 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12427 
12428 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12429 		/*
12430 		 * Child has not been created yet. Create the child device
12431 		 * based on the per-Lun flags.
12432 		 */
12433 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12434 			plun->lun_cip =
12435 			    CIP(fcp_create_dip(plun, lcount, tcount));
12436 			plun->lun_mpxio = 0;
12437 		} else {
12438 			plun->lun_cip =
12439 			    CIP(fcp_create_pip(plun, lcount, tcount));
12440 			plun->lun_mpxio = 1;
12441 		}
12442 	} else {
12443 		plun->lun_cip = cip;
12444 	}
12445 
12446 	return (plun->lun_cip);
12447 }
12448 
12449 
12450 static int
12451 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12452 {
12453 	int		rval = FC_FAILURE;
12454 	dev_info_t	*pdip;
12455 	struct dev_info	*dip;
12456 	int		circular;
12457 
12458 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12459 
12460 	pdip = plun->lun_tgt->tgt_port->port_dip;
12461 
12462 	if (plun->lun_cip == NULL) {
12463 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12464 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12465 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12466 		    "plun: %p lun state: %x num: %d target state: %x",
12467 		    plun, plun->lun_state, plun->lun_num,
12468 		    plun->lun_tgt->tgt_port->port_state);
12469 		return (rval);
12470 	}
12471 	ndi_devi_enter(pdip, &circular);
12472 	dip = DEVI(pdip)->devi_child;
12473 	while (dip) {
12474 		if (dip == DEVI(cdip)) {
12475 			rval = FC_SUCCESS;
12476 			break;
12477 		}
12478 		dip = dip->devi_sibling;
12479 	}
12480 	ndi_devi_exit(pdip, circular);
12481 	return (rval);
12482 }
12483 
12484 static int
12485 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12486 {
12487 	int		rval = FC_FAILURE;
12488 
12489 	ASSERT(plun != NULL);
12490 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12491 
12492 	if (plun->lun_mpxio == 0) {
12493 		rval = fcp_is_dip_present(plun, DIP(cip));
12494 	} else {
12495 		rval = fcp_is_pip_present(plun, PIP(cip));
12496 	}
12497 
12498 	return (rval);
12499 }
12500 
12501 /*
12502  *     Function: fcp_create_dip
12503  *
12504  *  Description: Creates a dev_info_t structure for the LUN specified by the
12505  *		 caller.
12506  *
12507  *     Argument: plun		Lun structure
12508  *		 link_cnt	Link state count.
12509  *		 tgt_cnt	Target state change count.
12510  *
12511  * Return Value: NULL if it failed
12512  *		 dev_info_t structure address if it succeeded
12513  *
12514  *	Context: Kernel context
12515  */
12516 static dev_info_t *
12517 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12518 {
12519 	int			failure = 0;
12520 	uint32_t		tgt_id;
12521 	uint64_t		sam_lun;
12522 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12523 	struct fcp_port	*pptr = ptgt->tgt_port;
12524 	dev_info_t		*pdip = pptr->port_dip;
12525 	dev_info_t		*cdip = NULL;
12526 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12527 	char			*nname = NULL;
12528 	char			**compatible = NULL;
12529 	int			ncompatible;
12530 	char			*scsi_binding_set;
12531 	char			t_pwwn[17];
12532 
12533 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12534 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12535 
12536 	/* get the 'scsi-binding-set' property */
12537 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12538 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12539 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12540 		scsi_binding_set = NULL;
12541 	}
12542 
12543 	/* determine the node name and compatible */
12544 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12545 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12546 	if (scsi_binding_set) {
12547 		ddi_prop_free(scsi_binding_set);
12548 	}
12549 
12550 	if (nname == NULL) {
12551 #ifdef	DEBUG
12552 		cmn_err(CE_WARN, "%s%d: no driver for "
12553 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12554 		    "	 compatible: %s",
12555 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12556 		    ptgt->tgt_port_wwn.raw_wwn[0],
12557 		    ptgt->tgt_port_wwn.raw_wwn[1],
12558 		    ptgt->tgt_port_wwn.raw_wwn[2],
12559 		    ptgt->tgt_port_wwn.raw_wwn[3],
12560 		    ptgt->tgt_port_wwn.raw_wwn[4],
12561 		    ptgt->tgt_port_wwn.raw_wwn[5],
12562 		    ptgt->tgt_port_wwn.raw_wwn[6],
12563 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12564 		    *compatible);
12565 #endif	/* DEBUG */
12566 		failure++;
12567 		goto end_of_fcp_create_dip;
12568 	}
12569 
12570 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12571 
12572 	/*
12573 	 * If the old_dip does not match the cdip, some property has
12574 	 * changed.  Since we'll be using the cdip, we need to offline the
12575 	 * old_dip.  If the state contains FCP_LUN_CHANGED, the dtype for
12576 	 * the device has been updated.  Offline the old device and create
12577 	 * a new device with the new device type.
12578 	 * Refer to bug: 4764752
12579 	 */
12580 	if (old_dip && (cdip != old_dip ||
12581 	    plun->lun_state & FCP_LUN_CHANGED)) {
12582 		plun->lun_state &= ~(FCP_LUN_INIT);
12583 		mutex_exit(&plun->lun_mutex);
12584 		mutex_exit(&pptr->port_mutex);
12585 
12586 		mutex_enter(&ptgt->tgt_mutex);
12587 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12588 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12589 		mutex_exit(&ptgt->tgt_mutex);
12590 
12591 #ifdef DEBUG
12592 		if (cdip != NULL) {
12593 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12594 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12595 			    "Old dip=%p; New dip=%p don't match", old_dip,
12596 			    cdip);
12597 		} else {
12598 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12599 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12600 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12601 		}
12602 #endif
12603 
12604 		mutex_enter(&pptr->port_mutex);
12605 		mutex_enter(&plun->lun_mutex);
12606 	}
12607 
12608 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12609 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12610 		if (ndi_devi_alloc(pptr->port_dip, nname,
12611 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12612 			failure++;
12613 			goto end_of_fcp_create_dip;
12614 		}
12615 	}
12616 
12617 	/*
12618 	 * Previously all the properties for the devinfo were destroyed here
12619 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12620 	 * the devid property (and other properties established by the target
12621 	 * driver or framework) which the code does not always recreate, this
12622 	 * call was removed.
12623 	 * This opens a theoretical possibility that we may return with a
12624 	 * stale devid on the node if the scsi entity behind the fibre channel
12625 	 * lun has changed.
12626 	 */
12627 
12628 	/* decorate the node with compatible */
12629 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12630 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12631 		failure++;
12632 		goto end_of_fcp_create_dip;
12633 	}
12634 
12635 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12636 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12637 		failure++;
12638 		goto end_of_fcp_create_dip;
12639 	}
12640 
12641 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12642 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12643 		failure++;
12644 		goto end_of_fcp_create_dip;
12645 	}
12646 
12647 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12648 	t_pwwn[16] = '\0';
12649 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12650 	    != DDI_PROP_SUCCESS) {
12651 		failure++;
12652 		goto end_of_fcp_create_dip;
12653 	}
12654 
12655 	/*
12656 	 * If there is no hard address, we might have to deal with
12657 	 * that by using the WWN.  Having said that, it is important
12658 	 * to recognize this problem early so ssd can be informed of
12659 	 * the right interconnect type.
12660 	 */
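	/*
	 * When the topology is not a fabric and the target reports a hard
	 * address, the AL_PA is mapped to a target number through the
	 * fcp_alpa_to_switch[] table below; otherwise the 24-bit D_ID is
	 * used directly as the target id.
	 */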
12661 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12662 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12663 	} else {
12664 		tgt_id = ptgt->tgt_d_id;
12665 	}
12666 
12667 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12668 	    tgt_id) != DDI_PROP_SUCCESS) {
12669 		failure++;
12670 		goto end_of_fcp_create_dip;
12671 	}
12672 
12673 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12674 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12675 		failure++;
12676 		goto end_of_fcp_create_dip;
12677 	}
12678 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12679 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12680 	    sam_lun) != DDI_PROP_SUCCESS) {
12681 		failure++;
12682 		goto end_of_fcp_create_dip;
12683 	}
12684 
12685 end_of_fcp_create_dip:
12686 	scsi_hba_nodename_compatible_free(nname, compatible);
12687 
12688 	if (cdip != NULL && failure) {
12689 		(void) ndi_prop_remove_all(cdip);
12690 		(void) ndi_devi_free(cdip);
12691 		cdip = NULL;
12692 	}
12693 
12694 	return (cdip);
12695 }
12696 
12697 /*
12698  *     Function: fcp_create_pip
12699  *
12700  *  Description: Creates a Path Id for the LUN specified by the caller.
12701  *  Description: Creates an mdi_pathinfo node for the specified LUN.
12702  *     Argument: plun		Lun structure
12703  *		 link_cnt	Link state count.
12704  *		 tgt_cnt	Target state count.
12705  *
12706  * Return Value: NULL if it failed
12707  *		 mdi_pathinfo_t structure address if it succeeded
12708  *
12709  *	Context: Kernel context
12710  */
12711 static mdi_pathinfo_t *
12712 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12713 {
12714 	int			i;
12715 	char			buf[MAXNAMELEN];
12716 	char			uaddr[MAXNAMELEN];
12717 	int			failure = 0;
12718 	uint32_t		tgt_id;
12719 	uint64_t		sam_lun;
12720 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12721 	struct fcp_port	*pptr = ptgt->tgt_port;
12722 	dev_info_t		*pdip = pptr->port_dip;
12723 	mdi_pathinfo_t		*pip = NULL;
12724 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12725 	char			*nname = NULL;
12726 	char			**compatible = NULL;
12727 	int			ncompatible;
12728 	char			*scsi_binding_set;
12729 	char			t_pwwn[17];
12730 
12731 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12732 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12733 
12734 	scsi_binding_set = "vhci";
12735 
12736 	/* determine the node name and compatible */
12737 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12738 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12739 
12740 	if (nname == NULL) {
12741 #ifdef	DEBUG
12742 		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12743 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12744 		    "	 compatible: %s",
12745 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12746 		    ptgt->tgt_port_wwn.raw_wwn[0],
12747 		    ptgt->tgt_port_wwn.raw_wwn[1],
12748 		    ptgt->tgt_port_wwn.raw_wwn[2],
12749 		    ptgt->tgt_port_wwn.raw_wwn[3],
12750 		    ptgt->tgt_port_wwn.raw_wwn[4],
12751 		    ptgt->tgt_port_wwn.raw_wwn[5],
12752 		    ptgt->tgt_port_wwn.raw_wwn[6],
12753 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12754 		    *compatible);
12755 #endif	/* DEBUG */
12756 		failure++;
12757 		goto end_of_fcp_create_pip;
12758 	}
12759 
12760 	pip = fcp_find_existing_pip(plun, pdip);
12761 
12762 	/*
12763 	 * If the old_pip does not match the pip, some property has
12764 	 * changed.  Since we'll be using the pip, we need to offline the
12765 	 * old_pip.  If the state contains FCP_LUN_CHANGED, the dtype for
12766 	 * the device has been updated.  Offline the old device and create
12767 	 * a new device with the new device type.
12768 	 * Refer to bug: 4764752
12769 	 */
12770 	if (old_pip && (pip != old_pip ||
12771 	    plun->lun_state & FCP_LUN_CHANGED)) {
12772 		plun->lun_state &= ~(FCP_LUN_INIT);
12773 		mutex_exit(&plun->lun_mutex);
12774 		mutex_exit(&pptr->port_mutex);
12775 
12776 		mutex_enter(&ptgt->tgt_mutex);
12777 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12778 		    FCP_OFFLINE, lcount, tcount,
12779 		    NDI_DEVI_REMOVE, 0);
12780 		mutex_exit(&ptgt->tgt_mutex);
12781 
12782 		if (pip != NULL) {
12783 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12784 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12785 			    "Old pip=%p; New pip=%p don't match",
12786 			    old_pip, pip);
12787 		} else {
12788 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12789 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12790 			    "Old pip=%p; New pip=NULL don't match",
12791 			    old_pip);
12792 		}
12793 
12794 		mutex_enter(&pptr->port_mutex);
12795 		mutex_enter(&plun->lun_mutex);
12796 	}
12797 
12798 	/*
12799 	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size, does
12800 	 * not depend on the target, the same truncation should not
12801 	 * happen here UNLESS the standards change the FC_WWN_SIZE
12802 	 * value to something larger than MAXNAMELEN (currently 255
12803 	 * bytes).
12804 	 */
12805 
12806 	for (i = 0; i < FC_WWN_SIZE; i++) {
12807 		(void) sprintf(&buf[i << 1], "%02x",
12808 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12809 	}
12810 
12811 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12812 	    buf, plun->lun_num);
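	/*
	 * For illustration, with made-up values: a target port WWN of
	 * 21:00:00:e0:8b:12:34:56 and LUN number 0 would yield the unit
	 * address "w210000e08b123456,0".
	 */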
12813 
12814 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12815 		/*
12816 		 * Release the locks before calling into
12817 		 * mdi_pi_alloc_compatible() since this can result in a
12818 		 * callback into fcp which can result in a deadlock
12819 		 * (see bug # 4870272).
12820 		 *
12821 		 * Basically, what we are trying to avoid is the scenario where
12822 		 * one thread does ndi_devi_enter() and tries to grab
12823 		 * fcp_mutex and another does it the other way round.
12824 		 *
12825 		 * But before we do that, make sure that nobody releases the
12826 		 * port in the meantime. We can do this by setting a flag.
12827 		 */
12828 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12829 		pptr->port_state |= FCP_STATE_IN_MDI;
12830 		mutex_exit(&plun->lun_mutex);
12831 		mutex_exit(&pptr->port_mutex);
12832 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12833 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12834 			fcp_log(CE_WARN, pptr->port_dip,
12835 			    "!path alloc failed:0x%x", plun);
12836 			    "!path alloc failed: 0x%p", (void *)plun);
12837 			mutex_enter(&plun->lun_mutex);
12838 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12839 			failure++;
12840 			goto end_of_fcp_create_pip;
12841 		}
12842 		mutex_enter(&pptr->port_mutex);
12843 		mutex_enter(&plun->lun_mutex);
12844 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12845 	} else {
12846 		(void) mdi_prop_remove(pip, NULL);
12847 	}
12848 
12849 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12850 
12851 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12852 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12853 	    != DDI_PROP_SUCCESS) {
12854 		failure++;
12855 		goto end_of_fcp_create_pip;
12856 	}
12857 
12858 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12859 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12860 	    != DDI_PROP_SUCCESS) {
12861 		failure++;
12862 		goto end_of_fcp_create_pip;
12863 	}
12864 
12865 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12866 	t_pwwn[16] = '\0';
12867 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12868 	    != DDI_PROP_SUCCESS) {
12869 		failure++;
12870 		goto end_of_fcp_create_pip;
12871 	}
12872 
12873 	/*
12874 	 * If there is no hard address, we might have to deal with
12875 	 * that by using the WWN.  Having said that, it is important
12876 	 * to recognize this problem early so ssd can be informed of
12877 	 * the right interconnect type.
12878 	 */
12879 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12880 	    ptgt->tgt_hard_addr != 0) {
12881 		tgt_id = (uint32_t)
12882 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12883 	} else {
12884 		tgt_id = ptgt->tgt_d_id;
12885 	}
12886 
12887 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12888 	    != DDI_PROP_SUCCESS) {
12889 		failure++;
12890 		goto end_of_fcp_create_pip;
12891 	}
12892 
12893 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12894 	    != DDI_PROP_SUCCESS) {
12895 		failure++;
12896 		goto end_of_fcp_create_pip;
12897 	}
12898 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12899 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12900 	    != DDI_PROP_SUCCESS) {
12901 		failure++;
12902 		goto end_of_fcp_create_pip;
12903 	}
12904 
12905 end_of_fcp_create_pip:
12906 	scsi_hba_nodename_compatible_free(nname, compatible);
12907 
12908 	if (pip != NULL && failure) {
12909 		(void) mdi_prop_remove(pip, NULL);
12910 		mutex_exit(&plun->lun_mutex);
12911 		mutex_exit(&pptr->port_mutex);
12912 		(void) mdi_pi_free(pip, 0);
12913 		mutex_enter(&pptr->port_mutex);
12914 		mutex_enter(&plun->lun_mutex);
12915 		pip = NULL;
12916 	}
12917 
12918 	return (pip);
12919 }
12920 
12921 static dev_info_t *
12922 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12923 {
12924 	uint_t			nbytes;
12925 	uchar_t			*bytes;
12926 	uint_t			nwords;
12927 	uint32_t		tgt_id;
12928 	int			*words;
12929 	dev_info_t		*cdip;
12930 	dev_info_t		*ndip;
12931 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12932 	struct fcp_port	*pptr = ptgt->tgt_port;
12933 	int			circular;
12934 
12935 	ndi_devi_enter(pdip, &circular);
12936 
12937 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12938 	while ((cdip = ndip) != NULL) {
12939 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12940 
12941 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12942 			continue;
12943 		}
12944 
12945 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12946 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12947 		    &nbytes) != DDI_PROP_SUCCESS) {
12948 			continue;
12949 		}
12950 
12951 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12952 			if (bytes != NULL) {
12953 				ddi_prop_free(bytes);
12954 			}
12955 			continue;
12956 		}
12957 		ASSERT(bytes != NULL);
12958 
12959 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12960 			ddi_prop_free(bytes);
12961 			continue;
12962 		}
12963 
12964 		ddi_prop_free(bytes);
12965 
12966 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12967 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12968 		    &nbytes) != DDI_PROP_SUCCESS) {
12969 			continue;
12970 		}
12971 
12972 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12973 			if (bytes != NULL) {
12974 				ddi_prop_free(bytes);
12975 			}
12976 			continue;
12977 		}
12978 		ASSERT(bytes != NULL);
12979 
12980 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12981 			ddi_prop_free(bytes);
12982 			continue;
12983 		}
12984 
12985 		ddi_prop_free(bytes);
12986 
12987 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12988 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12989 		    &nwords) != DDI_PROP_SUCCESS) {
12990 			continue;
12991 		}
12992 
12993 		if (nwords != 1 || words == NULL) {
12994 			if (words != NULL) {
12995 				ddi_prop_free(words);
12996 			}
12997 			continue;
12998 		}
12999 		ASSERT(words != NULL);
13000 
13001 		/*
13002 		 * If there is no hard address, we might have to deal with
13003 		 * that by using the WWN.  Having said that, it is important
13004 		 * to recognize this problem early so ssd can be informed of
13005 		 * the right interconnect type.
13006 		 */
13007 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
13008 		    ptgt->tgt_hard_addr != 0) {
13009 			tgt_id =
13010 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
13011 		} else {
13012 			tgt_id = ptgt->tgt_d_id;
13013 		}
13014 
13015 		if (tgt_id != (uint32_t)*words) {
13016 			ddi_prop_free(words);
13017 			continue;
13018 		}
13019 		ddi_prop_free(words);
13020 
13021 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13022 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13023 		    &nwords) != DDI_PROP_SUCCESS) {
13024 			continue;
13025 		}
13026 
13027 		if (nwords != 1 || words == NULL) {
13028 			if (words != NULL) {
13029 				ddi_prop_free(words);
13030 			}
13031 			continue;
13032 		}
13033 		ASSERT(words != NULL);
13034 
13035 		if (plun->lun_num == (uint16_t)*words) {
13036 			ddi_prop_free(words);
13037 			break;
13038 		}
13039 		ddi_prop_free(words);
13040 	}
13041 	ndi_devi_exit(pdip, circular);
13042 
13043 	return (cdip);
13044 }
13045 
13046 
13047 static int
13048 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13049 {
13050 	dev_info_t	*pdip;
13051 	char		buf[MAXNAMELEN];
13052 	char		uaddr[MAXNAMELEN];
13053 	int		rval = FC_FAILURE;
13054 
13055 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13056 
13057 	pdip = plun->lun_tgt->tgt_port->port_dip;
13058 
13059 	/*
13060 	 * Check if pip (and not plun->lun_cip) is NULL.  plun->lun_cip
13061 	 * can be non-NULL even when the LUN is not there, as in the case
13062 	 * where a LUN is configured and then deleted on the device end
13063 	 * (the T3/T4 case).  In such cases, pip will be NULL.
13064 	 *
13065 	 * If the device generates an RSCN, the LUN ends up getting
13066 	 * offlined when it disappears, and a new LUN gets created when
13067 	 * it is rediscovered on the device.  If we checked for lun_cip
13068 	 * here, the LUN would not end up getting onlined since this
13069 	 * function would end up returning FC_SUCCESS.
13070 	 *
13071 	 * The behavior is different on other devices.  For instance, on
13072 	 * an HDS there was no RSCN generated by the device, but the next
13073 	 * I/O generated a check condition and rediscovery got triggered
13074 	 * that way.  So, in such cases, this path will not be exercised.
13075 	 */
13076 	if (pip == NULL) {
13077 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13078 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
13079 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
13080 		    "plun: %p lun state: %x num: %d target state: %x",
13081 		    plun, plun->lun_state, plun->lun_num,
13082 		    plun->lun_tgt->tgt_port->port_state);
13083 		return (rval);
13084 	}
13085 
13086 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13087 
13088 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13089 
13090 	if (plun->lun_old_guid) {
13091 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13092 			rval = FC_SUCCESS;
13093 		}
13094 	} else {
13095 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13096 			rval = FC_SUCCESS;
13097 		}
13098 	}
13099 	return (rval);
13100 }
13101 
13102 static mdi_pathinfo_t *
13103 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13104 {
13105 	char			buf[MAXNAMELEN];
13106 	char			uaddr[MAXNAMELEN];
13107 	mdi_pathinfo_t		*pip;
13108 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13109 	struct fcp_port	*pptr = ptgt->tgt_port;
13110 
13111 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13112 
13113 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13114 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13115 
13116 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13117 
13118 	return (pip);
13119 }
13120 
13121 
13122 static int
13123 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13124     int tcount, int flags, int *circ)
13125 {
13126 	int			rval;
13127 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13128 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13129 	dev_info_t		*cdip = NULL;
13130 
13131 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13132 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13133 
13134 	if (plun->lun_cip == NULL) {
13135 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13136 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13137 		    "fcp_online_child: plun->lun_cip is NULL: "
13138 		    "plun: %p state: %x num: %d target state: %x",
13139 		    plun, plun->lun_state, plun->lun_num,
13140 		    plun->lun_tgt->tgt_port->port_state);
13141 		return (NDI_FAILURE);
13142 	}
13143 again:
13144 	if (plun->lun_mpxio == 0) {
13145 		cdip = DIP(cip);
13146 		mutex_exit(&plun->lun_mutex);
13147 		mutex_exit(&pptr->port_mutex);
13148 
13149 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13150 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13151 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13152 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13153 
13154 		/*
13155 		 * We could check for FCP_LUN_INIT here, but the chances
13156 		 * of getting here when it's already in FCP_LUN_INIT
13157 		 * are rare, and a duplicate ndi_devi_online wouldn't
13158 		 * hurt either (as the node would already have been
13159 		 * in CF2).
13160 		 */
13161 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13162 			rval = ndi_devi_bind_driver(cdip, flags);
13163 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13164 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13165 			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13166 		} else {
13167 			rval = ndi_devi_online(cdip, flags);
13168 		}
13169 
13170 		/*
13171 		 * We log the message into the trace buffer if the device
13172 		 * is "ses" and into syslog for any other device
13173 		 * type.  This is to prevent the ndi_devi_online failure
13174 		 * message that appears for V880/A5K ses devices.
13175 		 */
13176 		if (rval == NDI_SUCCESS) {
13177 			mutex_enter(&ptgt->tgt_mutex);
13178 			plun->lun_state |= FCP_LUN_INIT;
13179 			mutex_exit(&ptgt->tgt_mutex);
13180 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13181 			fcp_log(CE_NOTE, pptr->port_dip,
13182 			    "!ndi_devi_online:"
13183 			    " failed for %s: target=%x lun=%x %x",
13184 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13185 			    plun->lun_num, rval);
13186 		} else {
13187 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13188 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13189 			    " !ndi_devi_online:"
13190 			    " failed for %s: target=%x lun=%x %x",
13191 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13192 			    plun->lun_num, rval);
13193 		}
13194 	} else {
13195 		cdip = mdi_pi_get_client(PIP(cip));
13196 		mutex_exit(&plun->lun_mutex);
13197 		mutex_exit(&pptr->port_mutex);
13198 
13199 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13200 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13201 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13202 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13203 
13204 		/*
13205 		 * Hold path and exit phci to avoid deadlock with power
13206 		 * management code during mdi_pi_online.
13207 		 */
13208 		mdi_hold_path(PIP(cip));
13209 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13210 
13211 		rval = mdi_pi_online(PIP(cip), flags);
13212 
13213 		mdi_devi_enter_phci(pptr->port_dip, circ);
13214 		mdi_rele_path(PIP(cip));
13215 
13216 		if (rval == MDI_SUCCESS) {
13217 			mutex_enter(&ptgt->tgt_mutex);
13218 			plun->lun_state |= FCP_LUN_INIT;
13219 			mutex_exit(&ptgt->tgt_mutex);
13220 
13221 			/*
13222 			 * Clear MPxIO path permanent disable in case
13223 			 * fcp hotplug dropped the offline event.
13224 			 */
13225 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13226 
13227 		} else if (rval == MDI_NOT_SUPPORTED) {
13228 			child_info_t	*old_cip = cip;
13229 
13230 			/*
13231 			 * MPxIO does not support this device yet.
13232 			 * Enumerate in legacy mode.
13233 			 */
13234 			mutex_enter(&pptr->port_mutex);
13235 			mutex_enter(&plun->lun_mutex);
13236 			plun->lun_mpxio = 0;
13237 			plun->lun_cip = NULL;
13238 			cdip = fcp_create_dip(plun, lcount, tcount);
13239 			plun->lun_cip = cip = CIP(cdip);
13240 			if (cip == NULL) {
13241 				fcp_log(CE_WARN, pptr->port_dip,
13242 				    "!fcp_online_child: "
13243 				    "Create devinfo failed for LU=%p", plun);
13244 				mutex_exit(&plun->lun_mutex);
13245 
13246 				mutex_enter(&ptgt->tgt_mutex);
13247 				plun->lun_state |= FCP_LUN_OFFLINE;
13248 				mutex_exit(&ptgt->tgt_mutex);
13249 
13250 				mutex_exit(&pptr->port_mutex);
13251 
13252 				/*
13253 				 * free the mdi_pathinfo node
13254 				 */
13255 				(void) mdi_pi_free(PIP(old_cip), 0);
13256 			} else {
13257 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13258 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13259 				    "fcp_online_child: creating devinfo "
13260 				    "node 0x%p for plun 0x%p",
13261 				    cip, plun);
13262 				mutex_exit(&plun->lun_mutex);
13263 				mutex_exit(&pptr->port_mutex);
13264 				/*
13265 				 * free the mdi_pathinfo node
13266 				 */
13267 				(void) mdi_pi_free(PIP(old_cip), 0);
13268 				mutex_enter(&pptr->port_mutex);
13269 				mutex_enter(&plun->lun_mutex);
13270 				goto again;
13271 			}
13272 		} else {
13273 			if (cdip) {
13274 				fcp_log(CE_NOTE, pptr->port_dip,
13275 				    "!fcp_online_child: mdi_pi_online:"
13276 				    " failed for %s: target=%x lun=%x %x",
13277 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13278 				    plun->lun_num, rval);
13279 			}
13280 		}
13281 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13282 	}
13283 
13284 	if (rval == NDI_SUCCESS) {
13285 		if (cdip) {
13286 			(void) ndi_event_retrieve_cookie(
13287 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13288 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13289 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13290 			    cdip, fcp_insert_eid, NULL);
13291 		}
13292 	}
13293 	mutex_enter(&pptr->port_mutex);
13294 	mutex_enter(&plun->lun_mutex);
13295 	return (rval);
13296 }
13297 
13298 /* ARGSUSED */
13299 static int
13300 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13301     int tcount, int flags, int *circ)
13302 {
13303 	int rval;
13304 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13305 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13306 	dev_info_t		*cdip;
13307 
13308 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13309 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13310 
13311 	if (plun->lun_cip == NULL) {
13312 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13313 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13314 		    "fcp_offline_child: plun->lun_cip is NULL: "
13315 		    "plun: %p lun state: %x num: %d target state: %x",
13316 		    plun, plun->lun_state, plun->lun_num,
13317 		    plun->lun_tgt->tgt_port->port_state);
13318 		return (NDI_FAILURE);
13319 	}
13320 
13321 	if (plun->lun_mpxio == 0) {
13322 		cdip = DIP(cip);
13323 		mutex_exit(&plun->lun_mutex);
13324 		mutex_exit(&pptr->port_mutex);
13325 		rval = ndi_devi_offline(DIP(cip), flags);
13326 		if (rval != NDI_SUCCESS) {
13327 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13328 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13329 			    "fcp_offline_child: ndi_devi_offline failed "
13330 			    "rval=%x cip=%p", rval, cip);
13331 		}
13332 	} else {
13333 		cdip = mdi_pi_get_client(PIP(cip));
13334 		mutex_exit(&plun->lun_mutex);
13335 		mutex_exit(&pptr->port_mutex);
13336 
13337 		/*
13338 		 * Exit phci to avoid deadlock with power management code
13339 		 * during mdi_pi_offline
13340 		 */
13341 		mdi_hold_path(PIP(cip));
13342 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13343 
13344 		rval = mdi_pi_offline(PIP(cip), flags);
13345 
13346 		mdi_devi_enter_phci(pptr->port_dip, circ);
13347 		mdi_rele_path(PIP(cip));
13348 
13349 		if (rval == MDI_SUCCESS) {
13350 			/*
13351 			 * Clear MPxIO path permanent disable as the path is
13352 			 * already offlined.
13353 			 */
13354 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13355 
13356 			if (flags & NDI_DEVI_REMOVE) {
13357 				(void) mdi_pi_free(PIP(cip), 0);
13358 			}
13359 		} else {
13360 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13361 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13362 			    "fcp_offline_child: mdi_pi_offline failed "
13363 			    "rval=%x cip=%p", rval, cip);
13364 		}
13365 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13366 	}
13367 
13368 	mutex_enter(&ptgt->tgt_mutex);
13369 	plun->lun_state &= ~FCP_LUN_INIT;
13370 	mutex_exit(&ptgt->tgt_mutex);
13371 
13372 	mutex_enter(&pptr->port_mutex);
13373 	mutex_enter(&plun->lun_mutex);
13374 
13375 	if (rval == NDI_SUCCESS) {
13376 		cdip = NULL;
13377 		if (flags & NDI_DEVI_REMOVE) {
13378 			/*
13379 			 * If the guid of the LUN changes, lun_cip will not
13380 			 * be equal to cip, and after offlining the LUN with
13381 			 * the old guid we should keep lun_cip since it's the
13382 			 * cip of the LUN with the new guid.
13383 			 * Otherwise remove our reference to the child node.
13384 			 */
13385 			if (plun->lun_cip == cip) {
13386 				plun->lun_cip = NULL;
13387 			}
13388 			if (plun->lun_old_guid) {
13389 				kmem_free(plun->lun_old_guid,
13390 				    plun->lun_old_guid_size);
13391 				plun->lun_old_guid = NULL;
13392 				plun->lun_old_guid_size = 0;
13393 			}
13394 		}
13395 	}
13396 
13397 	if (cdip) {
13398 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13399 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13400 		    " target=%x lun=%x", "ndi_offline",
13401 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13402 	}
13403 
13404 	return (rval);
13405 }
13406 
13407 static void
13408 fcp_remove_child(struct fcp_lun *plun)
13409 {
13410 	int circ;
13411 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13412 
13413 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13414 		if (plun->lun_mpxio == 0) {
13415 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13416 			(void) ndi_devi_free(DIP(plun->lun_cip));
13417 		} else {
13418 			mutex_exit(&plun->lun_mutex);
13419 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13420 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13421 			mdi_devi_enter(
13422 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13423 			mdi_hold_path(PIP(plun->lun_cip));
13424 			mdi_devi_exit_phci(
13425 			    plun->lun_tgt->tgt_port->port_dip, circ);
13426 			(void) mdi_pi_offline(PIP(plun->lun_cip),
13427 			    NDI_DEVI_REMOVE);
13428 			mdi_devi_enter_phci(
13429 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13430 			mdi_rele_path(PIP(plun->lun_cip));
13431 			mdi_devi_exit_phci(
13432 			    plun->lun_tgt->tgt_port->port_dip, circ);
13433 			FCP_TRACE(fcp_logq,
13434 			    plun->lun_tgt->tgt_port->port_instbuf,
13435 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13436 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13437 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13438 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13439 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13440 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13441 			mutex_enter(&plun->lun_mutex);
13442 		}
13443 	}
13444 
13445 	plun->lun_cip = NULL;
13446 }
13447 
13448 /*
13449  * called when a timeout occurs
13450  *
13451  * can be scheduled during an attach or resume (if not already running)
13452  *
13453  * one timeout is set up for all ports
13454  *
13455  * acquires and releases the global mutex
13456  */
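/*
 * fcp_watchdog_time is a monotonically increasing tick counter that is
 * advanced by fcp_watchdog_timeout on each invocation.  Deadlines such as
 * cmd->cmd_timeout and icmd->ipkt_restart are expressed in the same units
 * and are compared against fcp_watchdog_time to decide whether a packet
 * has timed out or is due for re-transport.
 */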
13457 /*ARGSUSED*/
13458 static void
13459 fcp_watch(void *arg)
13460 {
13461 	struct fcp_port	*pptr;
13462 	struct fcp_ipkt	*icmd;
13463 	struct fcp_ipkt	*nicmd;
13464 	struct fcp_pkt	*cmd;
13465 	struct fcp_pkt	*ncmd;
13466 	struct fcp_pkt	*tail;
13467 	struct fcp_pkt	*pcmd;
13468 	struct fcp_pkt	*save_head;
13469 	struct fcp_port	*save_port;
13470 
13471 	/* increment global watchdog time */
13472 	fcp_watchdog_time += fcp_watchdog_timeout;
13473 
13474 	mutex_enter(&fcp_global_mutex);
13475 
13476 	/* scan each port in our list */
13477 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13478 		save_port = fcp_port_head;
13479 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13480 		mutex_exit(&fcp_global_mutex);
13481 
13482 		mutex_enter(&pptr->port_mutex);
13483 		if (pptr->port_ipkt_list == NULL &&
13484 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13485 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13486 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13487 			mutex_exit(&pptr->port_mutex);
13488 			mutex_enter(&fcp_global_mutex);
13489 			goto end_of_watchdog;
13490 		}
13491 
13492 		/*
13493 		 * We check if any targets need to be offlined.
13494 		 */
13495 		if (pptr->port_offline_tgts) {
13496 			fcp_scan_offline_tgts(pptr);
13497 		}
13498 
13499 		/*
13500 		 * We check if any luns need to be offlined.
13501 		 */
13502 		if (pptr->port_offline_luns) {
13503 			fcp_scan_offline_luns(pptr);
13504 		}
13505 
13506 		/*
13507 		 * We check if any targets or luns need to be reset.
13508 		 */
13509 		if (pptr->port_reset_list) {
13510 			fcp_check_reset_delay(pptr);
13511 		}
13512 
13513 		mutex_exit(&pptr->port_mutex);
13514 
13515 		/*
13516 		 * This is where the pending commands (pkt) are checked for
13517 		 * timeout.
13518 		 */
13519 		mutex_enter(&pptr->port_pkt_mutex);
13520 		tail = pptr->port_pkt_tail;
13521 
13522 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13523 		    cmd != NULL; cmd = ncmd) {
13524 			ncmd = cmd->cmd_next;
13525 			/*
13526 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13527 			 * must be set.
13528 			 */
13529 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13530 			/*
13531 			 * FCP_INVALID_TIMEOUT will be set for those
13532 			 * commands that need to be failed.  Mostly those
13533 			 * cmds that could not be queued down for the
13534 			 * "timeout" value.  cmd->cmd_timeout is used
13535 			 * to try and requeue the command regularly.
13536 			 */
13537 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13538 				/*
13539 				 * This command hasn't timed out yet.  Let's
13540 				 * go to the next one.
13541 				 */
13542 				pcmd = cmd;
13543 				goto end_of_loop;
13544 			}
13545 
13546 			if (cmd == pptr->port_pkt_head) {
13547 				ASSERT(pcmd == NULL);
13548 				pptr->port_pkt_head = cmd->cmd_next;
13549 			} else {
13550 				ASSERT(pcmd != NULL);
13551 				pcmd->cmd_next = cmd->cmd_next;
13552 			}
13553 
13554 			if (cmd == pptr->port_pkt_tail) {
13555 				ASSERT(cmd->cmd_next == NULL);
13556 				pptr->port_pkt_tail = pcmd;
13557 				if (pcmd) {
13558 					pcmd->cmd_next = NULL;
13559 				}
13560 			}
13561 			cmd->cmd_next = NULL;
13562 
13563 			/*
13564 			 * Save the current head before dropping the
13565 			 * mutex.  If the head doesn't remain the
13566 			 * same after reacquiring the mutex, just
13567 			 * bail out and revisit on the next tick.
13568 			 *
13569 			 * PS: The tail pointer can change as the commands
13570 			 * get requeued after a failure to retransport.
13571 			 */
13572 			save_head = pptr->port_pkt_head;
13573 			mutex_exit(&pptr->port_pkt_mutex);
13574 
13575 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13576 			    FCP_INVALID_TIMEOUT) {
13577 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13578 				struct fcp_lun	*plun;
13579 				struct fcp_tgt	*ptgt;
13580 
13581 				plun = ADDR2LUN(&pkt->pkt_address);
13582 				ptgt = plun->lun_tgt;
13583 
13584 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13585 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13586 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13587 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13588 
13589 				cmd->cmd_state == FCP_PKT_ABORTING ?
13590 				    fcp_fail_cmd(cmd, CMD_RESET,
13591 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13592 				    CMD_TIMEOUT, STAT_ABORTED);
13593 			} else {
13594 				fcp_retransport_cmd(pptr, cmd);
13595 			}
13596 			mutex_enter(&pptr->port_pkt_mutex);
13597 			if (save_head && save_head != pptr->port_pkt_head) {
13598 				/*
13599 				 * Looks like the linked list got changed
13600 				 * (mostly happens when the OFFLINE LUN code
13601 				 * starts returning overflow queue commands
13602 				 * in parallel).  So bail out and revisit
13603 				 * during the next tick.
13604 				 */
13605 				break;
13606 			}
13607 		end_of_loop:
13608 			/*
13609 			 * Scan only up to the previously known tail pointer
13610 			 * to avoid excessive processing - lots of new packets
13611 			 * could have been added to the tail or the old ones
13612 			 * re-queued.
13613 			 */
13614 			if (cmd == tail) {
13615 				break;
13616 			}
13617 		}
13618 		mutex_exit(&pptr->port_pkt_mutex);
13619 
13620 		mutex_enter(&pptr->port_mutex);
13621 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13622 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13623 
13624 			nicmd = icmd->ipkt_next;
13625 			if ((icmd->ipkt_restart != 0) &&
13626 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13627 				/* packet has not timed out */
13628 				continue;
13629 			}
13630 
13631 			/* time for packet re-transport */
13632 			if (icmd == pptr->port_ipkt_list) {
13633 				pptr->port_ipkt_list = icmd->ipkt_next;
13634 				if (pptr->port_ipkt_list) {
13635 					pptr->port_ipkt_list->ipkt_prev =
13636 					    NULL;
13637 				}
13638 			} else {
13639 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13640 				if (icmd->ipkt_next) {
13641 					icmd->ipkt_next->ipkt_prev =
13642 					    icmd->ipkt_prev;
13643 				}
13644 			}
13645 			icmd->ipkt_next = NULL;
13646 			icmd->ipkt_prev = NULL;
13647 			mutex_exit(&pptr->port_mutex);
13648 
13649 			if (fcp_is_retryable(icmd)) {
13650 				fc_ulp_rscn_info_t *rscnp =
13651 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13652 				    pkt_ulp_rscn_infop;
13653 
13654 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13655 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13656 				    "%x to D_ID=%x Retrying..",
13657 				    icmd->ipkt_opcode,
13658 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13659 
13660 				/*
13661 				 * Update the RSCN count in the packet
13662 				 * before resending.
13663 				 */
13664 
13665 				if (rscnp != NULL) {
13666 					rscnp->ulp_rscn_count =
13667 					    fc_ulp_get_rscn_count(pptr->
13668 					    port_fp_handle);
13669 				}
13670 
13671 				mutex_enter(&pptr->port_mutex);
13672 				mutex_enter(&ptgt->tgt_mutex);
13673 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13674 					mutex_exit(&ptgt->tgt_mutex);
13675 					mutex_exit(&pptr->port_mutex);
13676 					switch (icmd->ipkt_opcode) {
13677 						int rval;
13678 					case LA_ELS_PLOGI:
13679 						if ((rval = fc_ulp_login(
13680 						    pptr->port_fp_handle,
13681 						    &icmd->ipkt_fpkt, 1)) ==
13682 						    FC_SUCCESS) {
13683 							mutex_enter(
13684 							    &pptr->port_mutex);
13685 							continue;
13686 						}
13687 						if (fcp_handle_ipkt_errors(
13688 						    pptr, ptgt, icmd, rval,
13689 						    "PLOGI") == DDI_SUCCESS) {
13690 							mutex_enter(
13691 							    &pptr->port_mutex);
13692 							continue;
13693 						}
13694 						break;
13695 
13696 					case LA_ELS_PRLI:
13697 						if ((rval = fc_ulp_issue_els(
13698 						    pptr->port_fp_handle,
13699 						    icmd->ipkt_fpkt)) ==
13700 						    FC_SUCCESS) {
13701 							mutex_enter(
13702 							    &pptr->port_mutex);
13703 							continue;
13704 						}
13705 						if (fcp_handle_ipkt_errors(
13706 						    pptr, ptgt, icmd, rval,
13707 						    "PRLI") == DDI_SUCCESS) {
13708 							mutex_enter(
13709 							    &pptr->port_mutex);
13710 							continue;
13711 						}
13712 						break;
13713 
13714 					default:
13715 						if ((rval = fcp_transport(
13716 						    pptr->port_fp_handle,
13717 						    icmd->ipkt_fpkt, 1)) ==
13718 						    FC_SUCCESS) {
13719 							mutex_enter(
13720 							    &pptr->port_mutex);
13721 							continue;
13722 						}
13723 						if (fcp_handle_ipkt_errors(
13724 						    pptr, ptgt, icmd, rval,
13725 						    "PRLI") == DDI_SUCCESS) {
13726 							mutex_enter(
13727 							    &pptr->port_mutex);
13728 							continue;
13729 						}
13730 						break;
13731 					}
13732 				} else {
13733 					mutex_exit(&ptgt->tgt_mutex);
13734 					mutex_exit(&pptr->port_mutex);
13735 				}
13736 			} else {
13737 				fcp_print_error(icmd->ipkt_fpkt);
13738 			}
13739 
13740 			(void) fcp_call_finish_init(pptr, ptgt,
13741 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13742 			    icmd->ipkt_cause);
13743 			fcp_icmd_free(pptr, icmd);
13744 			mutex_enter(&pptr->port_mutex);
13745 		}
13746 
13747 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13748 		mutex_exit(&pptr->port_mutex);
13749 		mutex_enter(&fcp_global_mutex);
13750 
13751 	end_of_watchdog:
13752 		/*
13753 		 * Bail out early before getting into trouble
13754 		 */
13755 		if (save_port != fcp_port_head) {
13756 			break;
13757 		}
13758 	}
13759 
13760 	if (fcp_watchdog_init > 0) {
13761 		/* reschedule timeout to go again */
13762 		fcp_watchdog_id =
13763 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13764 	}
13765 	mutex_exit(&fcp_global_mutex);
13766 }
13767 
13768 
13769 static void
13770 fcp_check_reset_delay(struct fcp_port *pptr)
13771 {
13772 	uint32_t		tgt_cnt;
13773 	int			level;
13774 	struct fcp_tgt	*ptgt;
13775 	struct fcp_lun	*plun;
13776 	struct fcp_reset_elem *cur = NULL;
13777 	struct fcp_reset_elem *next = NULL;
13778 	struct fcp_reset_elem *prev = NULL;
13779 
13780 	ASSERT(mutex_owned(&pptr->port_mutex));
13781 
13782 	next = pptr->port_reset_list;
13783 	while ((cur = next) != NULL) {
13784 		next = cur->next;
13785 
13786 		if (cur->timeout < fcp_watchdog_time) {
13787 			prev = cur;
13788 			continue;
13789 		}
13790 
13791 		ptgt = cur->tgt;
13792 		plun = cur->lun;
13793 		tgt_cnt = cur->tgt_cnt;
13794 
13795 		if (ptgt) {
13796 			level = RESET_TARGET;
13797 		} else {
13798 			ASSERT(plun != NULL);
13799 			level = RESET_LUN;
13800 			ptgt = plun->lun_tgt;
13801 		}
13802 		if (prev) {
13803 			prev->next = next;
13804 		} else {
13805 			/*
13806 			 * Because we drop port mutex while doing aborts for
13807 			 * packets, we can't rely on reset_list pointing to
13808 			 * our head
13809 			 */
13810 			if (cur == pptr->port_reset_list) {
13811 				pptr->port_reset_list = next;
13812 			} else {
13813 				struct fcp_reset_elem *which;
13814 
13815 				which = pptr->port_reset_list;
13816 				while (which && which->next != cur) {
13817 					which = which->next;
13818 				}
13819 				ASSERT(which != NULL);
13820 
13821 				which->next = next;
13822 				prev = which;
13823 			}
13824 		}
13825 
13826 		kmem_free(cur, sizeof (*cur));
13827 
13828 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13829 			mutex_enter(&ptgt->tgt_mutex);
13830 			if (level == RESET_TARGET) {
13831 				fcp_update_tgt_state(ptgt,
13832 				    FCP_RESET, FCP_LUN_BUSY);
13833 			} else {
13834 				fcp_update_lun_state(plun,
13835 				    FCP_RESET, FCP_LUN_BUSY);
13836 			}
13837 			mutex_exit(&ptgt->tgt_mutex);
13838 
13839 			mutex_exit(&pptr->port_mutex);
13840 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13841 			mutex_enter(&pptr->port_mutex);
13842 		}
13843 	}
13844 }
13845 
13846 
13847 static void
13848 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13849     struct fcp_lun *rlun, int tgt_cnt)
13850 {
13851 	int			rval;
13852 	struct fcp_lun	*tlun, *nlun;
13853 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13854 	    *cmd = NULL, *head = NULL,
13855 	    *tail = NULL;
13856 
13857 	mutex_enter(&pptr->port_pkt_mutex);
13858 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13859 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13860 		struct fcp_tgt *ptgt = plun->lun_tgt;
13861 
13862 		ncmd = cmd->cmd_next;
13863 
13864 		if (ptgt != ttgt && plun != rlun) {
13865 			pcmd = cmd;
13866 			continue;
13867 		}
13868 
13869 		if (pcmd != NULL) {
13870 			ASSERT(pptr->port_pkt_head != cmd);
13871 			pcmd->cmd_next = ncmd;
13872 		} else {
13873 			ASSERT(cmd == pptr->port_pkt_head);
13874 			pptr->port_pkt_head = ncmd;
13875 		}
13876 		if (pptr->port_pkt_tail == cmd) {
13877 			ASSERT(cmd->cmd_next == NULL);
13878 			pptr->port_pkt_tail = pcmd;
13879 			if (pcmd != NULL) {
13880 				pcmd->cmd_next = NULL;
13881 			}
13882 		}
13883 
13884 		if (head == NULL) {
13885 			head = tail = cmd;
13886 		} else {
13887 			ASSERT(tail != NULL);
13888 			tail->cmd_next = cmd;
13889 			tail = cmd;
13890 		}
13891 		cmd->cmd_next = NULL;
13892 	}
13893 	mutex_exit(&pptr->port_pkt_mutex);
13894 
13895 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13896 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13897 
13898 		ncmd = cmd->cmd_next;
13899 		ASSERT(pkt != NULL);
13900 
13901 		mutex_enter(&pptr->port_mutex);
13902 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13903 			mutex_exit(&pptr->port_mutex);
13904 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13905 			pkt->pkt_reason = CMD_RESET;
13906 			pkt->pkt_statistics |= STAT_DEV_RESET;
13907 			cmd->cmd_state = FCP_PKT_IDLE;
13908 			fcp_post_callback(cmd);
13909 		} else {
13910 			mutex_exit(&pptr->port_mutex);
13911 		}
13912 	}
13913 
13914 	/*
13915 	 * If the FCA will return all the commands in its queue then our
13916 	 * work is easy, just return.
13917 	 */
13918 
13919 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13920 		return;
13921 	}
13922 
13923 	/*
13924 	 * For RESET_LUN get hold of target pointer
13925 	 */
13926 	if (ttgt == NULL) {
13927 		ASSERT(rlun != NULL);
13928 
13929 		ttgt = rlun->lun_tgt;
13930 
13931 		ASSERT(ttgt != NULL);
13932 	}
13933 
13934 	/*
13935 	 * There are some severe race conditions here.
13936 	 * While we are trying to abort the pkt, it might be completing,
13937 	 * so mark it as aborting; if the abort does not succeed, then
13938 	 * handle it in the watch thread.
13939 	 */
13940 	mutex_enter(&ttgt->tgt_mutex);
13941 	nlun = ttgt->tgt_lun;
13942 	mutex_exit(&ttgt->tgt_mutex);
13943 	while ((tlun = nlun) != NULL) {
13944 		int restart = 0;
13945 		if (rlun && rlun != tlun) {
13946 			mutex_enter(&ttgt->tgt_mutex);
13947 			nlun = tlun->lun_next;
13948 			mutex_exit(&ttgt->tgt_mutex);
13949 			continue;
13950 		}
13951 		mutex_enter(&tlun->lun_mutex);
13952 		cmd = tlun->lun_pkt_head;
13953 		while (cmd != NULL) {
13954 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13955 				struct scsi_pkt *pkt;
13956 
13957 				restart = 1;
13958 				cmd->cmd_state = FCP_PKT_ABORTING;
13959 				mutex_exit(&tlun->lun_mutex);
13960 				rval = fc_ulp_abort(pptr->port_fp_handle,
13961 				    cmd->cmd_fp_pkt, KM_SLEEP);
13962 				if (rval == FC_SUCCESS) {
13963 					pkt = cmd->cmd_pkt;
13964 					pkt->pkt_reason = CMD_RESET;
13965 					pkt->pkt_statistics |= STAT_DEV_RESET;
13966 					cmd->cmd_state = FCP_PKT_IDLE;
13967 					fcp_post_callback(cmd);
13968 				} else {
13969 					caddr_t msg;
13970 
13971 					(void) fc_ulp_error(rval, &msg);
13972 
13973 					/*
13974 					 * This part is tricky. The abort
13975 					 * failed and now the command could
13976 					 * be completing.  The cmd_state ==
13977 					 * FCP_PKT_ABORTING should save
13978 					 * us in fcp_cmd_callback. If we
13979 					 * are already aborting ignore the
13980 					 * command in fcp_cmd_callback.
13981 					 * Here we leave this packet for 20
13982 					 * sec to be aborted in the
13983 					 * fcp_watch thread.
13984 					 */
13985 					fcp_log(CE_WARN, pptr->port_dip,
13986 					    "!Abort failed after reset %s",
13987 					    msg);
13988 
13989 					cmd->cmd_timeout =
13990 					    fcp_watchdog_time +
13991 					    cmd->cmd_pkt->pkt_time +
13992 					    FCP_FAILED_DELAY;
13993 
13994 					cmd->cmd_fp_pkt->pkt_timeout =
13995 					    FCP_INVALID_TIMEOUT;
13996 					/*
13997 					 * This is a hack: cmd is put in the
13998 					 * overflow queue so that it can
13999 					 * finally be timed out.
14000 					 */
14001 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
14002 
14003 					mutex_enter(&pptr->port_pkt_mutex);
14004 					if (pptr->port_pkt_head) {
14005 						ASSERT(pptr->port_pkt_tail
14006 						    != NULL);
14007 						pptr->port_pkt_tail->cmd_next
14008 						    = cmd;
14009 						pptr->port_pkt_tail = cmd;
14010 					} else {
14011 						ASSERT(pptr->port_pkt_tail
14012 						    == NULL);
14013 						pptr->port_pkt_head =
14014 						    pptr->port_pkt_tail
14015 						    = cmd;
14016 					}
14017 					cmd->cmd_next = NULL;
14018 					mutex_exit(&pptr->port_pkt_mutex);
14019 				}
14020 				mutex_enter(&tlun->lun_mutex);
14021 				cmd = tlun->lun_pkt_head;
14022 			} else {
14023 				cmd = cmd->cmd_forw;
14024 			}
14025 		}
14026 		mutex_exit(&tlun->lun_mutex);
14027 
14028 		mutex_enter(&ttgt->tgt_mutex);
14029 		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
14030 		mutex_exit(&ttgt->tgt_mutex);
14031 
14032 		mutex_enter(&pptr->port_mutex);
14033 		if (tgt_cnt != ttgt->tgt_change_cnt) {
14034 			mutex_exit(&pptr->port_mutex);
14035 			return;
14036 		} else {
14037 			mutex_exit(&pptr->port_mutex);
14038 		}
14039 	}
14040 }
14041 
14042 
14043 /*
14044  * unlink the soft state, returning the soft state found (if any)
14045  *
14046  * acquires and releases the global mutex
14047  */
14048 struct fcp_port *
14049 fcp_soft_state_unlink(struct fcp_port *pptr)
14050 {
14051 	struct fcp_port	*hptr;		/* ptr index */
14052 	struct fcp_port	*tptr;		/* prev hptr */
14053 
14054 	mutex_enter(&fcp_global_mutex);
14055 	for (hptr = fcp_port_head, tptr = NULL;
14056 	    hptr != NULL;
14057 	    tptr = hptr, hptr = hptr->port_next) {
14058 		if (hptr == pptr) {
14059 			/* we found a match -- remove this item */
14060 			if (tptr == NULL) {
14061 				/* we're at the head of the list */
14062 				fcp_port_head = hptr->port_next;
14063 			} else {
14064 				tptr->port_next = hptr->port_next;
14065 			}
14066 			break;			/* success */
14067 		}
14068 	}
14069 	if (fcp_port_head == NULL) {
14070 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
14071 	}
14072 	mutex_exit(&fcp_global_mutex);
14073 	return (hptr);
14074 }
14075 
14076 
14077 /*
14078  * called by fcp_scsi_hba_tgt_init to find a LUN given a
14079  * WWN and a LUN number
14080  */
14081 /* ARGSUSED */
14082 static struct fcp_lun *
14083 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14084 {
14085 	int hash;
14086 	struct fcp_tgt *ptgt;
14087 	struct fcp_lun *plun;
14088 
14089 	ASSERT(mutex_owned(&pptr->port_mutex));
14090 
14091 	hash = FCP_HASH(wwn);
14092 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14093 	    ptgt = ptgt->tgt_next) {
14094 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14095 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
14096 			mutex_enter(&ptgt->tgt_mutex);
14097 			for (plun = ptgt->tgt_lun;
14098 			    plun != NULL;
14099 			    plun = plun->lun_next) {
14100 				if (plun->lun_num == lun) {
14101 					mutex_exit(&ptgt->tgt_mutex);
14102 					return (plun);
14103 				}
14104 			}
14105 			mutex_exit(&ptgt->tgt_mutex);
14106 			return (NULL);
14107 		}
14108 	}
14109 	return (NULL);
14110 }
14111 
14112 /*
14113  *     Function: fcp_prepare_pkt
14114  *
14115  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
14116  *		 for fcp_start(). It binds the data or partially maps it.
14117  *		 Builds the FCP header and starts the initialization of the
14118  *		 Fibre Channel header.
14119  *
14120  *     Argument: *pptr		FCP port.
14121  *		 *cmd		FCP packet.
14122  *		 *plun		LUN the command will be sent to.
14123  *
14124  *	Context: User, Kernel and Interrupt context.
14125  */
14126 static void
14127 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14128     struct fcp_lun *plun)
14129 {
14130 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
14131 	struct fcp_tgt		*ptgt = plun->lun_tgt;
14132 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
14133 
14134 	ASSERT(cmd->cmd_pkt->pkt_comp ||
14135 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14136 
14137 	if (cmd->cmd_pkt->pkt_numcookies) {
14138 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14139 			fcmd->fcp_cntl.cntl_read_data = 1;
14140 			fcmd->fcp_cntl.cntl_write_data = 0;
14141 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14142 		} else {
14143 			fcmd->fcp_cntl.cntl_read_data = 0;
14144 			fcmd->fcp_cntl.cntl_write_data = 1;
14145 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14146 		}
14147 
14148 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14149 
14150 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14151 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14152 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14153 
14154 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14155 
14156 		/* FCA needs pkt_datalen to be set */
14157 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14158 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14159 	} else {
14160 		fcmd->fcp_cntl.cntl_read_data = 0;
14161 		fcmd->fcp_cntl.cntl_write_data = 0;
14162 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14163 		fpkt->pkt_datalen = 0;
14164 		fcmd->fcp_data_len = 0;
14165 	}
14166 
14167 	/* set up the Tagged Queuing type */
14168 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14169 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14170 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14171 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14172 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14173 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14174 	} else {
14175 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14176 	}
14177 
14178 	fcmd->fcp_ent_addr = plun->lun_addr;
14179 
14180 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14181 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14182 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14183 	} else {
14184 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14185 	}
14186 
14187 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14188 	cmd->cmd_pkt->pkt_state = 0;
14189 	cmd->cmd_pkt->pkt_statistics = 0;
14190 	cmd->cmd_pkt->pkt_resid = 0;
14191 
14192 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14193 
14194 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14195 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14196 		fpkt->pkt_comp = NULL;
14197 	} else {
14198 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14199 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14200 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14201 		}
14202 		fpkt->pkt_comp = fcp_cmd_callback;
14203 	}
14204 
14205 	mutex_enter(&pptr->port_mutex);
14206 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14207 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14208 	}
14209 	mutex_exit(&pptr->port_mutex);
14210 
14211 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14212 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14213 
14214 	/*
14215 	 * Save a few kernel cycles here
14216 	 */
14217 #ifndef	__lock_lint
14218 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14219 #endif /* __lock_lint */
14220 }
14221 
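/*
 * Deliver the completion callback for the SCSI packet associated with
 * this FCP command to the target driver via scsi_hba_pkt_comp().
 */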
14222 static void
14223 fcp_post_callback(struct fcp_pkt *cmd)
14224 {
14225 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14226 }
14227 
14228 
14229 /*
14230  * called to do polled I/O by fcp_start()
14231  *
14232  * return a transport status value, i.e. TRAN_ACCEPT for success
14233  */
14234 static int
14235 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14236 {
14237 	int	rval;
14238 
14239 #ifdef	DEBUG
14240 	mutex_enter(&pptr->port_pkt_mutex);
14241 	pptr->port_npkts++;
14242 	mutex_exit(&pptr->port_pkt_mutex);
14243 #endif /* DEBUG */
14244 
14245 	if (cmd->cmd_fp_pkt->pkt_timeout) {
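	/*
	 * If the FC packet already carries a timeout, replace it with the
	 * timeout from the SCSI packet; otherwise fall back to the default
	 * polling timeout.
	 */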
14246 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14247 	} else {
14248 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14249 	}
14250 
14251 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14252 
14253 	cmd->cmd_state = FCP_PKT_ISSUED;
14254 
14255 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14256 
14257 #ifdef	DEBUG
14258 	mutex_enter(&pptr->port_pkt_mutex);
14259 	pptr->port_npkts--;
14260 	mutex_exit(&pptr->port_pkt_mutex);
14261 #endif /* DEBUG */
14262 
14263 	cmd->cmd_state = FCP_PKT_IDLE;
14264 
14265 	switch (rval) {
14266 	case FC_SUCCESS:
14267 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14268 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14269 			rval = TRAN_ACCEPT;
14270 		} else {
14271 			rval = TRAN_FATAL_ERROR;
14272 		}
14273 		break;
14274 
14275 	case FC_TRAN_BUSY:
14276 		rval = TRAN_BUSY;
14277 		cmd->cmd_pkt->pkt_resid = 0;
14278 		break;
14279 
14280 	case FC_BADPACKET:
14281 		rval = TRAN_BADPKT;
14282 		break;
14283 
14284 	default:
14285 		rval = TRAN_FATAL_ERROR;
14286 		break;
14287 	}
14288 
14289 	return (rval);
14290 }
14291 
14292 
14293 /*
14294  * called by some of the following transport-called routines to convert
14295  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14296  */
14297 static struct fcp_port *
14298 fcp_dip2port(dev_info_t *dip)
14299 {
14300 	int	instance;
14301 
14302 	instance = ddi_get_instance(dip);
14303 	return (ddi_get_soft_state(fcp_softstate, instance));
14304 }
14305 
14306 
14307 /*
14308  * called internally to return a LUN given a child info pointer (cip)
14309  */
14310 struct fcp_lun *
14311 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14312 {
14313 	struct fcp_tgt *ptgt;
14314 	struct fcp_lun *plun;
14315 	int i;
14316 
14317 
14318 	ASSERT(mutex_owned(&pptr->port_mutex));
14319 
14320 	for (i = 0; i < FCP_NUM_HASH; i++) {
14321 		for (ptgt = pptr->port_tgt_hash_table[i];
14322 		    ptgt != NULL;
14323 		    ptgt = ptgt->tgt_next) {
14324 			mutex_enter(&ptgt->tgt_mutex);
14325 			for (plun = ptgt->tgt_lun; plun != NULL;
14326 			    plun = plun->lun_next) {
14327 				mutex_enter(&plun->lun_mutex);
14328 				if (plun->lun_cip == cip) {
14329 					mutex_exit(&plun->lun_mutex);
14330 					mutex_exit(&ptgt->tgt_mutex);
14331 					return (plun); /* match found */
14332 				}
14333 				mutex_exit(&plun->lun_mutex);
14334 			}
14335 			mutex_exit(&ptgt->tgt_mutex);
14336 		}
14337 	}
14338 	return (NULL);				/* no LUN found */
14339 }
14340 
14341 /*
14342  * pass an element to the hotplug list, kick the hotplug thread
14343  * and wait for the element to get processed by the hotplug thread.
14344  * on return the element is freed.
14345  *
14346  * return zero on success and non-zero on failure
14347  *
14348  * acquires/releases the target mutex
14349  *
14350  */
14351 static int
14352 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14353     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14354 {
14355 	struct fcp_hp_elem	*elem;
14356 	int			rval;
14357 
14358 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14359 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14360 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14361 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14362 		fcp_log(CE_CONT, pptr->port_dip,
14363 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14364 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14365 		return (NDI_FAILURE);
14366 	}
14367 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14368 	mutex_enter(&elem->mutex);
14369 	if (elem->wait) {
14370 		while (elem->wait) {
14371 			cv_wait(&elem->cv, &elem->mutex);
14372 		}
14373 	}
14374 	rval = (elem->result);
14375 	mutex_exit(&elem->mutex);
14376 	mutex_destroy(&elem->mutex);
14377 	cv_destroy(&elem->cv);
14378 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14379 	return (rval);
14380 }
14381 
14382 /*
14383  * pass an element to the hotplug list, and then
14384  * kick the hotplug thread
14385  *
14386  * return a pointer to the hotplug element if all goes well, else NULL on error
14387  *
14388  * acquires/releases the hotplug mutex
14389  *
14390  * called with the target mutex owned
14391  *
14392  * memory acquired in NOSLEEP mode
14393  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14394  *	 for the hp daemon to process the request and is responsible for
14395  *	 freeing the element
14396  */
14397 static struct fcp_hp_elem *
14398 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14399     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14400 {
14401 	struct fcp_hp_elem	*elem;
14402 	dev_info_t *pdip;
14403 
14404 	ASSERT(pptr != NULL);
14405 	ASSERT(plun != NULL);
14406 	ASSERT(plun->lun_tgt != NULL);
14407 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14408 
14409 	/* create space for a hotplug element */
14410 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14411 	    == NULL) {
14412 		fcp_log(CE_WARN, NULL,
14413 		    "!can't allocate memory for hotplug element");
14414 		return (NULL);
14415 	}
14416 
14417 	/* fill in hotplug element */
14418 	elem->port = pptr;
14419 	elem->lun = plun;
14420 	elem->cip = cip;
14421 	elem->old_lun_mpxio = plun->lun_mpxio;
14422 	elem->what = what;
14423 	elem->flags = flags;
14424 	elem->link_cnt = link_cnt;
14425 	elem->tgt_cnt = tgt_cnt;
14426 	elem->wait = wait;
14427 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14428 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14429 
14430 	/* schedule the hotplug task */
14431 	pdip = pptr->port_dip;
14432 	mutex_enter(&plun->lun_mutex);
14433 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14434 		plun->lun_event_count++;
14435 		elem->event_cnt = plun->lun_event_count;
14436 	}
14437 	mutex_exit(&plun->lun_mutex);
14438 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14439 	    (void *)elem, KM_NOSLEEP) == NULL) {
14440 		mutex_enter(&plun->lun_mutex);
14441 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14442 			plun->lun_event_count--;
14443 		}
14444 		mutex_exit(&plun->lun_mutex);
14445 		kmem_free(elem, sizeof (*elem));
14446 		return (0);
14447 	}
14448 
14449 	return (elem);
14450 }
14451 
14452 
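/*
 * Try to hand a previously queued command back to the transport.  If the
 * LUN is neither busy nor offline and the port is not in the middle of
 * onlining, the packet is re-prepared and sent via fcp_transport();
 * otherwise, or if the transport does not accept it, the command is put
 * back on the port's packet queue.
 */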
14453 static void
14454 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14455 {
14456 	int			rval;
14457 	struct scsi_address	*ap;
14458 	struct fcp_lun	*plun;
14459 	struct fcp_tgt	*ptgt;
14460 	fc_packet_t	*fpkt;
14461 
14462 	ap = &cmd->cmd_pkt->pkt_address;
14463 	plun = ADDR2LUN(ap);
14464 	ptgt = plun->lun_tgt;
14465 
14466 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14467 
14468 	cmd->cmd_state = FCP_PKT_IDLE;
14469 
14470 	mutex_enter(&pptr->port_mutex);
14471 	mutex_enter(&ptgt->tgt_mutex);
14472 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14473 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14474 		fc_ulp_rscn_info_t *rscnp;
14475 
14476 		cmd->cmd_state = FCP_PKT_ISSUED;
14477 
14478 		/*
14479 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14480 		 * originally NULL, hence we try to set it to the pd pointed
14481 		 * to by the SCSI device we're trying to get to.
14482 		 */
14483 
14484 		fpkt = cmd->cmd_fp_pkt;
14485 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14486 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14487 			/*
14488 			 * We need to notify the transport that we now have a
14489 			 * reference to the remote port handle.
14490 			 */
14491 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14492 		}
14493 
14494 		mutex_exit(&ptgt->tgt_mutex);
14495 		mutex_exit(&pptr->port_mutex);
14496 
14497 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14498 
14499 		/* prepare the packet */
14500 
14501 		fcp_prepare_pkt(pptr, cmd, plun);
14502 
14503 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14504 		    pkt_ulp_rscn_infop;
14505 
14506 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14507 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14508 
14509 		if (rscnp != NULL) {
14510 			rscnp->ulp_rscn_count =
14511 			    fc_ulp_get_rscn_count(pptr->
14512 			    port_fp_handle);
14513 		}
14514 
14515 		rval = fcp_transport(pptr->port_fp_handle,
14516 		    cmd->cmd_fp_pkt, 0);
14517 
14518 		if (rval == FC_SUCCESS) {
14519 			return;
14520 		}
14521 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14522 	} else {
14523 		mutex_exit(&ptgt->tgt_mutex);
14524 		mutex_exit(&pptr->port_mutex);
14525 	}
14526 
14527 	fcp_queue_pkt(pptr, cmd);
14528 }
14529 
14530 
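/*
 * Fail a queued command: mark it idle, record the supplied pkt_reason and
 * statistics in the SCSI packet and post its completion callback.
 */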
14531 static void
14532 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14533 {
14534 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14535 
14536 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14537 	cmd->cmd_state = FCP_PKT_IDLE;
14538 
14539 	cmd->cmd_pkt->pkt_reason = reason;
14540 	cmd->cmd_pkt->pkt_state = 0;
14541 	cmd->cmd_pkt->pkt_statistics = statistics;
14542 
14543 	fcp_post_callback(cmd);
14544 }
14545 
14546 /*
14547  *     Function: fcp_queue_pkt
14548  *
14549  *  Description: This function queues the packet passed by the caller into
14550  *		 the list of packets of the FCP port.
14551  *
14552  *     Argument: *pptr		FCP port.
14553  *		 *cmd		FCP packet to queue.
14554  *
14555  * Return Value: None
14556  *
14557  *	Context: User, Kernel and Interrupt context.
14558  */
14559 static void
14560 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14561 {
14562 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14563 
14564 	mutex_enter(&pptr->port_pkt_mutex);
14565 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14566 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14567 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14568 
14569 	/*
14570 	 * zero pkt_time means hang around forever
14571 	 */
14572 	if (cmd->cmd_pkt->pkt_time) {
14573 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14574 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14575 		} else {
14576 			/*
14577 			 * Tell the watch thread to fail the command by
14578 			 * setting the timeout to FCP_INVALID_TIMEOUT
14579 			 */
14580 			cmd->cmd_timeout = fcp_watchdog_time;
14581 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14582 		}
14583 	}
14584 
14585 	if (pptr->port_pkt_head) {
14586 		ASSERT(pptr->port_pkt_tail != NULL);
14587 
14588 		pptr->port_pkt_tail->cmd_next = cmd;
14589 		pptr->port_pkt_tail = cmd;
14590 	} else {
14591 		ASSERT(pptr->port_pkt_tail == NULL);
14592 
14593 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14594 	}
14595 	cmd->cmd_next = NULL;
14596 	mutex_exit(&pptr->port_pkt_mutex);
14597 }
14598 
14599 /*
14600  *     Function: fcp_update_targets
14601  *
14602  *  Description: This function applies the specified change of state to all
14603  *		 the targets listed.  The operation applied is 'set'.
14604  *
14605  *     Argument: *pptr		FCP port.
14606  *		 *dev_list	Array of fc_portmap_t structures.
14607  *		 count		Length of dev_list.
14608  *		 state		State bits to update.
14609  *		 cause		Reason for the update.
14610  *
14611  * Return Value: None
14612  *
14613  *	Context: User, Kernel and Interrupt context.
14614  *		 The mutex pptr->port_mutex must be held.
14615  */
14616 static void
14617 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14618     uint32_t count, uint32_t state, int cause)
14619 {
14620 	fc_portmap_t		*map_entry;
14621 	struct fcp_tgt	*ptgt;
14622 
14623 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14624 
14625 	while (count--) {
14626 		map_entry = &(dev_list[count]);
14627 		ptgt = fcp_lookup_target(pptr,
14628 		    (uchar_t *)&(map_entry->map_pwwn));
14629 		if (ptgt == NULL) {
14630 			continue;
14631 		}
14632 
14633 		mutex_enter(&ptgt->tgt_mutex);
14634 		ptgt->tgt_trace = 0;
14635 		ptgt->tgt_change_cnt++;
14636 		ptgt->tgt_statec_cause = cause;
14637 		ptgt->tgt_tmp_cnt = 1;
14638 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14639 		mutex_exit(&ptgt->tgt_mutex);
14640 	}
14641 }
14642 
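/*
 * Wrapper around fcp_call_finish_init_held() that acquires and releases
 * pptr->port_mutex around the call.
 */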
14643 static int
14644 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14645     int lcount, int tcount, int cause)
14646 {
14647 	int rval;
14648 
14649 	mutex_enter(&pptr->port_mutex);
14650 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14651 	mutex_exit(&pptr->port_mutex);
14652 
14653 	return (rval);
14654 }
14655 
14656 
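/*
 * Called with pptr->port_mutex held.  Decrements the target's tmp count
 * and, when the counts show that the discovery work for this link/target
 * generation is complete, calls fcp_finish_tgt() and fcp_finish_init().
 * Returns FCP_DEV_CHANGE if the target generation changed (tcount no
 * longer matches tgt_change_cnt), FCP_NO_CHANGE otherwise.
 */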
14657 static int
14658 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14659     int lcount, int tcount, int cause)
14660 {
14661 	int	finish_init = 0;
14662 	int	finish_tgt = 0;
14663 	int	do_finish_init = 0;
14664 	int	rval = FCP_NO_CHANGE;
14665 
14666 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14667 	    cause == FCP_CAUSE_LINK_DOWN) {
14668 		do_finish_init = 1;
14669 	}
14670 
14671 	if (ptgt != NULL) {
14672 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14673 		    FCP_BUF_LEVEL_2, 0,
14674 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14675 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14676 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14677 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14678 		    ptgt->tgt_d_id, ptgt->tgt_done);
14679 
14680 		mutex_enter(&ptgt->tgt_mutex);
14681 
14682 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14683 			rval = FCP_DEV_CHANGE;
14684 			if (do_finish_init && ptgt->tgt_done == 0) {
14685 				ptgt->tgt_done++;
14686 				finish_init = 1;
14687 			}
14688 		} else {
14689 			if (--ptgt->tgt_tmp_cnt <= 0) {
14690 				ptgt->tgt_tmp_cnt = 0;
14691 				finish_tgt = 1;
14692 
14693 				if (do_finish_init) {
14694 					finish_init = 1;
14695 				}
14696 			}
14697 		}
14698 		mutex_exit(&ptgt->tgt_mutex);
14699 	} else {
14700 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14701 		    FCP_BUF_LEVEL_2, 0,
14702 		    "Call Finish Init for NO target");
14703 
14704 		if (do_finish_init) {
14705 			finish_init = 1;
14706 		}
14707 	}
14708 
14709 	if (finish_tgt) {
14710 		ASSERT(ptgt != NULL);
14711 
14712 		mutex_enter(&ptgt->tgt_mutex);
14713 #ifdef	DEBUG
14714 		bzero(ptgt->tgt_tmp_cnt_stack,
14715 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14716 
14717 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14718 		    FCP_STACK_DEPTH);
14719 #endif /* DEBUG */
14720 		mutex_exit(&ptgt->tgt_mutex);
14721 
14722 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14723 	}
14724 
14725 	if (finish_init && lcount == pptr->port_link_cnt) {
14726 		ASSERT(pptr->port_tmp_cnt > 0);
14727 		if (--pptr->port_tmp_cnt == 0) {
14728 			fcp_finish_init(pptr);
14729 		}
14730 	} else if (lcount != pptr->port_link_cnt) {
14731 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14732 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14733 		    "fcp_call_finish_init_held,1: state change occured"
14734 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14735 	}
14736 
14737 	return (rval);
14738 }
14739 
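/*
 * Timeout callback used to force rediscovery of a target's LUNs after a
 * change in its reported LUNs.  It builds a one-entry portmap for the
 * target and feeds it back through fcp_statec_callback() as a
 * PORT_DEVICE_REPORTLUN_CHANGED device-change event.
 */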
14740 static void
14741 fcp_reconfigure_luns(void * tgt_handle)
14742 {
14743 	uint32_t		dev_cnt;
14744 	fc_portmap_t		*devlist;
14745 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14746 	struct fcp_port		*pptr = ptgt->tgt_port;
14747 
14748 	/*
14749 	 * If the timer that fires this off got canceled too late, the
14750 	 * target could have been destroyed.
14751 	 */
14752 
14753 	if (ptgt->tgt_tid == NULL) {
14754 		return;
14755 	}
14756 
14757 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14758 	if (devlist == NULL) {
14759 		fcp_log(CE_WARN, pptr->port_dip,
14760 		    "!fcp%d: failed to allocate for portmap",
14761 		    pptr->port_instance);
14762 		return;
14763 	}
14764 
14765 	dev_cnt = 1;
14766 	devlist->map_pd = ptgt->tgt_pd_handle;
14767 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14768 	devlist->map_did.port_id = ptgt->tgt_d_id;
14769 
14770 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14771 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14772 
14773 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14774 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14775 	devlist->map_flags = 0;
14776 
14777 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14778 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14779 
14780 	/*
14781 	 * Clear tgt_tid now that we no longer hold any references
14782 	 * to the fcp_tgt
14783 	 */
14784 	mutex_enter(&ptgt->tgt_mutex);
14785 	ptgt->tgt_tid = NULL;
14786 	mutex_exit(&ptgt->tgt_mutex);
14787 
14788 	kmem_free(devlist, sizeof (*devlist));
14789 }
14790 
14791 
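/*
 * Walk every bucket of the port's target hash table and free all the
 * targets (and their LUNs) attached to this port.
 */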
14792 static void
14793 fcp_free_targets(struct fcp_port *pptr)
14794 {
14795 	int			i;
14796 	struct fcp_tgt	*ptgt;
14797 
14798 	mutex_enter(&pptr->port_mutex);
14799 	for (i = 0; i < FCP_NUM_HASH; i++) {
14800 		ptgt = pptr->port_tgt_hash_table[i];
14801 		while (ptgt != NULL) {
14802 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14803 
14804 			fcp_free_target(ptgt);
14805 			ptgt = next_tgt;
14806 		}
14807 	}
14808 	mutex_exit(&pptr->port_mutex);
14809 }
14810 
14811 
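/*
 * Free a single target: cancel any pending reconfiguration timeout,
 * deallocate all of the target's LUNs and then the target itself.
 */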
14812 static void
14813 fcp_free_target(struct fcp_tgt *ptgt)
14814 {
14815 	struct fcp_lun	*plun;
14816 	timeout_id_t		tid;
14817 
14818 	mutex_enter(&ptgt->tgt_mutex);
14819 	tid = ptgt->tgt_tid;
14820 
14821 	/*
14822 	 * Cancel any pending timeouts for this target.
14823 	 */
14824 
14825 	if (tid != NULL) {
14826 		/*
14827 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14828 		 * If tgt_tid is NULL, the callback will simply return.
14829 		 */
14830 		ptgt->tgt_tid = NULL;
14831 		mutex_exit(&ptgt->tgt_mutex);
14832 		(void) untimeout(tid);
14833 		mutex_enter(&ptgt->tgt_mutex);
14834 	}
14835 
14836 	plun = ptgt->tgt_lun;
14837 	while (plun != NULL) {
14838 		struct fcp_lun *next_lun = plun->lun_next;
14839 
14840 		fcp_dealloc_lun(plun);
14841 		plun = next_lun;
14842 	}
14843 
14844 	mutex_exit(&ptgt->tgt_mutex);
14845 	fcp_dealloc_tgt(ptgt);
14846 }
14847 
14848 /*
14849  *     Function: fcp_is_retryable
14850  *
14851  *  Description: Indicates if the internal packet is retryable.
14852  *
14853  *     Argument: *icmd		FCP internal packet.
14854  *
14855  * Return Value: 0	Not retryable
14856  *		 1	Retryable
14857  *
14858  *	Context: User, Kernel and Interrupt context
14859  */
14860 static int
14861 fcp_is_retryable(struct fcp_ipkt *icmd)
14862 {
14863 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14864 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14865 		return (0);
14866 	}
14867 
14868 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14869 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14870 }
14871 
14872 /*
14873  *     Function: fcp_create_on_demand
14874  *
14875  *     Argument: *pptr		FCP port.
14876  *		 *pwwn		Port WWN.
14877  *
14878  * Return Value: 0	Success
14879  *		 EIO
14880  *		 ENOMEM
14881  *		 EBUSY
14882  *		 EINVAL
14883  *
14884  *	Context: User and Kernel context
14885  */
14886 static int
14887 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14888 {
14889 	int			wait_ms;
14890 	int			tcount;
14891 	int			lcount;
14892 	int			ret;
14893 	int			error;
14894 	int			rval = EIO;
14895 	int			ntries;
14896 	fc_portmap_t		*devlist;
14897 	opaque_t		pd;
14898 	struct fcp_lun		*plun;
14899 	struct fcp_tgt		*ptgt;
14900 	int			old_manual = 0;
14901 
14902 	/* Allocates the fc_portmap_t structure. */
14903 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14904 
14905 	/*
14906 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14907 	 * in the commented statement below:
14908 	 *
14909 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14910 	 *
14911 	 * Below, the deadline for the discovery process is set.
14912 	 */
14913 	mutex_enter(&pptr->port_mutex);
14914 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14915 	mutex_exit(&pptr->port_mutex);
14916 
14917 	/*
14918 	 * We try to find the remote port based on the WWN provided by the
14919 	 * caller.  We actually ask fp/fctl if it has it.
14920 	 */
14921 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14922 	    (la_wwn_t *)pwwn, &error, 1);
14923 
14924 	if (pd == NULL) {
14925 		kmem_free(devlist, sizeof (*devlist));
14926 		return (rval);
14927 	}
14928 
14929 	/*
14930 	 * The remote port was found.  We ask fp/fctl to update our
14931 	 * fc_portmap_t structure.
14932 	 */
14933 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14934 	    (la_wwn_t *)pwwn, devlist);
14935 	if (ret != FC_SUCCESS) {
14936 		kmem_free(devlist, sizeof (*devlist));
14937 		return (rval);
14938 	}
14939 
14940 	/*
14941 	 * The map type field is set to indicate that the creation is being
14942 	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14943 	 */
14944 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14945 
14946 	mutex_enter(&pptr->port_mutex);
14947 
14948 	/*
14949 	 * We check to see if fcp already has a target that describes the
14950 	 * device being created.  If not, it is created.
14951 	 */
14952 	ptgt = fcp_lookup_target(pptr, pwwn);
14953 	if (ptgt == NULL) {
14954 		lcount = pptr->port_link_cnt;
14955 		mutex_exit(&pptr->port_mutex);
14956 
14957 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14958 		if (ptgt == NULL) {
14959 			fcp_log(CE_WARN, pptr->port_dip,
14960 			    "!FC target allocation failed");
14961 			return (ENOMEM);
14962 		}
14963 
14964 		mutex_enter(&pptr->port_mutex);
14965 	}
14966 
14967 	mutex_enter(&ptgt->tgt_mutex);
14968 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14969 	ptgt->tgt_tmp_cnt = 1;
14970 	ptgt->tgt_device_created = 0;
14971 	/*
14972 	 * If this is a fabric topology with auto configuration enabled but
14973 	 * the target was manually unconfigured, reset manual_config_only to
14974 	 * 0 so the device will get configured.
14975 	 */
14976 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14977 	    fcp_enable_auto_configuration &&
14978 	    ptgt->tgt_manual_config_only == 1) {
14979 		old_manual = 1;
14980 		ptgt->tgt_manual_config_only = 0;
14981 	}
14982 	mutex_exit(&ptgt->tgt_mutex);
14983 
14984 	fcp_update_targets(pptr, devlist, 1,
14985 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14986 
14987 	lcount = pptr->port_link_cnt;
14988 	tcount = ptgt->tgt_change_cnt;
14989 
14990 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14991 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14992 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14993 		    fcp_enable_auto_configuration && old_manual) {
14994 			mutex_enter(&ptgt->tgt_mutex);
14995 			ptgt->tgt_manual_config_only = 1;
14996 			mutex_exit(&ptgt->tgt_mutex);
14997 		}
14998 
14999 		if (pptr->port_link_cnt != lcount ||
15000 		    ptgt->tgt_change_cnt != tcount) {
15001 			rval = EBUSY;
15002 		}
15003 		mutex_exit(&pptr->port_mutex);
15004 
15005 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15006 		    FCP_BUF_LEVEL_3, 0,
15007 		    "fcp_create_on_demand: mapflags ptgt=%x, "
15008 		    "lcount=%x::port_link_cnt=%x, "
15009 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
15010 		    ptgt, lcount, pptr->port_link_cnt,
15011 		    tcount, ptgt->tgt_change_cnt, rval);
15012 		return (rval);
15013 	}
15014 
15015 	/*
15016 	 * Due to the lack of synchronization mechanisms, we perform
15017 	 * periodic monitoring of our request.  Because requests
15018 	 * get dropped when another one supersedes them (either because
15019 	 * of a link change or a target change), it is difficult to
15020 	 * provide a clean synchronization mechanism (such as a
15021 	 * semaphore or a condition variable) without exhaustively
15022 	 * rewriting the mainline discovery code of this driver.
15023 	 */
15024 	wait_ms = 500;
15025 
15026 	ntries = fcp_max_target_retries;
15027 
15028 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15029 	    FCP_BUF_LEVEL_3, 0,
15030 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15031 	    "lcount=%x::port_link_cnt=%x, "
15032 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15033 	    "tgt_tmp_cnt =%x",
15034 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15035 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15036 	    ptgt->tgt_tmp_cnt);
15037 
15038 	mutex_enter(&ptgt->tgt_mutex);
15039 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15040 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15041 		mutex_exit(&ptgt->tgt_mutex);
15042 		mutex_exit(&pptr->port_mutex);
15043 
15044 		delay(drv_usectohz(wait_ms * 1000));
15045 
15046 		mutex_enter(&pptr->port_mutex);
15047 		mutex_enter(&ptgt->tgt_mutex);
15048 	}
15049 
15050 
15051 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15052 		rval = EBUSY;
15053 	} else {
15054 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15055 		    FCP_TGT_NODE_PRESENT) {
15056 			rval = 0;
15057 		}
15058 	}
15059 
15060 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15061 	    FCP_BUF_LEVEL_3, 0,
15062 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15063 	    "lcount=%x::port_link_cnt=%x, "
15064 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15065 	    "tgt_tmp_cnt =%x",
15066 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15067 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15068 	    ptgt->tgt_tmp_cnt);
15069 
15070 	if (rval) {
15071 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15072 		    fcp_enable_auto_configuration && old_manual) {
15073 			ptgt->tgt_manual_config_only = 1;
15074 		}
15075 		mutex_exit(&ptgt->tgt_mutex);
15076 		mutex_exit(&pptr->port_mutex);
15077 		kmem_free(devlist, sizeof (*devlist));
15078 
15079 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15080 		    FCP_BUF_LEVEL_3, 0,
15081 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15082 		    "lcount=%x::port_link_cnt=%x, "
15083 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15084 		    "tgt_device_created=%x, tgt D_ID=%x",
15085 		    ntries, ptgt, lcount, pptr->port_link_cnt,
15086 		    tcount, ptgt->tgt_change_cnt, rval,
15087 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
15088 		return (rval);
15089 	}
15090 
15091 	if ((plun = ptgt->tgt_lun) != NULL) {
15092 		tcount = plun->lun_tgt->tgt_change_cnt;
15093 	} else {
15094 		rval = EINVAL;
15095 	}
15096 	lcount = pptr->port_link_cnt;
15097 
15098 	/*
15099 	 * Configuring the target with no LUNs will fail. We
15100 	 * should reset the node state so that it is not
15101 	 * automatically configured when the LUNs are added
15102 	 * to this target.
15103 	 */
15104 	if (ptgt->tgt_lun_cnt == 0) {
15105 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15106 	}
15107 	mutex_exit(&ptgt->tgt_mutex);
15108 	mutex_exit(&pptr->port_mutex);
15109 
15110 	while (plun) {
15111 		child_info_t	*cip;
15112 
15113 		mutex_enter(&plun->lun_mutex);
15114 		cip = plun->lun_cip;
15115 		mutex_exit(&plun->lun_mutex);
15116 
15117 		mutex_enter(&ptgt->tgt_mutex);
15118 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15119 			mutex_exit(&ptgt->tgt_mutex);
15120 
15121 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15122 			    FCP_ONLINE, lcount, tcount,
15123 			    NDI_ONLINE_ATTACH);
15124 			if (rval != NDI_SUCCESS) {
15125 				FCP_TRACE(fcp_logq,
15126 				    pptr->port_instbuf, fcp_trace,
15127 				    FCP_BUF_LEVEL_3, 0,
15128 				    "fcp_create_on_demand: "
15129 				    "pass_to_hp_and_wait failed "
15130 				    "rval=%x", rval);
15131 				rval = EIO;
15132 			} else {
15133 				mutex_enter(&LUN_TGT->tgt_mutex);
15134 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
15135 				    FCP_LUN_BUSY);
15136 				mutex_exit(&LUN_TGT->tgt_mutex);
15137 			}
15138 			mutex_enter(&ptgt->tgt_mutex);
15139 		}
15140 
15141 		plun = plun->lun_next;
15142 		mutex_exit(&ptgt->tgt_mutex);
15143 	}
15144 
15145 	kmem_free(devlist, sizeof (*devlist));
15146 
15147 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15148 	    fcp_enable_auto_configuration && old_manual) {
15149 		mutex_enter(&ptgt->tgt_mutex);
15150 		/* if successful then set manual to 0 */
15151 		if (rval == 0) {
15152 			ptgt->tgt_manual_config_only = 0;
15153 		} else {
15154 			/* reset to 1 so the user has to do the config */
15155 			ptgt->tgt_manual_config_only = 1;
15156 		}
15157 		mutex_exit(&ptgt->tgt_mutex);
15158 	}
15159 
15160 	return (rval);
15161 }
15162 
15163 
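/*
 * Convert an ASCII hex string into a byte array, two characters per byte,
 * stopping after byte_len bytes have been filled in.
 */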
15164 static void
15165 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15166 {
15167 	int		count;
15168 	uchar_t		byte;
15169 
15170 	count = 0;
15171 	while (*string) {
15172 		byte = FCP_ATOB(*string); string++;
15173 		byte = byte << 4 | FCP_ATOB(*string); string++;
15174 		bytes[count++] = byte;
15175 
15176 		if (count >= byte_len) {
15177 			break;
15178 		}
15179 	}
15180 }
15181 
15182 static void
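/*
 * Format an FC_WWN_SIZE byte WWN as a null-terminated ASCII hex string.
 */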
15183 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15184 {
15185 	int		i;
15186 
15187 	for (i = 0; i < FC_WWN_SIZE; i++) {
15188 		(void) sprintf(string + (i * 2),
15189 		    "%02x", wwn[i]);
15190 	}
15191 
15192 }
15193 
15194 static void
15195 fcp_print_error(fc_packet_t *fpkt)
15196 {
15197 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15198 	    fpkt->pkt_ulp_private;
15199 	struct fcp_port	*pptr;
15200 	struct fcp_tgt	*ptgt;
15201 	struct fcp_lun	*plun;
15202 	caddr_t			buf;
15203 	int			scsi_cmd = 0;
15204 
15205 	ptgt = icmd->ipkt_tgt;
15206 	plun = icmd->ipkt_lun;
15207 	pptr = ptgt->tgt_port;
15208 
15209 	buf = kmem_zalloc(256, KM_NOSLEEP);
15210 	if (buf == NULL) {
15211 		return;
15212 	}
15213 
15214 	switch (icmd->ipkt_opcode) {
15215 	case SCMD_REPORT_LUN:
15216 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15217 		    " lun=0x%%x failed");
15218 		scsi_cmd++;
15219 		break;
15220 
15221 	case SCMD_INQUIRY_PAGE83:
15222 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15223 		    " lun=0x%%x failed");
15224 		scsi_cmd++;
15225 		break;
15226 
15227 	case SCMD_INQUIRY:
15228 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15229 		    " lun=0x%%x failed");
15230 		scsi_cmd++;
15231 		break;
15232 
15233 	case LA_ELS_PLOGI:
15234 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15235 		break;
15236 
15237 	case LA_ELS_PRLI:
15238 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15239 		break;
15240 	}
15241 
15242 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15243 		struct fcp_rsp		response, *rsp;
15244 		uchar_t			asc, ascq;
15245 		caddr_t			sense_key = NULL;
15246 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15247 
15248 		if (icmd->ipkt_nodma) {
15249 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15250 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15251 			    sizeof (struct fcp_rsp));
15252 		} else {
15253 			rsp = &response;
15254 			bep = &fcp_rsp_err;
15255 
15256 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15257 			    sizeof (struct fcp_rsp));
15258 
15259 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15260 			    bep, fpkt->pkt_resp_acc,
15261 			    sizeof (struct fcp_rsp_info));
15262 		}
15263 
15264 
15265 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15266 			(void) sprintf(buf + strlen(buf),
15267 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15268 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15269 			    " senselen=%%x. Giving up");
15270 
15271 			fcp_log(CE_WARN, pptr->port_dip, buf,
15272 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15273 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15274 			    rsp->fcp_u.fcp_status.reserved_1,
15275 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15276 
15277 			kmem_free(buf, 256);
15278 			return;
15279 		}
15280 
15281 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15282 		    bep->rsp_code != FCP_NO_FAILURE) {
15283 			(void) sprintf(buf + strlen(buf),
15284 			    " FCP Response code = 0x%x", bep->rsp_code);
15285 		}
15286 
15287 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15288 			struct scsi_extended_sense sense_info, *sense_ptr;
15289 
15290 			if (icmd->ipkt_nodma) {
15291 				sense_ptr = (struct scsi_extended_sense *)
15292 				    ((caddr_t)fpkt->pkt_resp +
15293 				    sizeof (struct fcp_rsp) +
15294 				    rsp->fcp_response_len);
15295 			} else {
15296 				sense_ptr = &sense_info;
15297 
15298 				FCP_CP_IN(fpkt->pkt_resp +
15299 				    sizeof (struct fcp_rsp) +
15300 				    rsp->fcp_response_len, &sense_info,
15301 				    fpkt->pkt_resp_acc,
15302 				    sizeof (struct scsi_extended_sense));
15303 			}
15304 
15305 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15306 			    NUM_IMPL_SENSE_KEYS) {
15307 				sense_key = sense_keys[sense_ptr->es_key];
15308 			} else {
15309 				sense_key = "Undefined";
15310 			}
15311 
15312 			asc = sense_ptr->es_add_code;
15313 			ascq = sense_ptr->es_qual_code;
15314 
15315 			(void) sprintf(buf + strlen(buf),
15316 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15317 			    " Giving up");
15318 
15319 			fcp_log(CE_WARN, pptr->port_dip, buf,
15320 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15321 			    asc, ascq);
15322 		} else {
15323 			(void) sprintf(buf + strlen(buf),
15324 			    " : SCSI status=%%x. Giving up");
15325 
15326 			fcp_log(CE_WARN, pptr->port_dip, buf,
15327 			    ptgt->tgt_d_id, plun->lun_num,
15328 			    rsp->fcp_u.fcp_status.scsi_status);
15329 		}
15330 	} else {
15331 		caddr_t state, reason, action, expln;
15332 
15333 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15334 		    &action, &expln);
15335 
15336 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15337 		    " Reason:%%s. Giving up");
15338 
15339 		if (scsi_cmd) {
15340 			fcp_log(CE_WARN, pptr->port_dip, buf,
15341 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15342 		} else {
15343 			fcp_log(CE_WARN, pptr->port_dip, buf,
15344 			    ptgt->tgt_d_id, state, reason);
15345 		}
15346 	}
15347 
15348 	kmem_free(buf, 256);
15349 }
15350 
15351 
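/*
 * Decide how to recover from a failed internal packet.  Busy/offline style
 * errors are requeued for retry (subject to the retry count and the port
 * deadline), a PRLI that fails with FC_LOGINREQ is reissued as a PLOGI,
 * and any other error is logged unless a state change has already
 * superseded the request.  Returns DDI_SUCCESS if the request was requeued
 * for retry; for a failed PRLI the result of reissuing it as a PLOGI is
 * returned; DDI_FAILURE otherwise.
 */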
15352 static int
15353 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15354     struct fcp_ipkt *icmd, int rval, caddr_t op)
15355 {
15356 	int	ret = DDI_FAILURE;
15357 	char	*error;
15358 
15359 	switch (rval) {
15360 	case FC_DEVICE_BUSY_NEW_RSCN:
15361 		/*
15362 		 * This means that there was a new RSCN that the transport
15363 		 * knows about (which the ULP *may* know about too) but the
15364 		 * pkt that was sent down was related to an older RSCN. So, we
15365 		 * are just going to reset the retry count and deadline and
15366 		 * continue to retry. The idea is that transport is currently
15367 		 * working on the new RSCN and will soon let the ULPs know
15368 		 * about it and when it does the existing logic will kick in
15369 		 * where it will change the tcount to indicate that something
15370 		 * changed on the target. So, rediscovery will start and there
15371 		 * will not be an infinite retry.
15372 		 *
15373 		 * For a full flow of how the RSCN info is transferred back and
15374 		 * forth, see fp.c
15375 		 */
15376 		icmd->ipkt_retries = 0;
15377 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15378 		    FCP_ICMD_DEADLINE;
15379 
15380 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15381 		    FCP_BUF_LEVEL_3, 0,
15382 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15383 		    rval, ptgt->tgt_d_id);
15384 		/* FALLTHROUGH */
15385 
15386 	case FC_STATEC_BUSY:
15387 	case FC_DEVICE_BUSY:
15388 	case FC_PBUSY:
15389 	case FC_FBUSY:
15390 	case FC_TRAN_BUSY:
15391 	case FC_OFFLINE:
15392 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15393 		    FCP_BUF_LEVEL_3, 0,
15394 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15395 		    rval, ptgt->tgt_d_id);
15396 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15397 		    fcp_is_retryable(icmd)) {
15398 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15399 			ret = DDI_SUCCESS;
15400 		}
15401 		break;
15402 
15403 	case FC_LOGINREQ:
15404 		/*
15405 		 * FC_LOGINREQ used to be handled just like all the cases
15406 		 * above.  It has been changed so that a PRLI that fails
15407 		 * with FC_LOGINREQ is handled differently from other ipkts
15408 		 * that fail with FC_LOGINREQ.  If a PRLI fails with
15409 		 * FC_LOGINREQ it is a simple matter to turn it into a PLOGI
15410 		 * instead, so that's exactly what we do here.
15411 		 */
15412 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15413 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15414 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15415 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15416 		} else {
15417 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15418 			    FCP_BUF_LEVEL_3, 0,
15419 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15420 			    rval, ptgt->tgt_d_id);
15421 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15422 			    fcp_is_retryable(icmd)) {
15423 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15424 				ret = DDI_SUCCESS;
15425 			}
15426 		}
15427 		break;
15428 
15429 	default:
15430 		mutex_enter(&pptr->port_mutex);
15431 		mutex_enter(&ptgt->tgt_mutex);
15432 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15433 			mutex_exit(&ptgt->tgt_mutex);
15434 			mutex_exit(&pptr->port_mutex);
15435 
15436 			(void) fc_ulp_error(rval, &error);
15437 			fcp_log(CE_WARN, pptr->port_dip,
15438 			    "!Failed to send %s to D_ID=%x error=%s",
15439 			    op, ptgt->tgt_d_id, error);
15440 		} else {
15441 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15442 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15443 			    "fcp_handle_ipkt_errors,1: state change occured"
15444 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15445 			mutex_exit(&ptgt->tgt_mutex);
15446 			mutex_exit(&pptr->port_mutex);
15447 		}
15448 		break;
15449 	}
15450 
15451 	return (ret);
15452 }
15453 
15454 
15455 /*
15456  * Check for outstanding commands on any LUN for this target
15457  */
15458 static int
15459 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15460 {
15461 	struct	fcp_lun	*plun;
15462 	struct	fcp_pkt	*cmd;
15463 
15464 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15465 		mutex_enter(&plun->lun_mutex);
15466 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15467 		    cmd = cmd->cmd_forw) {
15468 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15469 				mutex_exit(&plun->lun_mutex);
15470 				return (FC_SUCCESS);
15471 			}
15472 		}
15473 		mutex_exit(&plun->lun_mutex);
15474 	}
15475 
15476 	return (FC_FAILURE);
15477 }
15478 
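/*
 * Build a portmap array covering every non-orphan target on the port and
 * return it along with the entry count in *dev_cnt.  Entries that fp/fctl
 * can no longer resolve are marked PORT_DEVICE_INVALID/PORT_DEVICE_OLD.
 * The caller is responsible for freeing the array.
 */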
15479 static fc_portmap_t *
15480 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15481 {
15482 	int			i;
15483 	fc_portmap_t		*devlist;
15484 	fc_portmap_t		*devptr = NULL;
15485 	struct fcp_tgt	*ptgt;
15486 
15487 	mutex_enter(&pptr->port_mutex);
15488 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15489 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15490 		    ptgt = ptgt->tgt_next) {
15491 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15492 				++*dev_cnt;
15493 			}
15494 		}
15495 	}
15496 
15497 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15498 	    KM_NOSLEEP);
15499 	if (devlist == NULL) {
15500 		mutex_exit(&pptr->port_mutex);
15501 		fcp_log(CE_WARN, pptr->port_dip,
15502 		    "!fcp%d: failed to allocate for portmap for construct map",
15503 		    pptr->port_instance);
15504 		return (devptr);
15505 	}
15506 
15507 	for (i = 0; i < FCP_NUM_HASH; i++) {
15508 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15509 		    ptgt = ptgt->tgt_next) {
15510 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15511 				int ret;
15512 
15513 				ret = fc_ulp_pwwn_to_portmap(
15514 				    pptr->port_fp_handle,
15515 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15516 				    devlist);
15517 
15518 				if (ret == FC_SUCCESS) {
15519 					devlist++;
15520 					continue;
15521 				}
15522 
15523 				devlist->map_pd = NULL;
15524 				devlist->map_did.port_id = ptgt->tgt_d_id;
15525 				devlist->map_hard_addr.hard_addr =
15526 				    ptgt->tgt_hard_addr;
15527 
15528 				devlist->map_state = PORT_DEVICE_INVALID;
15529 				devlist->map_type = PORT_DEVICE_OLD;
15530 
15531 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15532 				    &devlist->map_nwwn, FC_WWN_SIZE);
15533 
15534 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15535 				    &devlist->map_pwwn, FC_WWN_SIZE);
15536 
15537 				devlist++;
15538 			}
15539 		}
15540 	}
15541 
15542 	mutex_exit(&pptr->port_mutex);
15543 
15544 	return (devptr);
15545 }
15546 /*
15547  * Inform MPxIO that the LUN is busy and cannot accept regular I/O
15548  */
15549 static void
15550 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15551 {
15552 	int i;
15553 	struct fcp_tgt	*ptgt;
15554 	struct fcp_lun	*plun;
15555 
15556 	for (i = 0; i < FCP_NUM_HASH; i++) {
15557 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15558 		    ptgt = ptgt->tgt_next) {
15559 			mutex_enter(&ptgt->tgt_mutex);
15560 			for (plun = ptgt->tgt_lun; plun != NULL;
15561 			    plun = plun->lun_next) {
15562 				if (plun->lun_mpxio &&
15563 				    plun->lun_state & FCP_LUN_BUSY) {
15564 					if (!fcp_pass_to_hp(pptr, plun,
15565 					    plun->lun_cip,
15566 					    FCP_MPXIO_PATH_SET_BUSY,
15567 					    pptr->port_link_cnt,
15568 					    ptgt->tgt_change_cnt, 0, 0)) {
15569 						FCP_TRACE(fcp_logq,
15570 						    pptr->port_instbuf,
15571 						    fcp_trace,
15572 						    FCP_BUF_LEVEL_2, 0,
15573 						    "path_verifybusy: "
15574 						    "disable lun %p failed!",
15575 						    plun);
15576 					}
15577 				}
15578 			}
15579 			mutex_exit(&ptgt->tgt_mutex);
15580 		}
15581 	}
15582 }
15583 
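/*
 * Enable or disable the MPxIO path of a LUN depending on 'what'
 * (FCP_MPXIO_PATH_CLEAR_BUSY enables it, anything else disables it).
 * Fails if the child is no longer present on the LUN.
 */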
15584 static int
15585 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15586 {
15587 	dev_info_t		*cdip = NULL;
15588 	dev_info_t		*pdip = NULL;
15589 
15590 	ASSERT(plun);
15591 
15592 	mutex_enter(&plun->lun_mutex);
15593 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15594 		mutex_exit(&plun->lun_mutex);
15595 		return (NDI_FAILURE);
15596 	}
15597 	mutex_exit(&plun->lun_mutex);
15598 	cdip = mdi_pi_get_client(PIP(cip));
15599 	pdip = mdi_pi_get_phci(PIP(cip));
15600 
15601 	ASSERT(cdip != NULL);
15602 	ASSERT(pdip != NULL);
15603 
15604 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15605 		/* LUN ready for IO */
15606 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15607 	} else {
15608 		/* LUN busy to accept IO */
15609 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15610 	}
15611 	return (NDI_SUCCESS);
15612 }
15613 
15614 /*
15615  * Caller must free the returned string, which is MAXPATHLEN bytes long.
15616  * If the device is offline (-1 instance number) NULL
15617  * will be returned.
15618  */
15619 static char *
15620 fcp_get_lun_path(struct fcp_lun *plun) {
15621 	dev_info_t	*dip = NULL;
15622 	char	*path = NULL;
15623 	if (plun == NULL) {
15624 		return (NULL);
15625 	}
15626 	if (plun->lun_mpxio == 0) {
15627 		dip = DIP(plun->lun_cip);
15628 	} else {
15629 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15630 	}
15631 	if (dip == NULL) {
15632 		return (NULL);
15633 	}
15634 	if (ddi_get_instance(dip) < 0) {
15635 		return (NULL);
15636 	}
15637 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15638 	if (path == NULL) {
15639 		return (NULL);
15640 	}
15641 
15642 	(void) ddi_pathname(dip, path);
15643 	/*
15644 	 * In reality, the user wants a fully valid path (one they can open)
15645 	 * but this string is lacking the mount point, and the minor node.
15646 	 * It would be nice if we could "figure these out" somehow
15647 	 * and fill them in.  Otherwise, the userland code has to understand
15648 	 * driver specific details of which minor node is the "best" or
15649 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15650 	 * which tape doesn't rewind)
15651 	 */
15652 	return (path);
15653 }
15654 
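/*
 * bus_config entry point.  BUS_CONFIG_ONE is retried up to
 * fcp_max_bus_config_retries times (fabric devices needed for the root
 * device may not have reported in yet) and then once more after draining
 * the parent's taskq.  BUS_CONFIG_DRIVER/BUS_CONFIG_ALL first wait for
 * discovery to settle (port_tmp_cnt == 0) or for FCP_INIT_WAIT_TIMEOUT
 * to expire.
 */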
15655 static int
15656 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15657     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15658 {
15659 	int64_t reset_delay;
15660 	int rval, retry = 0;
15661 	struct fcp_port *pptr = fcp_dip2port(parent);
15662 
15663 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15664 	    (ddi_get_lbolt64() - pptr->port_attach_time);
15665 	if (reset_delay < 0) {
15666 		reset_delay = 0;
15667 	}
15668 
15669 	if (fcp_bus_config_debug) {
15670 		flag |= NDI_DEVI_DEBUG;
15671 	}
15672 
15673 	switch (op) {
15674 	case BUS_CONFIG_ONE:
15675 		/*
15676 		 * Retry the command since we need to ensure
15677 		 * the fabric devices are available for root
15678 		 */
15679 		while (retry++ < fcp_max_bus_config_retries) {
15680 			rval =	(ndi_busop_bus_config(parent,
15681 			    flag | NDI_MDI_FALLBACK, op,
15682 			    arg, childp, (clock_t)reset_delay));
15683 			if (rval == 0) {
15684 				return (rval);
15685 			}
15686 		}
15687 
15688 		/*
15689 		 * drain taskq to make sure nodes are created and then
15690 		 * try again.
15691 		 */
15692 		taskq_wait(DEVI(parent)->devi_taskq);
15693 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15694 		    op, arg, childp, 0));
15695 
15696 	case BUS_CONFIG_DRIVER:
15697 	case BUS_CONFIG_ALL: {
15698 		/*
15699 		 * delay till all devices report in (port_tmp_cnt == 0)
15700 		 * or FCP_INIT_WAIT_TIMEOUT
15701 		 */
15702 		mutex_enter(&pptr->port_mutex);
15703 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15704 			(void) cv_timedwait(&pptr->port_config_cv,
15705 			    &pptr->port_mutex,
15706 			    ddi_get_lbolt() + (clock_t)reset_delay);
15707 			reset_delay =
15708 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15709 			    (ddi_get_lbolt64() - pptr->port_attach_time);
15710 		}
15711 		mutex_exit(&pptr->port_mutex);
15712 		/* drain taskq to make sure nodes are created */
15713 		taskq_wait(DEVI(parent)->devi_taskq);
15714 		return (ndi_busop_bus_config(parent, flag, op,
15715 		    arg, childp, 0));
15716 	}
15717 
15718 	default:
15719 		return (NDI_FAILURE);
15720 	}
15721 	/*NOTREACHED*/
15722 }
15723 
15724 static int
15725 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15726     ddi_bus_config_op_t op, void *arg)
15727 {
15728 	if (fcp_bus_config_debug) {
15729 		flag |= NDI_DEVI_DEBUG;
15730 	}
15731 
15732 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15733 }
15734 
15735 
15736 /*
15737  * Routine to copy GUID into the lun structure.
15738  * returns 0 if copy was successful and 1 if encountered a
15739  * failure and did not copy the guid.
15740  */
15741 static int
15742 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15743 {
15744 
15745 	int retval = 0;
15746 	unsigned int len;
15747 
15748 	if ((guidp == NULL) || (plun == NULL)) {
15749 		return (1);
15750 	}
15751 	/* add one for the null terminator */
15752 	len = strlen(guidp) + 1;
15753 
15754 	/*
15755 	 * If plun->lun_guid has already been allocated, check its size.
15756 	 * If the size matches exactly, reuse it; if not, free it and
15757 	 * allocate the required size.
15758 	 * The reallocation should NOT typically happen
15759 	 * unless the reported GUID changes between passes.
15760 	 * We free up and allocate again even if the old
15761 	 * size was more than required.  This is due to the
15762 	 * fact that the field lun_guid_size serves the
15763 	 * dual role of indicating both the size of the GUID
15764 	 * and the allocation size.
15765 	 */
15766 	if (plun->lun_guid) {
15767 		if (plun->lun_guid_size != len) {
15768 			/*
15769 			 * free the allocated memory and
15770 			 * initialize the field
15771 			 * lun_guid_size to 0.
15772 			 */
15773 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15774 			plun->lun_guid = NULL;
15775 			plun->lun_guid_size = 0;
15776 		}
15777 	}
15778 	/*
15779 	 * alloc only if not already done.
15780 	 */
15781 	if (plun->lun_guid == NULL) {
15782 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15783 		if (plun->lun_guid == NULL) {
15784 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15785 			    "unable to allocate "
15786 			    "memory for GUID, size %u", len);
15787 			retval = 1;
15788 		} else {
15789 			plun->lun_guid_size = len;
15790 		}
15791 	}
15792 	if (plun->lun_guid) {
15793 		/*
15794 		 * now copy the GUID
15795 		 */
15796 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15797 	}
15798 	return (retval);
15799 }
15800 
15801 /*
15802  * fcp_reconfig_wait
15803  *
15804  * Wait for a rediscovery/reconfiguration to complete before continuing.
15805  */
15806 
15807 static void
15808 fcp_reconfig_wait(struct fcp_port *pptr)
15809 {
15810 	clock_t		reconfig_start, wait_timeout;
15811 
15812 	/*
15813 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15814 	 * reconfiguration in progress.
15815 	 */
15816 
15817 	mutex_enter(&pptr->port_mutex);
15818 	if (pptr->port_tmp_cnt == 0) {
15819 		mutex_exit(&pptr->port_mutex);
15820 		return;
15821 	}
15822 	mutex_exit(&pptr->port_mutex);
15823 
15824 	/*
15825 	 * If we cause a reconfig by raising power, delay until all devices
15826 	 * report in (port_tmp_cnt returns to 0)
15827 	 */
15828 
15829 	reconfig_start = ddi_get_lbolt();
15830 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15831 
15832 	mutex_enter(&pptr->port_mutex);
15833 
15834 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15835 	    pptr->port_tmp_cnt) {
15836 
15837 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15838 		    reconfig_start + wait_timeout);
15839 	}
15840 
15841 	mutex_exit(&pptr->port_mutex);
15842 
15843 	/*
15844 	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
15845 	 * we want may still be ok.  If not, it will error out later.
15846 	 */
15847 }
15848 
15849 /*
15850  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15851  * We rely on the fcp_global_mutex to provide protection against changes to
15852  * the fcp_lun_blacklist.
15853  *
15854  * You can describe a list of target port WWNs and LUN numbers which will
15855  * not be configured. LUN numbers will be interpreted as decimal. White
15856  * spaces and ',' can be used in the list of LUN numbers.
15857  *
15858  * To prevent LUNs 1 and 2 from being configured for target
15859  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15860  *
15861  * pwwn-lun-blacklist=
15862  * "510000f010fd92a1,1,2",
15863  * "510000e012079df1,1,2";
15864  */
15865 static void
15866 fcp_read_blacklist(dev_info_t *dip,
15867     struct fcp_black_list_entry **pplun_blacklist) {
15868 	char **prop_array	= NULL;
15869 	char *curr_pwwn		= NULL;
15870 	char *curr_lun		= NULL;
15871 	uint32_t prop_item	= 0;
15872 	int idx			= 0;
15873 	int len			= 0;
15874 
15875 	ASSERT(mutex_owned(&fcp_global_mutex));
15876 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15877 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15878 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15879 		return;
15880 	}
15881 
15882 	for (idx = 0; idx < prop_item; idx++) {
15883 
15884 		curr_pwwn = prop_array[idx];
15885 		while (*curr_pwwn == ' ') {
15886 			curr_pwwn++;
15887 		}
15888 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15889 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15890 			    ", please check.", curr_pwwn);
15891 			continue;
15892 		}
15893 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15894 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15895 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15896 			    ", please check.", curr_pwwn);
15897 			continue;
15898 		}
15899 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15900 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15901 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15902 				    "blacklist, please check.", curr_pwwn);
15903 				break;
15904 			}
15905 		}
15906 		if (len != sizeof (la_wwn_t) * 2) {
15907 			continue;
15908 		}
15909 
15910 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15911 		*(curr_lun - 1) = '\0';
15912 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15913 	}
15914 
15915 	ddi_prop_free(prop_array);
15916 }
15917 
15918 /*
15919  * Get the masking info about one remote target port designated by wwn.
15920  * Lun ids could be separated by ',' or white spaces.
15921  */
15922 static void
15923 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15924     struct fcp_black_list_entry **pplun_blacklist) {
15925 	int		idx			= 0;
15926 	uint32_t	offset			= 0;
15927 	unsigned long	lun_id			= 0;
15928 	char		lunid_buf[16];
15929 	char		*pend			= NULL;
15930 	int		illegal_digit		= 0;
15931 
15932 	while (offset < strlen(curr_lun)) {
15933 		while ((curr_lun[offset + idx] != ',') &&
15934 		    (curr_lun[offset + idx] != '\0') &&
15935 		    (curr_lun[offset + idx] != ' ')) {
15936 			if (isdigit(curr_lun[offset + idx]) == 0) {
15937 				illegal_digit++;
15938 			}
15939 			idx++;
15940 		}
15941 		if (illegal_digit > 0) {
15942 			offset += (idx+1);	/* To the start of next lun */
15943 			idx = 0;
15944 			illegal_digit = 0;
15945 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15946 			    "the blacklist, please check digits.",
15947 			    curr_lun, curr_pwwn);
15948 			continue;
15949 		}
15950 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15951 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15952 			    "the blacklist, please check the length of LUN#.",
15953 			    curr_lun, curr_pwwn);
15954 			break;
15955 		}
15956 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
15957 			offset++;
15958 			continue;
15959 		}
15960 
15961 		bcopy(curr_lun + offset, lunid_buf, idx);
15962 		lunid_buf[idx] = '\0';
15963 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15964 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15965 		} else {
15966 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15967 			    "the blacklist, please check %s.",
15968 			    curr_lun, curr_pwwn, lunid_buf);
15969 		}
15970 		offset += (idx+1);	/* To the start of next lun */
15971 		idx = 0;
15972 	}
15973 }
15974 
15975 /*
15976  * Add one masking record
15977  */
15978 static void
15979 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15980     struct fcp_black_list_entry **pplun_blacklist) {
15981 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15982 	struct fcp_black_list_entry	*new_entry	= NULL;
15983 	la_wwn_t			wwn;
15984 
15985 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15986 	while (tmp_entry) {
15987 		if ((bcmp(&tmp_entry->wwn, &wwn,
15988 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15989 			return;
15990 		}
15991 
15992 		tmp_entry = tmp_entry->next;
15993 	}
15994 
15995 	/* add to black list */
15996 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
15997 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
15998 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
15999 	new_entry->lun = lun_id;
16000 	new_entry->masked = 0;
16001 	new_entry->next = *pplun_blacklist;
16002 	*pplun_blacklist = new_entry;
16003 }
16004 
16005 /*
16006  * Check if we should mask the specified lun of this fcp_tgt
16007  */
16008 static int
16009 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16010 	struct fcp_black_list_entry *remote_port;
16011 
16012 	remote_port = fcp_lun_blacklist;
16013 	while (remote_port != NULL) {
16014 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16015 			if (remote_port->lun == lun_id) {
16016 				remote_port->masked++;
16017 				if (remote_port->masked == 1) {
16018 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
16019 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
16020 					    "is masked due to black listing.\n",
16021 					    lun_id, wwn->raw_wwn[0],
16022 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
16023 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
16024 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
16025 					    wwn->raw_wwn[7]);
16026 				}
16027 				return (TRUE);
16028 			}
16029 		}
16030 		remote_port = remote_port->next;
16031 	}
16032 	return (FALSE);
16033 }
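
/*
 * Illustration only, not part of the driver: fcp_should_mask() above counts
 * how many times an entry has matched so that the CE_NOTE message is logged
 * only on the first hit.  The guarded sketch below isolates that log-once
 * pattern with hypothetical "example_" names.
 */
#if 0
#include <stdio.h>

struct example_mask {
	unsigned int	lun;
	unsigned int	masked;		/* number of times this entry matched */
};

static int
example_should_mask(struct example_mask *m, unsigned int lun)
{
	if (m->lun != lun)
		return (0);
	if (++m->masked == 1)
		(void) printf("LUN %u is masked\n", lun);  /* first hit only */
	return (1);
}
#endif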
16034 
16035 /*
16036  * Release all allocated resources
16037  */
16038 static void
16039 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16040 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16041 	struct fcp_black_list_entry	*current_entry	= NULL;
16042 
16043 	ASSERT(mutex_owned(&fcp_global_mutex));
16044 	/*
16045 	 * Traverse the list and free every entry.
16046 	 */
16047 	while (tmp_entry) {
16048 		current_entry = tmp_entry;
16049 		tmp_entry = tmp_entry->next;
16050 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16051 	}
16052 	*pplun_blacklist = NULL;
16053 }
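
/*
 * Illustration only, not part of the driver: fcp_cleanup_blacklist() above
 * saves the next pointer before freeing each node, which is the only safe
 * way to free a singly linked list while walking it.  The guarded user-space
 * sketch below shows the same pattern.
 */
#if 0
#include <stdlib.h>

struct example_node {
	struct example_node	*next;
};

static void
example_destroy_list(struct example_node **head)
{
	struct example_node *cur = *head;

	while (cur != NULL) {
		struct example_node *next = cur->next;	/* save before free */

		free(cur);
		cur = next;
	}
	*head = NULL;		/* leave the caller with an empty list */
}
#endif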
16054 
16055 /*
16056  * Variable naming conventions used in the fcp module (variable@type):
16057  *   pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16058  */
16059 static struct scsi_pkt *
16060 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16061     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16062     int flags, int (*callback)(), caddr_t arg)
16063 {
16064 	fcp_port_t	*pptr = ADDR2FCP(ap);
16065 	fcp_pkt_t	*cmd  = NULL;
16066 	fc_frame_hdr_t	*hp;
16067 
16068 	/*
16069 	 * First step: get the packet
16070 	 */
16071 	if (pkt == NULL) {
16072 		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16073 		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16074 		    callback, arg);
16075 		if (pkt == NULL) {
16076 			return (NULL);
16077 		}
16078 
16079 		/*
16080 		 * All fields of the scsi_pkt are initialized properly or
16081 		 * set to zero; nothing more is needed for the scsi_pkt.
16082 		 */
16083 		/*
16084 		 * It is our responsibility, however, to link the related
16085 		 * data structures.  They are initialized just before the
16086 		 * scsi_pkt is sent to the FCA.
16087 		 */
16088 		cmd		= PKT2CMD(pkt);
16089 		cmd->cmd_pkt	= pkt;
16090 		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16091 		/*
16092 		 * fc_packet_t
16093 		 */
16094 		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16095 		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16096 		    sizeof (struct fcp_pkt));
16097 		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16098 		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16099 		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16100 		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16101 		/*
16102 		 * Fill in the Fibre Channel frame header.
16103 		 */
16104 		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16105 		hp->r_ctl = R_CTL_COMMAND;
16106 		hp->rsvd = 0;
16107 		hp->type = FC_TYPE_SCSI_FCP;
16108 		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16109 		hp->seq_id = 0;
16110 		hp->df_ctl  = 0;
16111 		hp->seq_cnt = 0;
16112 		hp->ox_id = 0xffff;
16113 		hp->rx_id = 0xffff;
16114 		hp->ro = 0;
16115 	} else {
16116 		/*
16117 		 * When reusing a packet, consider whether any elements of
16118 		 * the related data structures need to be reset.
16119 		 */
16120 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
16121 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
16122 		    "reusing pkt, flags %d", flags);
16123 		cmd = PKT2CMD(pkt);
16124 		if (cmd->cmd_fp_pkt->pkt_pd) {
16125 			cmd->cmd_fp_pkt->pkt_pd = NULL;
16126 		}
16127 	}
16128 
16129 	/*
16130 	 * Second step:	 dma allocation/move
16131 	 */
16132 	if (bp && bp->b_bcount != 0) {
16133 		/*
16134 		 * Mark whether this is a read or a write.
16135 		 */
16136 		if (bp->b_flags & B_READ) {
16137 			cmd->cmd_flags |= CFLAG_IS_READ;
16138 		} else {
16139 			cmd->cmd_flags &= ~CFLAG_IS_READ;
16140 		}
16141 
16142 		bp_mapin(bp);
16143 		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16144 		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16145 		cmd->cmd_fp_pkt->pkt_data_resid = 0;
16146 	} else {
16147 		/*
16148 		 * This seldom happens, except when CLUSTER or SCSI_VHCI
16149 		 * sends a zero-length read/write.
16150 		 */
16151 		cmd->cmd_fp_pkt->pkt_data = NULL;
16152 		cmd->cmd_fp_pkt->pkt_datalen = 0;
16153 	}
16154 
16155 	return (pkt);
16156 }
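
/*
 * Illustration only, not part of the driver: fcp_pseudo_init_pkt() above
 * asks scsi_hba_pkt_alloc() for a single hba-private area big enough to hold
 * the fcp_pkt_t plus the FCA's private data, then points pkt_fca_private
 * just past the fcp_pkt_t.  The guarded user-space sketch below shows that
 * carving trick with hypothetical "example_" names.
 */
#if 0
#include <stdlib.h>

struct example_cmd {
	int	example_state;
	/* The FCA-private area follows this structure in the same block. */
};

static struct example_cmd *
example_alloc_cmd(size_t fca_priv_len, void **fca_privp)
{
	struct example_cmd *cmd;

	/* One zeroed allocation for both the command and the FCA area. */
	if ((cmd = calloc(1, sizeof (*cmd) + fca_priv_len)) == NULL)
		return (NULL);
	*fca_privp = (char *)cmd + sizeof (*cmd);
	return (cmd);
}
#endif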
16157 
16158 static void
16159 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16160 {
16161 	fcp_port_t	*pptr = ADDR2FCP(ap);
16162 
16163 	/*
16164 	 * First, we let the FCA uninitialize its private part.
16165 	 */
16166 	fc_ulp_uninit_packet(pptr->port_fp_handle, PKT2CMD(pkt)->cmd_fp_pkt);
16167 
16168 	/*
16169 	 * Then we uninitialize the fc_packet.
16170 	 */
16171 
16172 	/*
16173 	 * Third, we uninitialize the fcp_pkt.
16174 	 */
16175 
16176 	/*
16177 	 * Finally, we free the scsi_pkt.
16178 	 */
16179 	scsi_hba_pkt_free(ap, pkt);
16180 }
16181 
16182 static int
16183 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16184 {
16185 	fcp_port_t	*pptr = ADDR2FCP(ap);
16186 	fcp_lun_t	*plun = ADDR2LUN(ap);
16187 	fcp_tgt_t	*ptgt = plun->lun_tgt;
16188 	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
16189 	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
16190 	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
16191 	int		 rval;
16192 
16193 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
16194 	fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16195 
16196 	/*
16197 	 * First, initialize the fcp_pkt_t;
16198 	 * second, initialize the fcp_cmd_t.
16199 	 */
16200 	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16201 	fcmd->fcp_data_len = fpkt->pkt_datalen;
16202 	fcmd->fcp_ent_addr = plun->lun_addr;
16203 	if (pkt->pkt_flags & FLAG_HTAG) {
16204 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16205 	} else if (pkt->pkt_flags & FLAG_OTAG) {
16206 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16207 	} else if (pkt->pkt_flags & FLAG_STAG) {
16208 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16209 	} else {
16210 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16211 	}
16212 
16213 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16214 		fcmd->fcp_cntl.cntl_read_data = 1;
16215 		fcmd->fcp_cntl.cntl_write_data = 0;
16216 	} else {
16217 		fcmd->fcp_cntl.cntl_read_data = 0;
16218 		fcmd->fcp_cntl.cntl_write_data = 1;
16219 	}
16220 
16221 	/*
16222 	 * Then initialize the fc_packet_t as well.
16223 	 */
16224 	fpkt->pkt_timeout = pkt->pkt_time + 2;
16225 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16226 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16227 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16228 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16229 	} else {
16230 		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16231 	}
16232 
16233 	if (pkt->pkt_flags & FLAG_NOINTR) {
16234 		fpkt->pkt_comp = NULL;
16235 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16236 	} else {
16237 		fpkt->pkt_comp = fcp_cmd_callback;
16238 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16239 		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16240 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16241 		}
16242 	}
16243 
16244 	/*
16245 	 * Lastly, initialize the scsi_pkt.
16246 	 */
16247 	pkt->pkt_reason = CMD_CMPLT;
16248 	pkt->pkt_state = 0;
16249 	pkt->pkt_statistics = 0;
16250 	pkt->pkt_resid = 0;
16251 
16252 	/*
16253 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
16254 	 * have to do polled I/O
16255 	 */
16256 	if (pkt->pkt_flags & FLAG_NOINTR) {
16257 		return (fcp_dopoll(pptr, cmd));
16258 	}
16259 
16260 	cmd->cmd_state = FCP_PKT_ISSUED;
16261 	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16262 	if (rval == FC_SUCCESS) {
16263 		return (TRAN_ACCEPT);
16264 	}
16265 
16266 	/*
16267 	 * This path needs more consideration:
16268 	 *
16269 	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other packets.
16270 	 */
16271 	cmd->cmd_state = FCP_PKT_IDLE;
16272 	if (rval == FC_TRAN_BUSY) {
16273 		return (TRAN_BUSY);
16274 	} else {
16275 		return (TRAN_FATAL_ERROR);
16276 	}
16277 }
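
/*
 * Illustration only, not part of the driver: a SCSA target driver sitting
 * above fcp reaches the FLAG_NOINTR (polled) branch of fcp_pseudo_start()
 * by building a packet with interrupts disabled, e.g. at dump time.  The
 * guarded sketch below is a minimal example of such a polled TEST UNIT
 * READY; error handling is reduced and the function name is hypothetical.
 */
#if 0
static int
example_polled_tur(struct scsi_address *ap)
{
	struct scsi_pkt *pkt;

	pkt = scsi_init_pkt(ap, NULL, NULL, CDB_GROUP0, 1, 0, 0,
	    NULL_FUNC, NULL);
	if (pkt == NULL)
		return (-1);

	(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp,
	    SCMD_TEST_UNIT_READY, 0, 0, 0);
	pkt->pkt_flags |= FLAG_NOINTR;	/* forces the fcp_dopoll() path */
	pkt->pkt_time = 60;

	if (scsi_transport(pkt) != TRAN_ACCEPT ||
	    pkt->pkt_reason != CMD_CMPLT) {
		scsi_destroy_pkt(pkt);
		return (-1);
	}
	scsi_destroy_pkt(pkt);
	return (0);
}
#endif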
16278 
16279 /*
16280  * scsi_poll() will always call tran_sync_pkt for pseudo FC-HBAs;
16281  * SCSA initializes it to scsi_sync_cache_pkt for physical FC-HBAs.
16282  */
16283 static void
16284 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16285 {
16286 	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16287 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16288 }
16289 
16290 /*
16291  * scsi_dmafree() will always call tran_dmafree when STATE_ARQ_DONE is set.
16292  */
16293 static void
16294 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16295 {
16296 	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16297 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16298 }
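
/*
 * Illustration only: the pseudo entry points above are the tran_* vectors of
 * a scsi_hba_tran_t used for pseudo FC-HBAs.  The guarded sketch below shows
 * how such a tran could be wired up; the actual wiring for pseudo ports is
 * done where this driver sets up the port's scsi_hba_tran_t, and the helper
 * name here is hypothetical.
 */
#if 0
static void
example_setup_pseudo_tran(scsi_hba_tran_t *tran)
{
	tran->tran_init_pkt	= fcp_pseudo_init_pkt;
	tran->tran_destroy_pkt	= fcp_pseudo_destroy_pkt;
	tran->tran_start	= fcp_pseudo_start;
	tran->tran_sync_pkt	= fcp_pseudo_sync_pkt;
	tran->tran_dmafree	= fcp_pseudo_dmafree;
}
#endif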
16299