xref: /titanic_41/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision 3f64cd552fee350c8075ec62765e9a6f9caef1a8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
33 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.	 In order to help
57  * understand that function a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and the events that can occur during
59  * the discovery process nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potentially bug
61  * fixer) have an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP the link being up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.	The watch dog timer does much more that
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
431 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
432     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
433 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
434     int cause);
435 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
436     uint32_t state);
437 static struct fcp_port *fcp_get_port(opaque_t port_handle);
438 static void fcp_unsol_callback(fc_packet_t *fpkt);
439 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
440     uchar_t r_ctl, uchar_t type);
441 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
442 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
443     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
444     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
445 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
446 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
447     int nodma, int flags);
448 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
449 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
450     uchar_t *wwn);
451 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
452     uint32_t d_id);
453 static void fcp_icmd_callback(fc_packet_t *fpkt);
454 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
455     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
456 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
457 static void fcp_scsi_callback(fc_packet_t *fpkt);
458 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
459 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
461 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
462     uint16_t lun_num);
463 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
464     int link_cnt, int tgt_cnt, int cause);
465 static void fcp_finish_init(struct fcp_port *pptr);
466 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
467     int tgt_cnt, int cause);
468 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
469     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
470 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
471     int link_cnt, int tgt_cnt, int nowait, int flags);
472 static void fcp_offline_target_now(struct fcp_port *pptr,
473     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
474 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
475     int tgt_cnt, int flags);
476 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
477     int nowait, int flags);
478 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
479     int tgt_cnt);
480 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
481     int tgt_cnt, int flags);
482 static void fcp_scan_offline_luns(struct fcp_port *pptr);
483 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
484 static void fcp_update_offline_flags(struct fcp_lun *plun);
485 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
486 static void fcp_abort_commands(struct fcp_pkt *head, struct
487     fcp_port *pptr);
488 static void fcp_cmd_callback(fc_packet_t *fpkt);
489 static void fcp_complete_pkt(fc_packet_t *fpkt);
490 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
491     struct fcp_port *pptr);
492 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
493     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
494 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
495 static void fcp_dealloc_lun(struct fcp_lun *plun);
496 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
497     fc_portmap_t *map_entry, int link_cnt);
498 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
499 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
500 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
501     int internal);
502 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
503 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
504     uint32_t s_id, int instance);
505 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
506     int instance);
507 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
508 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
509     int);
510 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
511 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
512 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
513     int flags);
514 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
515 static int fcp_reset_target(struct scsi_address *ap, int level);
516 static int fcp_commoncap(struct scsi_address *ap, char *cap,
517     int val, int tgtonly, int doset);
518 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
519 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
520 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
521     int sleep);
522 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
523     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
524 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
525 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
526 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
527     int lcount, int tcount);
528 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
529 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
530 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
531     int tgt_cnt);
532 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
533     dev_info_t *pdip, caddr_t name);
534 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
535     int lcount, int tcount, int flags, int *circ);
536 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
537     int lcount, int tcount, int flags, int *circ);
538 static void fcp_remove_child(struct fcp_lun *plun);
539 static void fcp_watch(void *arg);
540 static void fcp_check_reset_delay(struct fcp_port *pptr);
541 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
542     struct fcp_lun *rlun, int tgt_cnt);
543 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
544 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
545     uchar_t *wwn, uint16_t lun);
546 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
547     struct fcp_lun *plun);
548 static void fcp_post_callback(struct fcp_pkt *cmd);
549 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
550 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
551 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
552     child_info_t *cip);
553 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
554     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
555     int tgt_cnt, int flags);
556 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
557     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
558     int tgt_cnt, int flags, int wait);
559 static void fcp_retransport_cmd(struct fcp_port *pptr,
560     struct fcp_pkt *cmd);
561 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
562     uint_t statistics);
563 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
564 static void fcp_update_targets(struct fcp_port *pptr,
565     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
566 static int fcp_call_finish_init(struct fcp_port *pptr,
567     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
568 static int fcp_call_finish_init_held(struct fcp_port *pptr,
569     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
570 static void fcp_reconfigure_luns(void * tgt_handle);
571 static void fcp_free_targets(struct fcp_port *pptr);
572 static void fcp_free_target(struct fcp_tgt *ptgt);
573 static int fcp_is_retryable(struct fcp_ipkt *icmd);
574 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
575 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
576 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
577 static void fcp_print_error(fc_packet_t *fpkt);
578 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
579     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
580 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
581 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
582     uint32_t *dev_cnt);
583 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
584 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
585 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
586     struct fcp_ioctl *, struct fcp_port **);
587 static char *fcp_get_lun_path(struct fcp_lun *plun);
588 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
589     int *rval);
590 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
592 static char *fcp_get_lun_path(struct fcp_lun *plun);
593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
594     int *rval);
595 static void fcp_reconfig_wait(struct fcp_port *pptr);
596 
597 /*
598  * New functions added for mpxio support
599  */
600 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
601     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
602 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
603     int tcount);
604 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
605     dev_info_t *pdip);
606 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
607 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
608 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
609 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
610 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
611     int what);
612 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
613     fc_packet_t *fpkt);
614 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
615 
616 /*
617  * New functions added for lun masking support
618  */
619 static void fcp_read_blacklist(dev_info_t *dip,
620     struct fcp_black_list_entry **pplun_blacklist);
621 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
622     struct fcp_black_list_entry **pplun_blacklist);
623 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
624     struct fcp_black_list_entry **pplun_blacklist);
625 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
626 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
627 
628 /*
629  * New functions to support software FCA (like fcoei)
630  */
631 static struct scsi_pkt *fcp_pseudo_init_pkt(
632 	struct scsi_address *ap, struct scsi_pkt *pkt,
633 	struct buf *bp, int cmdlen, int statuslen,
634 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
635 static void fcp_pseudo_destroy_pkt(
636 	struct scsi_address *ap, struct scsi_pkt *pkt);
637 static void fcp_pseudo_sync_pkt(
638 	struct scsi_address *ap, struct scsi_pkt *pkt);
639 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
640 static void fcp_pseudo_dmafree(
641 	struct scsi_address *ap, struct scsi_pkt *pkt);
642 
643 extern struct mod_ops	mod_driverops;
644 /*
645  * This variable is defined in modctl.c and set to '1' after the root driver
646  * and fs are loaded.  It serves as an indication that the root filesystem can
647  * be used.
648  */
649 extern int		modrootloaded;
650 /*
651  * This table contains strings associated with the SCSI sense key codes.  It
652  * is used by FCP to print a clear explanation of the code returned in the
653  * sense information by a device.
654  */
655 extern char		*sense_keys[];
656 /*
657  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
658  * under this device that the paths to a physical device are created when
659  * MPxIO is used.
660  */
661 extern dev_info_t	*scsi_vhci_dip;
662 
663 /*
664  * Report lun processing
665  */
666 #define	FCP_LUN_ADDRESSING		0x80
667 #define	FCP_PD_ADDRESSING		0x00
668 #define	FCP_VOLUME_ADDRESSING		0x40
669 
670 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
671 #define	MAX_INT_DMA			0x7fffffff
672 /*
673  * Property definitions
674  */
675 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
676 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
677 #define	TARGET_PROP	(char *)fcp_target_prop
678 #define	LUN_PROP	(char *)fcp_lun_prop
679 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
680 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
681 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
682 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
683 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
684 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
685 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
686 /*
687  * Short hand macros.
688  */
689 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
690 #define	LUN_TGT		(plun->lun_tgt)
691 
692 /*
693  * Driver private macros
694  */
695 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
696 			((x) >= 'a' && (x) <= 'f') ?			\
697 			((x) - 'a' + 10) : ((x) - 'A' + 10))
698 
699 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
700 
701 #define	FCP_N_NDI_EVENTS						\
702 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
703 
704 #define	FCP_LINK_STATE_CHANGED(p, c)			\
705 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
706 
707 #define	FCP_TGT_STATE_CHANGED(t, c)			\
708 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
709 
710 #define	FCP_STATE_CHANGED(p, t, c)		\
711 	(FCP_TGT_STATE_CHANGED(t, c))
712 
713 #define	FCP_MUST_RETRY(fpkt)				\
714 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
715 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
716 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
717 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
718 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
719 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
720 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
721 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
722 
723 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
724 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
725 	(es)->es_add_code == 0x3f &&		\
726 	(es)->es_qual_code == 0x0e)
727 
728 #define	FCP_SENSE_NO_LUN(es)			\
729 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
730 	(es)->es_add_code == 0x25 &&		\
731 	(es)->es_qual_code == 0x0)
732 
733 #define	FCP_VERSION		"20091208-1.192"
734 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
735 
736 #define	FCP_NUM_ELEMENTS(array)			\
737 	(sizeof (array) / sizeof ((array)[0]))
738 
739 /*
740  * Debugging, Error reporting, and tracing
741  */
742 #define	FCP_LOG_SIZE		1024 * 1024
743 
744 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
745 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
746 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
747 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
748 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
749 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
750 #define	FCP_LEVEL_7		0x00040
751 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
752 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
753 
754 
755 
756 /*
757  * Log contents to system messages file
758  */
759 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
760 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
761 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
762 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
763 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
764 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
765 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
766 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
767 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
768 
769 
770 /*
771  * Log contents to trace buffer
772  */
773 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
774 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
775 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
776 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
777 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
778 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
779 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
780 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
781 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
782 
783 
784 /*
785  * Log contents to both system messages file and trace buffer
786  */
787 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
792 				FC_TRACE_LOG_MSG)
793 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
794 				FC_TRACE_LOG_MSG)
795 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
796 				FC_TRACE_LOG_MSG)
797 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
798 				FC_TRACE_LOG_MSG)
799 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
800 				FC_TRACE_LOG_MSG)
801 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
802 				FC_TRACE_LOG_MSG)
803 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
804 				FC_TRACE_LOG_MSG)
805 #ifdef DEBUG
806 #define	FCP_DTRACE	fc_trace_debug
807 #else
808 #define	FCP_DTRACE
809 #endif
810 
811 #define	FCP_TRACE	fc_trace_debug
812 
813 static struct cb_ops fcp_cb_ops = {
814 	fcp_open,			/* open */
815 	fcp_close,			/* close */
816 	nodev,				/* strategy */
817 	nodev,				/* print */
818 	nodev,				/* dump */
819 	nodev,				/* read */
820 	nodev,				/* write */
821 	fcp_ioctl,			/* ioctl */
822 	nodev,				/* devmap */
823 	nodev,				/* mmap */
824 	nodev,				/* segmap */
825 	nochpoll,			/* chpoll */
826 	ddi_prop_op,			/* cb_prop_op */
827 	0,				/* streamtab */
828 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
829 	CB_REV,				/* rev */
830 	nodev,				/* aread */
831 	nodev				/* awrite */
832 };
833 
834 
835 static struct dev_ops fcp_ops = {
836 	DEVO_REV,
837 	0,
838 	ddi_getinfo_1to1,
839 	nulldev,		/* identify */
840 	nulldev,		/* probe */
841 	fcp_attach,		/* attach and detach are mandatory */
842 	fcp_detach,
843 	nodev,			/* reset */
844 	&fcp_cb_ops,		/* cb_ops */
845 	NULL,			/* bus_ops */
846 	NULL,			/* power */
847 };
848 
849 
850 char *fcp_version = FCP_NAME_VERSION;
851 
852 static struct modldrv modldrv = {
853 	&mod_driverops,
854 	FCP_NAME_VERSION,
855 	&fcp_ops
856 };
857 
858 
859 static struct modlinkage modlinkage = {
860 	MODREV_1,
861 	&modldrv,
862 	NULL
863 };
864 
865 
866 static fc_ulp_modinfo_t fcp_modinfo = {
867 	&fcp_modinfo,			/* ulp_handle */
868 	FCTL_ULP_MODREV_4,		/* ulp_rev */
869 	FC4_SCSI_FCP,			/* ulp_type */
870 	"fcp",				/* ulp_name */
871 	FCP_STATEC_MASK,		/* ulp_statec_mask */
872 	fcp_port_attach,		/* ulp_port_attach */
873 	fcp_port_detach,		/* ulp_port_detach */
874 	fcp_port_ioctl,			/* ulp_port_ioctl */
875 	fcp_els_callback,		/* ulp_els_callback */
876 	fcp_data_callback,		/* ulp_data_callback */
877 	fcp_statec_callback		/* ulp_statec_callback */
878 };
879 
880 #ifdef	DEBUG
881 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
882 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
883 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
884 				FCP_LEVEL_6 | FCP_LEVEL_7)
885 #else
886 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
887 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
888 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
889 				FCP_LEVEL_6 | FCP_LEVEL_7)
890 #endif
891 
892 /* FCP global variables */
893 int			fcp_bus_config_debug = 0;
894 static int		fcp_log_size = FCP_LOG_SIZE;
895 static int		fcp_trace = FCP_TRACE_DEFAULT;
896 static fc_trace_logq_t	*fcp_logq = NULL;
897 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
898 /*
899  * The auto-configuration is set by default.  The only way of disabling it is
900  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
901  */
902 static int		fcp_enable_auto_configuration = 1;
903 static int		fcp_max_bus_config_retries	= 4;
904 static int		fcp_lun_ready_retry = 300;
905 /*
906  * The value assigned to the following variable has changed several times due
907  * to a problem with the data underruns reporting of some firmware(s).	The
908  * current value of 50 gives a timeout value of 25 seconds for a max number
909  * of 256 LUNs.
910  */
911 static int		fcp_max_target_retries = 50;
912 /*
913  * Watchdog variables
914  * ------------------
915  *
916  * fcp_watchdog_init
917  *
918  *	Indicates if the watchdog timer is running or not.  This is actually
919  *	a counter of the number of Fibre Channel ports that attached.  When
920  *	the first port attaches the watchdog is started.  When the last port
921  *	detaches the watchdog timer is stopped.
922  *
923  * fcp_watchdog_time
924  *
925  *	This is the watchdog clock counter.  It is incremented by
926  *	fcp_watchdog_time each time the watchdog timer expires.
927  *
928  * fcp_watchdog_timeout
929  *
930  *	Increment value of the variable fcp_watchdog_time as well as the
931  *	the timeout value of the watchdog timer.  The unit is 1 second.	 It
932  *	is strange that this is not a #define	but a variable since the code
933  *	never changes this value.  The reason why it can be said that the
934  *	unit is 1 second is because the number of ticks for the watchdog
935  *	timer is determined like this:
936  *
937  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
938  *				  drv_usectohz(1000000);
939  *
940  *	The value 1000000 is hard coded in the code.
941  *
942  * fcp_watchdog_tick
943  *
944  *	Watchdog timer value in ticks.
945  */
946 static int		fcp_watchdog_init = 0;
947 static int		fcp_watchdog_time = 0;
948 static int		fcp_watchdog_timeout = 1;
949 static int		fcp_watchdog_tick;
950 
951 /*
952  * fcp_offline_delay is a global variable to enable customisation of
953  * the timeout on link offlines or RSCNs. The default value is set
954  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
955  * specified in FCP4 Chapter 11 (see www.t10.org).
956  *
957  * The variable fcp_offline_delay is specified in SECONDS.
958  *
959  * If we made this a static var then the user would not be able to
960  * change it. This variable is set in fcp_attach().
961  */
962 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
963 
964 static void		*fcp_softstate = NULL; /* for soft state */
965 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
966 static kmutex_t		fcp_global_mutex;
967 static kmutex_t		fcp_ioctl_mutex;
968 static dev_info_t	*fcp_global_dip = NULL;
969 static timeout_id_t	fcp_watchdog_id;
970 const char		*fcp_lun_prop = "lun";
971 const char		*fcp_sam_lun_prop = "sam-lun";
972 const char		*fcp_target_prop = "target";
973 /*
974  * NOTE: consumers of "node-wwn" property include stmsboot in ON
975  * consolidation.
976  */
977 const char		*fcp_node_wwn_prop = "node-wwn";
978 const char		*fcp_port_wwn_prop = "port-wwn";
979 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
980 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
981 const char		*fcp_manual_config_only = "manual_configuration_only";
982 const char		*fcp_init_port_prop = "initiator-port";
983 const char		*fcp_tgt_port_prop = "target-port";
984 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
985 
986 static struct fcp_port	*fcp_port_head = NULL;
987 static ddi_eventcookie_t	fcp_insert_eid;
988 static ddi_eventcookie_t	fcp_remove_eid;
989 
990 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
991 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
992 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
993 };
994 
995 /*
996  * List of valid commands for the scsi_ioctl call
997  */
998 static uint8_t scsi_ioctl_list[] = {
999 	SCMD_INQUIRY,
1000 	SCMD_REPORT_LUN,
1001 	SCMD_READ_CAPACITY
1002 };
1003 
1004 /*
1005  * this is used to dummy up a report lun response for cases
1006  * where the target doesn't support it
1007  */
1008 static uchar_t fcp_dummy_lun[] = {
1009 	0x00,		/* MSB length (length = no of luns * 8) */
1010 	0x00,
1011 	0x00,
1012 	0x08,		/* LSB length */
1013 	0x00,		/* MSB reserved */
1014 	0x00,
1015 	0x00,
1016 	0x00,		/* LSB reserved */
1017 	FCP_PD_ADDRESSING,
1018 	0x00,		/* LUN is ZERO at the first level */
1019 	0x00,
1020 	0x00,		/* second level is zero */
1021 	0x00,
1022 	0x00,		/* third level is zero */
1023 	0x00,
1024 	0x00		/* fourth level is zero */
1025 };
1026 
1027 static uchar_t fcp_alpa_to_switch[] = {
1028 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1029 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1030 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1031 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1032 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1033 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1034 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1035 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1036 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1037 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1038 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1039 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1040 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1041 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1042 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1043 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1044 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1045 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1046 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1047 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1048 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1049 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1050 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1051 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1052 };
1053 
1054 static caddr_t pid = "SESS01	      ";
1055 
1056 #if	!defined(lint)
1057 
1058 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1059     fcp_port::fcp_next fcp_watchdog_id))
1060 
1061 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1062 
1063 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1064     fcp_insert_eid
1065     fcp_remove_eid
1066     fcp_watchdog_time))
1067 
1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1069     fcp_cb_ops
1070     fcp_ops
1071     callb_cpr))
1072 
1073 #endif /* lint */
1074 
1075 /*
1076  * This table is used to determine whether or not it's safe to copy in
1077  * the target node name for a lun.  Since all luns behind the same target
1078  * have the same wwnn, only tagets that do not support multiple luns are
1079  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1080  */
1081 
1082 char *fcp_symmetric_disk_table[] = {
1083 	"SEAGATE ST",
1084 	"IBM	 DDYFT",
1085 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1086 	"SUN	 SENA",		/* SES device */
1087 	"SUN	 SESS01"	/* VICOM SVE box */
1088 };
1089 
1090 int fcp_symmetric_disk_table_size =
1091 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1092 
1093 /*
1094  * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
1095  * will panic if you don't pass this in to the routine, this information.
1096  * Need to determine what the actual impact to the system is by providing
1097  * this information if any. Since dma allocation is done in pkt_init it may
1098  * not have any impact. These values are straight from the Writing Device
1099  * Driver manual.
1100  */
1101 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1102 	DMA_ATTR_V0,	/* ddi_dma_attr version */
1103 	0,		/* low address */
1104 	0xffffffff,	/* high address */
1105 	0x00ffffff,	/* counter upper bound */
1106 	1,		/* alignment requirements */
1107 	0x3f,		/* burst sizes */
1108 	1,		/* minimum DMA access */
1109 	0xffffffff,	/* maximum DMA access */
1110 	(1 << 24) - 1,	/* segment boundary restrictions */
1111 	1,		/* scater/gather list length */
1112 	512,		/* device granularity */
1113 	0		/* DMA flags */
1114 };
1115 
1116 /*
1117  * The _init(9e) return value should be that of mod_install(9f). Under
1118  * some circumstances, a failure may not be related mod_install(9f) and
1119  * one would then require a return value to indicate the failure. Looking
1120  * at mod_install(9f), it is expected to return 0 for success and non-zero
1121  * for failure. mod_install(9f) for device drivers, further goes down the
1122  * calling chain and ends up in ddi_installdrv(), whose return values are
1123  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1124  * calling chain of mod_install(9f) which return values like EINVAL and
1125  * in some even return -1.
1126  *
1127  * To work around the vagaries of the mod_install() calling chain, return
1128  * either 0 or ENODEV depending on the success or failure of mod_install()
1129  */
1130 int
1131 _init(void)
1132 {
1133 	int rval;
1134 
1135 	/*
1136 	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1137 	 * before registering with the transport first.
1138 	 */
1139 	if (ddi_soft_state_init(&fcp_softstate,
1140 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1141 		return (EINVAL);
1142 	}
1143 
1144 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1145 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1146 
1147 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1148 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1149 		mutex_destroy(&fcp_global_mutex);
1150 		mutex_destroy(&fcp_ioctl_mutex);
1151 		ddi_soft_state_fini(&fcp_softstate);
1152 		return (ENODEV);
1153 	}
1154 
1155 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1156 
1157 	if ((rval = mod_install(&modlinkage)) != 0) {
1158 		fc_trace_free_logq(fcp_logq);
1159 		(void) fc_ulp_remove(&fcp_modinfo);
1160 		mutex_destroy(&fcp_global_mutex);
1161 		mutex_destroy(&fcp_ioctl_mutex);
1162 		ddi_soft_state_fini(&fcp_softstate);
1163 		rval = ENODEV;
1164 	}
1165 
1166 	return (rval);
1167 }
1168 
1169 
1170 /*
1171  * the system is done with us as a driver, so clean up
1172  */
1173 int
1174 _fini(void)
1175 {
1176 	int rval;
1177 
1178 	/*
1179 	 * don't start cleaning up until we know that the module remove
1180 	 * has worked  -- if this works, then we know that each instance
1181 	 * has successfully been DDI_DETACHed
1182 	 */
1183 	if ((rval = mod_remove(&modlinkage)) != 0) {
1184 		return (rval);
1185 	}
1186 
1187 	(void) fc_ulp_remove(&fcp_modinfo);
1188 
1189 	ddi_soft_state_fini(&fcp_softstate);
1190 	mutex_destroy(&fcp_global_mutex);
1191 	mutex_destroy(&fcp_ioctl_mutex);
1192 	fc_trace_free_logq(fcp_logq);
1193 
1194 	return (rval);
1195 }
1196 
1197 
1198 int
1199 _info(struct modinfo *modinfop)
1200 {
1201 	return (mod_info(&modlinkage, modinfop));
1202 }
1203 
1204 
1205 /*
1206  * attach the module
1207  */
1208 static int
1209 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1210 {
1211 	int rval = DDI_SUCCESS;
1212 
1213 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1214 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1215 
1216 	if (cmd == DDI_ATTACH) {
1217 		/* The FCP pseudo device is created here. */
1218 		mutex_enter(&fcp_global_mutex);
1219 		fcp_global_dip = devi;
1220 		mutex_exit(&fcp_global_mutex);
1221 
1222 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1223 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1224 			ddi_report_dev(fcp_global_dip);
1225 		} else {
1226 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1227 			mutex_enter(&fcp_global_mutex);
1228 			fcp_global_dip = NULL;
1229 			mutex_exit(&fcp_global_mutex);
1230 
1231 			rval = DDI_FAILURE;
1232 		}
1233 		/*
1234 		 * We check the fcp_offline_delay property at this
1235 		 * point. This variable is global for the driver,
1236 		 * not specific to an instance.
1237 		 *
1238 		 * We do not recommend setting the value to less
1239 		 * than 10 seconds (RA_TOV_els), or greater than
1240 		 * 60 seconds.
1241 		 */
1242 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1243 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1244 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1245 		if ((fcp_offline_delay < 10) ||
1246 		    (fcp_offline_delay > 60)) {
1247 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1248 			    "to %d second(s). This is outside the "
1249 			    "recommended range of 10..60 seconds.",
1250 			    fcp_offline_delay);
1251 		}
1252 	}
1253 
1254 	return (rval);
1255 }
1256 
1257 
1258 /*ARGSUSED*/
1259 static int
1260 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1261 {
1262 	int	res = DDI_SUCCESS;
1263 
1264 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1265 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1266 
1267 	if (cmd == DDI_DETACH) {
1268 		/*
1269 		 * Check if there are active ports/threads. If there
1270 		 * are any, we will fail, else we will succeed (there
1271 		 * should not be much to clean up)
1272 		 */
1273 		mutex_enter(&fcp_global_mutex);
1274 		FCP_DTRACE(fcp_logq, "fcp",
1275 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1276 		    (void *) fcp_port_head);
1277 
1278 		if (fcp_port_head == NULL) {
1279 			ddi_remove_minor_node(fcp_global_dip, NULL);
1280 			fcp_global_dip = NULL;
1281 			mutex_exit(&fcp_global_mutex);
1282 		} else {
1283 			mutex_exit(&fcp_global_mutex);
1284 			res = DDI_FAILURE;
1285 		}
1286 	}
1287 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1288 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1289 
1290 	return (res);
1291 }
1292 
1293 
1294 /* ARGSUSED */
1295 static int
1296 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1297 {
1298 	if (otype != OTYP_CHR) {
1299 		return (EINVAL);
1300 	}
1301 
1302 	/*
1303 	 * Allow only root to talk;
1304 	 */
1305 	if (drv_priv(credp)) {
1306 		return (EPERM);
1307 	}
1308 
1309 	mutex_enter(&fcp_global_mutex);
1310 	if (fcp_oflag & FCP_EXCL) {
1311 		mutex_exit(&fcp_global_mutex);
1312 		return (EBUSY);
1313 	}
1314 
1315 	if (flag & FEXCL) {
1316 		if (fcp_oflag & FCP_OPEN) {
1317 			mutex_exit(&fcp_global_mutex);
1318 			return (EBUSY);
1319 		}
1320 		fcp_oflag |= FCP_EXCL;
1321 	}
1322 	fcp_oflag |= FCP_OPEN;
1323 	mutex_exit(&fcp_global_mutex);
1324 
1325 	return (0);
1326 }
1327 
1328 
1329 /* ARGSUSED */
1330 static int
1331 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1332 {
1333 	if (otype != OTYP_CHR) {
1334 		return (EINVAL);
1335 	}
1336 
1337 	mutex_enter(&fcp_global_mutex);
1338 	if (!(fcp_oflag & FCP_OPEN)) {
1339 		mutex_exit(&fcp_global_mutex);
1340 		return (ENODEV);
1341 	}
1342 	fcp_oflag = FCP_IDLE;
1343 	mutex_exit(&fcp_global_mutex);
1344 
1345 	return (0);
1346 }
1347 
1348 
1349 /*
1350  * fcp_ioctl
1351  *	Entry point for the FCP ioctls
1352  *
1353  * Input:
1354  *	See ioctl(9E)
1355  *
1356  * Output:
1357  *	See ioctl(9E)
1358  *
1359  * Returns:
1360  *	See ioctl(9E)
1361  *
1362  * Context:
1363  *	Kernel context.
1364  */
1365 /* ARGSUSED */
1366 static int
1367 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1368     int *rval)
1369 {
1370 	int			ret = 0;
1371 
1372 	mutex_enter(&fcp_global_mutex);
1373 	if (!(fcp_oflag & FCP_OPEN)) {
1374 		mutex_exit(&fcp_global_mutex);
1375 		return (ENXIO);
1376 	}
1377 	mutex_exit(&fcp_global_mutex);
1378 
1379 	switch (cmd) {
1380 	case FCP_TGT_INQUIRY:
1381 	case FCP_TGT_CREATE:
1382 	case FCP_TGT_DELETE:
1383 		ret = fcp_setup_device_data_ioctl(cmd,
1384 		    (struct fcp_ioctl *)data, mode, rval);
1385 		break;
1386 
1387 	case FCP_TGT_SEND_SCSI:
1388 		mutex_enter(&fcp_ioctl_mutex);
1389 		ret = fcp_setup_scsi_ioctl(
1390 		    (struct fcp_scsi_cmd *)data, mode, rval);
1391 		mutex_exit(&fcp_ioctl_mutex);
1392 		break;
1393 
1394 	case FCP_STATE_COUNT:
1395 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1396 		    mode, rval);
1397 		break;
1398 	case FCP_GET_TARGET_MAPPINGS:
1399 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1400 		    mode, rval);
1401 		break;
1402 	default:
1403 		fcp_log(CE_WARN, NULL,
1404 		    "!Invalid ioctl opcode = 0x%x", cmd);
1405 		ret	= EINVAL;
1406 	}
1407 
1408 	return (ret);
1409 }
1410 
1411 
1412 /*
1413  * fcp_setup_device_data_ioctl
1414  *	Setup handler for the "device data" style of
1415  *	ioctl for FCP.	See "fcp_util.h" for data structure
1416  *	definition.
1417  *
1418  * Input:
1419  *	cmd	= FCP ioctl command
1420  *	data	= ioctl data
1421  *	mode	= See ioctl(9E)
1422  *
1423  * Output:
1424  *	data	= ioctl data
1425  *	rval	= return value - see ioctl(9E)
1426  *
1427  * Returns:
1428  *	See ioctl(9E)
1429  *
1430  * Context:
1431  *	Kernel context.
1432  */
1433 /* ARGSUSED */
1434 static int
1435 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1436     int *rval)
1437 {
1438 	struct fcp_port	*pptr;
1439 	struct	device_data	*dev_data;
1440 	uint32_t		link_cnt;
1441 	la_wwn_t		*wwn_ptr = NULL;
1442 	struct fcp_tgt		*ptgt = NULL;
1443 	struct fcp_lun		*plun = NULL;
1444 	int			i, error;
1445 	struct fcp_ioctl	fioctl;
1446 
1447 #ifdef	_MULTI_DATAMODEL
1448 	switch (ddi_model_convert_from(mode & FMODELS)) {
1449 	case DDI_MODEL_ILP32: {
1450 		struct fcp32_ioctl f32_ioctl;
1451 
1452 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1453 		    sizeof (struct fcp32_ioctl), mode)) {
1454 			return (EFAULT);
1455 		}
1456 		fioctl.fp_minor = f32_ioctl.fp_minor;
1457 		fioctl.listlen = f32_ioctl.listlen;
1458 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1459 		break;
1460 	}
1461 	case DDI_MODEL_NONE:
1462 		if (ddi_copyin((void *)data, (void *)&fioctl,
1463 		    sizeof (struct fcp_ioctl), mode)) {
1464 			return (EFAULT);
1465 		}
1466 		break;
1467 	}
1468 
1469 #else	/* _MULTI_DATAMODEL */
1470 	if (ddi_copyin((void *)data, (void *)&fioctl,
1471 	    sizeof (struct fcp_ioctl), mode)) {
1472 		return (EFAULT);
1473 	}
1474 #endif	/* _MULTI_DATAMODEL */
1475 
1476 	/*
1477 	 * Right now we can assume that the minor number matches with
1478 	 * this instance of fp. If this changes we will need to
1479 	 * revisit this logic.
1480 	 */
1481 	mutex_enter(&fcp_global_mutex);
1482 	pptr = fcp_port_head;
1483 	while (pptr) {
1484 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1485 			break;
1486 		} else {
1487 			pptr = pptr->port_next;
1488 		}
1489 	}
1490 	mutex_exit(&fcp_global_mutex);
1491 	if (pptr == NULL) {
1492 		return (ENXIO);
1493 	}
1494 	mutex_enter(&pptr->port_mutex);
1495 
1496 
1497 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1498 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1499 		mutex_exit(&pptr->port_mutex);
1500 		return (ENOMEM);
1501 	}
1502 
1503 	if (ddi_copyin(fioctl.list, dev_data,
1504 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1505 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1506 		mutex_exit(&pptr->port_mutex);
1507 		return (EFAULT);
1508 	}
1509 	link_cnt = pptr->port_link_cnt;
1510 
1511 	if (cmd == FCP_TGT_INQUIRY) {
1512 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1513 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1514 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1515 			/* This ioctl is requesting INQ info of local HBA */
1516 			mutex_exit(&pptr->port_mutex);
1517 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1518 			dev_data[0].dev_status = 0;
1519 			if (ddi_copyout(dev_data, fioctl.list,
1520 			    (sizeof (struct device_data)) * fioctl.listlen,
1521 			    mode)) {
1522 				kmem_free(dev_data,
1523 				    sizeof (*dev_data) * fioctl.listlen);
1524 				return (EFAULT);
1525 			}
1526 			kmem_free(dev_data,
1527 			    sizeof (*dev_data) * fioctl.listlen);
1528 #ifdef	_MULTI_DATAMODEL
1529 			switch (ddi_model_convert_from(mode & FMODELS)) {
1530 			case DDI_MODEL_ILP32: {
1531 				struct fcp32_ioctl f32_ioctl;
1532 				f32_ioctl.fp_minor = fioctl.fp_minor;
1533 				f32_ioctl.listlen = fioctl.listlen;
1534 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1535 				if (ddi_copyout((void *)&f32_ioctl,
1536 				    (void *)data,
1537 				    sizeof (struct fcp32_ioctl), mode)) {
1538 					return (EFAULT);
1539 				}
1540 				break;
1541 			}
1542 			case DDI_MODEL_NONE:
1543 				if (ddi_copyout((void *)&fioctl, (void *)data,
1544 				    sizeof (struct fcp_ioctl), mode)) {
1545 					return (EFAULT);
1546 				}
1547 				break;
1548 			}
1549 #else	/* _MULTI_DATAMODEL */
1550 			if (ddi_copyout((void *)&fioctl, (void *)data,
1551 			    sizeof (struct fcp_ioctl), mode)) {
1552 				return (EFAULT);
1553 			}
1554 #endif	/* _MULTI_DATAMODEL */
1555 			return (0);
1556 		}
1557 	}
1558 
1559 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1560 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1561 		mutex_exit(&pptr->port_mutex);
1562 		return (ENXIO);
1563 	}
1564 
1565 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1566 	    i++) {
1567 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1568 
1569 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1570 
1571 
1572 		dev_data[i].dev_status = ENXIO;
1573 
1574 		if ((ptgt = fcp_lookup_target(pptr,
1575 		    (uchar_t *)wwn_ptr)) == NULL) {
1576 			mutex_exit(&pptr->port_mutex);
1577 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1578 			    wwn_ptr, &error, 0) == NULL) {
1579 				dev_data[i].dev_status = ENODEV;
1580 				mutex_enter(&pptr->port_mutex);
1581 				continue;
1582 			} else {
1583 
1584 				dev_data[i].dev_status = EAGAIN;
1585 
1586 				mutex_enter(&pptr->port_mutex);
1587 				continue;
1588 			}
1589 		} else {
1590 			mutex_enter(&ptgt->tgt_mutex);
1591 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1592 			    FCP_TGT_BUSY)) {
1593 				dev_data[i].dev_status = EAGAIN;
1594 				mutex_exit(&ptgt->tgt_mutex);
1595 				continue;
1596 			}
1597 
1598 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1599 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1600 					dev_data[i].dev_status = ENOTSUP;
1601 				} else {
1602 					dev_data[i].dev_status = ENXIO;
1603 				}
1604 				mutex_exit(&ptgt->tgt_mutex);
1605 				continue;
1606 			}
1607 
1608 			switch (cmd) {
1609 			case FCP_TGT_INQUIRY:
1610 				/*
1611 				 * The reason we give device type of
1612 				 * lun 0 only even though in some
1613 				 * cases(like maxstrat) lun 0 device
1614 				 * type may be 0x3f(invalid) is that
1615 				 * for bridge boxes target will appear
1616 				 * as luns and the first lun could be
1617 				 * a device that utility may not care
1618 				 * about (like a tape device).
1619 				 */
1620 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1621 				dev_data[i].dev_status = 0;
1622 				mutex_exit(&ptgt->tgt_mutex);
1623 
1624 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1625 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1626 				} else {
1627 					dev_data[i].dev0_type = plun->lun_type;
1628 				}
1629 				mutex_enter(&ptgt->tgt_mutex);
1630 				break;
1631 
1632 			case FCP_TGT_CREATE:
1633 				mutex_exit(&ptgt->tgt_mutex);
1634 				mutex_exit(&pptr->port_mutex);
1635 
1636 				/*
1637 				 * serialize state change call backs.
1638 				 * only one call back will be handled
1639 				 * at a time.
1640 				 */
1641 				mutex_enter(&fcp_global_mutex);
1642 				if (fcp_oflag & FCP_BUSY) {
1643 					mutex_exit(&fcp_global_mutex);
1644 					if (dev_data) {
1645 						kmem_free(dev_data,
1646 						    sizeof (*dev_data) *
1647 						    fioctl.listlen);
1648 					}
1649 					return (EBUSY);
1650 				}
1651 				fcp_oflag |= FCP_BUSY;
1652 				mutex_exit(&fcp_global_mutex);
1653 
1654 				dev_data[i].dev_status =
1655 				    fcp_create_on_demand(pptr,
1656 				    wwn_ptr->raw_wwn);
1657 
1658 				if (dev_data[i].dev_status != 0) {
1659 					char	buf[25];
1660 
1661 					for (i = 0; i < FC_WWN_SIZE; i++) {
1662 						(void) sprintf(&buf[i << 1],
1663 						    "%02x",
1664 						    wwn_ptr->raw_wwn[i]);
1665 					}
1666 
1667 					fcp_log(CE_WARN, pptr->port_dip,
1668 					    "!Failed to create nodes for"
1669 					    " pwwn=%s; error=%x", buf,
1670 					    dev_data[i].dev_status);
1671 				}
1672 
1673 				/* allow state change call backs again */
1674 				mutex_enter(&fcp_global_mutex);
1675 				fcp_oflag &= ~FCP_BUSY;
1676 				mutex_exit(&fcp_global_mutex);
1677 
1678 				mutex_enter(&pptr->port_mutex);
1679 				mutex_enter(&ptgt->tgt_mutex);
1680 
1681 				break;
1682 
1683 			case FCP_TGT_DELETE:
1684 				break;
1685 
1686 			default:
1687 				fcp_log(CE_WARN, pptr->port_dip,
1688 				    "!Invalid device data ioctl "
1689 				    "opcode = 0x%x", cmd);
1690 			}
1691 			mutex_exit(&ptgt->tgt_mutex);
1692 		}
1693 	}
1694 	mutex_exit(&pptr->port_mutex);
1695 
1696 	if (ddi_copyout(dev_data, fioctl.list,
1697 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1698 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1699 		return (EFAULT);
1700 	}
1701 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1702 
1703 #ifdef	_MULTI_DATAMODEL
1704 	switch (ddi_model_convert_from(mode & FMODELS)) {
1705 	case DDI_MODEL_ILP32: {
1706 		struct fcp32_ioctl f32_ioctl;
1707 
1708 		f32_ioctl.fp_minor = fioctl.fp_minor;
1709 		f32_ioctl.listlen = fioctl.listlen;
1710 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1711 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1712 		    sizeof (struct fcp32_ioctl), mode)) {
1713 			return (EFAULT);
1714 		}
1715 		break;
1716 	}
1717 	case DDI_MODEL_NONE:
1718 		if (ddi_copyout((void *)&fioctl, (void *)data,
1719 		    sizeof (struct fcp_ioctl), mode)) {
1720 			return (EFAULT);
1721 		}
1722 		break;
1723 	}
1724 #else	/* _MULTI_DATAMODEL */
1725 
1726 	if (ddi_copyout((void *)&fioctl, (void *)data,
1727 	    sizeof (struct fcp_ioctl), mode)) {
1728 		return (EFAULT);
1729 	}
1730 #endif	/* _MULTI_DATAMODEL */
1731 
1732 	return (0);
1733 }
1734 
1735 /*
1736  * Fetch the target mappings (path, etc.) for all LUNs
1737  * on this port.
1738  */
1739 /* ARGSUSED */
1740 static int
1741 fcp_get_target_mappings(struct fcp_ioctl *data,
1742     int mode, int *rval)
1743 {
1744 	struct fcp_port	    *pptr;
1745 	fc_hba_target_mappings_t    *mappings;
1746 	fc_hba_mapping_entry_t	    *map;
1747 	struct fcp_tgt	    *ptgt = NULL;
1748 	struct fcp_lun	    *plun = NULL;
1749 	int			    i, mapIndex, mappingSize;
1750 	int			    listlen;
1751 	struct fcp_ioctl	    fioctl;
1752 	char			    *path;
1753 	fcp_ent_addr_t		    sam_lun_addr;
1754 
1755 #ifdef	_MULTI_DATAMODEL
1756 	switch (ddi_model_convert_from(mode & FMODELS)) {
1757 	case DDI_MODEL_ILP32: {
1758 		struct fcp32_ioctl f32_ioctl;
1759 
1760 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1761 		    sizeof (struct fcp32_ioctl), mode)) {
1762 			return (EFAULT);
1763 		}
1764 		fioctl.fp_minor = f32_ioctl.fp_minor;
1765 		fioctl.listlen = f32_ioctl.listlen;
1766 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1767 		break;
1768 	}
1769 	case DDI_MODEL_NONE:
1770 		if (ddi_copyin((void *)data, (void *)&fioctl,
1771 		    sizeof (struct fcp_ioctl), mode)) {
1772 			return (EFAULT);
1773 		}
1774 		break;
1775 	}
1776 
1777 #else	/* _MULTI_DATAMODEL */
1778 	if (ddi_copyin((void *)data, (void *)&fioctl,
1779 	    sizeof (struct fcp_ioctl), mode)) {
1780 		return (EFAULT);
1781 	}
1782 #endif	/* _MULTI_DATAMODEL */
1783 
1784 	/*
1785 	 * Right now we can assume that the minor number matches with
1786 	 * this instance of fp. If this changes we will need to
1787 	 * revisit this logic.
1788 	 */
1789 	mutex_enter(&fcp_global_mutex);
1790 	pptr = fcp_port_head;
1791 	while (pptr) {
1792 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1793 			break;
1794 		} else {
1795 			pptr = pptr->port_next;
1796 		}
1797 	}
1798 	mutex_exit(&fcp_global_mutex);
1799 	if (pptr == NULL) {
1800 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1801 		    fioctl.fp_minor);
1802 		return (ENXIO);
1803 	}
1804 
1805 
1806 	/* We use listlen to show the total buffer size */
1807 	mappingSize = fioctl.listlen;
1808 
1809 	/* Now calculate how many mapping entries will fit */
1810 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1811 	    - sizeof (fc_hba_target_mappings_t);
1812 	if (listlen <= 0) {
1813 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1814 		return (ENXIO);
1815 	}
1816 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1817 
1818 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1819 		return (ENOMEM);
1820 	}
1821 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1822 
1823 	/* Now get to work */
1824 	mapIndex = 0;
1825 
1826 	mutex_enter(&pptr->port_mutex);
1827 	/* Loop through all targets on this port */
1828 	for (i = 0; i < FCP_NUM_HASH; i++) {
1829 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1830 		    ptgt = ptgt->tgt_next) {
1831 
1832 			mutex_enter(&ptgt->tgt_mutex);
1833 
1834 			/* Loop through all LUNs on this target */
1835 			for (plun = ptgt->tgt_lun; plun != NULL;
1836 			    plun = plun->lun_next) {
1837 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1838 					continue;
1839 				}
1840 
1841 				path = fcp_get_lun_path(plun);
1842 				if (path == NULL) {
1843 					continue;
1844 				}
1845 
1846 				if (mapIndex >= listlen) {
1847 					mapIndex ++;
1848 					kmem_free(path, MAXPATHLEN);
1849 					continue;
1850 				}
1851 				map = &mappings->entries[mapIndex++];
1852 				bcopy(path, map->targetDriver,
1853 				    sizeof (map->targetDriver));
1854 				map->d_id = ptgt->tgt_d_id;
1855 				map->busNumber = 0;
1856 				map->targetNumber = ptgt->tgt_d_id;
1857 				map->osLUN = plun->lun_num;
1858 
1859 				/*
1860 				 * We had swapped lun when we stored it in
1861 				 * lun_addr. We need to swap it back before
1862 				 * returning it to user land
1863 				 */
1864 
1865 				sam_lun_addr.ent_addr_0 =
1866 				    BE_16(plun->lun_addr.ent_addr_0);
1867 				sam_lun_addr.ent_addr_1 =
1868 				    BE_16(plun->lun_addr.ent_addr_1);
1869 				sam_lun_addr.ent_addr_2 =
1870 				    BE_16(plun->lun_addr.ent_addr_2);
1871 				sam_lun_addr.ent_addr_3 =
1872 				    BE_16(plun->lun_addr.ent_addr_3);
1873 
1874 				bcopy(&sam_lun_addr, &map->samLUN,
1875 				    FCP_LUN_SIZE);
1876 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1877 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1878 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1879 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1880 
1881 				if (plun->lun_guid) {
1882 
1883 					/* convert ascii wwn to bytes */
1884 					fcp_ascii_to_wwn(plun->lun_guid,
1885 					    map->guid, sizeof (map->guid));
1886 
1887 					if ((sizeof (map->guid)) <
1888 					    plun->lun_guid_size / 2) {
1889 						cmn_err(CE_WARN,
1890 						    "fcp_get_target_mappings:"
1891 						    "guid copy space "
1892 						    "insufficient."
1893 						    "Copy Truncation - "
1894 						    "available %d; need %d",
1895 						    (int)sizeof (map->guid),
1896 						    (int)
1897 						    plun->lun_guid_size / 2);
1898 					}
1899 				}
1900 				kmem_free(path, MAXPATHLEN);
1901 			}
1902 			mutex_exit(&ptgt->tgt_mutex);
1903 		}
1904 	}
1905 	mutex_exit(&pptr->port_mutex);
1906 	mappings->numLuns = mapIndex;
1907 
1908 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1909 		kmem_free(mappings, mappingSize);
1910 		return (EFAULT);
1911 	}
1912 	kmem_free(mappings, mappingSize);
1913 
1914 #ifdef	_MULTI_DATAMODEL
1915 	switch (ddi_model_convert_from(mode & FMODELS)) {
1916 	case DDI_MODEL_ILP32: {
1917 		struct fcp32_ioctl f32_ioctl;
1918 
1919 		f32_ioctl.fp_minor = fioctl.fp_minor;
1920 		f32_ioctl.listlen = fioctl.listlen;
1921 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1922 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1923 		    sizeof (struct fcp32_ioctl), mode)) {
1924 			return (EFAULT);
1925 		}
1926 		break;
1927 	}
1928 	case DDI_MODEL_NONE:
1929 		if (ddi_copyout((void *)&fioctl, (void *)data,
1930 		    sizeof (struct fcp_ioctl), mode)) {
1931 			return (EFAULT);
1932 		}
1933 		break;
1934 	}
1935 #else	/* _MULTI_DATAMODEL */
1936 
1937 	if (ddi_copyout((void *)&fioctl, (void *)data,
1938 	    sizeof (struct fcp_ioctl), mode)) {
1939 		return (EFAULT);
1940 	}
1941 #endif	/* _MULTI_DATAMODEL */
1942 
1943 	return (0);
1944 }
1945 
1946 /*
1947  * fcp_setup_scsi_ioctl
1948  *	Setup handler for the "scsi passthru" style of
1949  *	ioctl for FCP.	See "fcp_util.h" for data structure
1950  *	definition.
1951  *
1952  * Input:
1953  *	u_fscsi	= ioctl data (user address space)
1954  *	mode	= See ioctl(9E)
1955  *
1956  * Output:
1957  *	u_fscsi	= ioctl data (user address space)
1958  *	rval	= return value - see ioctl(9E)
1959  *
1960  * Returns:
1961  *	0	= OK
1962  *	EAGAIN	= See errno.h
1963  *	EBUSY	= See errno.h
1964  *	EFAULT	= See errno.h
1965  *	EINTR	= See errno.h
1966  *	EINVAL	= See errno.h
1967  *	EIO	= See errno.h
1968  *	ENOMEM	= See errno.h
1969  *	ENXIO	= See errno.h
1970  *
1971  * Context:
1972  *	Kernel context.
1973  */
1974 /* ARGSUSED */
1975 static int
1976 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1977     int mode, int *rval)
1978 {
1979 	int			ret		= 0;
1980 	int			temp_ret;
1981 	caddr_t			k_cdbbufaddr	= NULL;
1982 	caddr_t			k_bufaddr	= NULL;
1983 	caddr_t			k_rqbufaddr	= NULL;
1984 	caddr_t			u_cdbbufaddr;
1985 	caddr_t			u_bufaddr;
1986 	caddr_t			u_rqbufaddr;
1987 	struct fcp_scsi_cmd	k_fscsi;
1988 
1989 	/*
1990 	 * Get fcp_scsi_cmd array element from user address space
1991 	 */
1992 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1993 	    != 0) {
1994 		return (ret);
1995 	}
1996 
1997 
1998 	/*
1999 	 * Even though kmem_alloc() checks the validity of the
2000 	 * buffer length, this check is needed when the
2001 	 * kmem_flags set and the zero buffer length is passed.
2002 	 */
2003 	if ((k_fscsi.scsi_cdblen <= 0) ||
2004 	    (k_fscsi.scsi_buflen <= 0) ||
2005 	    (k_fscsi.scsi_rqlen <= 0)) {
2006 		return (EINVAL);
2007 	}
2008 
2009 	/*
2010 	 * Allocate data for fcp_scsi_cmd pointer fields
2011 	 */
2012 	if (ret == 0) {
2013 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2014 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2015 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
2016 
2017 		if (k_cdbbufaddr == NULL ||
2018 		    k_bufaddr	 == NULL ||
2019 		    k_rqbufaddr	 == NULL) {
2020 			ret = ENOMEM;
2021 		}
2022 	}
2023 
2024 	/*
2025 	 * Get fcp_scsi_cmd pointer fields from user
2026 	 * address space
2027 	 */
2028 	if (ret == 0) {
2029 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2030 		u_bufaddr    = k_fscsi.scsi_bufaddr;
2031 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
2032 
2033 		if (ddi_copyin(u_cdbbufaddr,
2034 		    k_cdbbufaddr,
2035 		    k_fscsi.scsi_cdblen,
2036 		    mode)) {
2037 			ret = EFAULT;
2038 		} else if (ddi_copyin(u_bufaddr,
2039 		    k_bufaddr,
2040 		    k_fscsi.scsi_buflen,
2041 		    mode)) {
2042 			ret = EFAULT;
2043 		} else if (ddi_copyin(u_rqbufaddr,
2044 		    k_rqbufaddr,
2045 		    k_fscsi.scsi_rqlen,
2046 		    mode)) {
2047 			ret = EFAULT;
2048 		}
2049 	}
2050 
2051 	/*
2052 	 * Send scsi command (blocking)
2053 	 */
2054 	if (ret == 0) {
2055 		/*
2056 		 * Prior to sending the scsi command, the
2057 		 * fcp_scsi_cmd data structure must contain kernel,
2058 		 * not user, addresses.
2059 		 */
2060 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2061 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2062 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2063 
2064 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2065 
2066 		/*
2067 		 * After sending the scsi command, the
2068 		 * fcp_scsi_cmd data structure must contain user,
2069 		 * not kernel, addresses.
2070 		 */
2071 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2072 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2073 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2074 	}
2075 
2076 	/*
2077 	 * Put fcp_scsi_cmd pointer fields to user address space
2078 	 */
2079 	if (ret == 0) {
2080 		if (ddi_copyout(k_cdbbufaddr,
2081 		    u_cdbbufaddr,
2082 		    k_fscsi.scsi_cdblen,
2083 		    mode)) {
2084 			ret = EFAULT;
2085 		} else if (ddi_copyout(k_bufaddr,
2086 		    u_bufaddr,
2087 		    k_fscsi.scsi_buflen,
2088 		    mode)) {
2089 			ret = EFAULT;
2090 		} else if (ddi_copyout(k_rqbufaddr,
2091 		    u_rqbufaddr,
2092 		    k_fscsi.scsi_rqlen,
2093 		    mode)) {
2094 			ret = EFAULT;
2095 		}
2096 	}
2097 
2098 	/*
2099 	 * Free data for fcp_scsi_cmd pointer fields
2100 	 */
2101 	if (k_cdbbufaddr != NULL) {
2102 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2103 	}
2104 	if (k_bufaddr != NULL) {
2105 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2106 	}
2107 	if (k_rqbufaddr != NULL) {
2108 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2109 	}
2110 
2111 	/*
2112 	 * Put fcp_scsi_cmd array element to user address space
2113 	 */
2114 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2115 	if (temp_ret != 0) {
2116 		ret = temp_ret;
2117 	}
2118 
2119 	/*
2120 	 * Return status
2121 	 */
2122 	return (ret);
2123 }
2124 
2125 
2126 /*
2127  * fcp_copyin_scsi_cmd
2128  *	Copy in fcp_scsi_cmd data structure from user address space.
2129  *	The data may be in 32 bit or 64 bit modes.
2130  *
2131  * Input:
2132  *	base_addr	= from address (user address space)
2133  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2134  *
2135  * Output:
2136  *	fscsi		= to address (kernel address space)
2137  *
2138  * Returns:
2139  *	0	= OK
2140  *	EFAULT	= Error
2141  *
2142  * Context:
2143  *	Kernel context.
2144  */
2145 static int
2146 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2147 {
2148 #ifdef	_MULTI_DATAMODEL
2149 	struct fcp32_scsi_cmd	f32scsi;
2150 
2151 	switch (ddi_model_convert_from(mode & FMODELS)) {
2152 	case DDI_MODEL_ILP32:
2153 		/*
2154 		 * Copy data from user address space
2155 		 */
2156 		if (ddi_copyin((void *)base_addr,
2157 		    &f32scsi,
2158 		    sizeof (struct fcp32_scsi_cmd),
2159 		    mode)) {
2160 			return (EFAULT);
2161 		}
2162 		/*
2163 		 * Convert from 32 bit to 64 bit
2164 		 */
2165 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2166 		break;
2167 	case DDI_MODEL_NONE:
2168 		/*
2169 		 * Copy data from user address space
2170 		 */
2171 		if (ddi_copyin((void *)base_addr,
2172 		    fscsi,
2173 		    sizeof (struct fcp_scsi_cmd),
2174 		    mode)) {
2175 			return (EFAULT);
2176 		}
2177 		break;
2178 	}
2179 #else	/* _MULTI_DATAMODEL */
2180 	/*
2181 	 * Copy data from user address space
2182 	 */
2183 	if (ddi_copyin((void *)base_addr,
2184 	    fscsi,
2185 	    sizeof (struct fcp_scsi_cmd),
2186 	    mode)) {
2187 		return (EFAULT);
2188 	}
2189 #endif	/* _MULTI_DATAMODEL */
2190 
2191 	return (0);
2192 }
2193 
2194 
2195 /*
2196  * fcp_copyout_scsi_cmd
2197  *	Copy out fcp_scsi_cmd data structure to user address space.
2198  *	The data may be in 32 bit or 64 bit modes.
2199  *
2200  * Input:
2201  *	fscsi		= to address (kernel address space)
2202  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2203  *
2204  * Output:
2205  *	base_addr	= from address (user address space)
2206  *
2207  * Returns:
2208  *	0	= OK
2209  *	EFAULT	= Error
2210  *
2211  * Context:
2212  *	Kernel context.
2213  */
2214 static int
2215 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2216 {
2217 #ifdef	_MULTI_DATAMODEL
2218 	struct fcp32_scsi_cmd	f32scsi;
2219 
2220 	switch (ddi_model_convert_from(mode & FMODELS)) {
2221 	case DDI_MODEL_ILP32:
2222 		/*
2223 		 * Convert from 64 bit to 32 bit
2224 		 */
2225 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2226 		/*
2227 		 * Copy data to user address space
2228 		 */
2229 		if (ddi_copyout(&f32scsi,
2230 		    (void *)base_addr,
2231 		    sizeof (struct fcp32_scsi_cmd),
2232 		    mode)) {
2233 			return (EFAULT);
2234 		}
2235 		break;
2236 	case DDI_MODEL_NONE:
2237 		/*
2238 		 * Copy data to user address space
2239 		 */
2240 		if (ddi_copyout(fscsi,
2241 		    (void *)base_addr,
2242 		    sizeof (struct fcp_scsi_cmd),
2243 		    mode)) {
2244 			return (EFAULT);
2245 		}
2246 		break;
2247 	}
2248 #else	/* _MULTI_DATAMODEL */
2249 	/*
2250 	 * Copy data to user address space
2251 	 */
2252 	if (ddi_copyout(fscsi,
2253 	    (void *)base_addr,
2254 	    sizeof (struct fcp_scsi_cmd),
2255 	    mode)) {
2256 		return (EFAULT);
2257 	}
2258 #endif	/* _MULTI_DATAMODEL */
2259 
2260 	return (0);
2261 }
2262 
2263 
2264 /*
2265  * fcp_send_scsi_ioctl
2266  *	Sends the SCSI command in blocking mode.
2267  *
2268  * Input:
2269  *	fscsi		= SCSI command data structure
2270  *
2271  * Output:
2272  *	fscsi		= SCSI command data structure
2273  *
2274  * Returns:
2275  *	0	= OK
2276  *	EAGAIN	= See errno.h
2277  *	EBUSY	= See errno.h
2278  *	EINTR	= See errno.h
2279  *	EINVAL	= See errno.h
2280  *	EIO	= See errno.h
2281  *	ENOMEM	= See errno.h
2282  *	ENXIO	= See errno.h
2283  *
2284  * Context:
2285  *	Kernel context.
2286  */
2287 static int
2288 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2289 {
2290 	struct fcp_lun	*plun		= NULL;
2291 	struct fcp_port	*pptr		= NULL;
2292 	struct fcp_tgt	*ptgt		= NULL;
2293 	fc_packet_t		*fpkt		= NULL;
2294 	struct fcp_ipkt	*icmd		= NULL;
2295 	int			target_created	= FALSE;
2296 	fc_frame_hdr_t		*hp;
2297 	struct fcp_cmd		fcp_cmd;
2298 	struct fcp_cmd		*fcmd;
2299 	union scsi_cdb		*scsi_cdb;
2300 	la_wwn_t		*wwn_ptr;
2301 	int			nodma;
2302 	struct fcp_rsp		*rsp;
2303 	struct fcp_rsp_info	*rsp_info;
2304 	caddr_t			rsp_sense;
2305 	int			buf_len;
2306 	int			info_len;
2307 	int			sense_len;
2308 	struct scsi_extended_sense	*sense_to = NULL;
2309 	timeout_id_t		tid;
2310 	uint8_t			reconfig_lun = FALSE;
2311 	uint8_t			reconfig_pending = FALSE;
2312 	uint8_t			scsi_cmd;
2313 	int			rsp_len;
2314 	int			cmd_index;
2315 	int			fc_status;
2316 	int			pkt_state;
2317 	int			pkt_action;
2318 	int			pkt_reason;
2319 	int			ret, xport_retval = ~FC_SUCCESS;
2320 	int			lcount;
2321 	int			tcount;
2322 	int			reconfig_status;
2323 	int			port_busy = FALSE;
2324 	uchar_t			*lun_string;
2325 
2326 	/*
2327 	 * Check valid SCSI command
2328 	 */
2329 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2330 	ret = EINVAL;
2331 	for (cmd_index = 0;
2332 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2333 	    ret != 0;
2334 	    cmd_index++) {
2335 		/*
2336 		 * First byte of CDB is the SCSI command
2337 		 */
2338 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2339 			ret = 0;
2340 		}
2341 	}
2342 
2343 	/*
2344 	 * Check inputs
2345 	 */
2346 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2347 		ret = EINVAL;
2348 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2349 		/* no larger than */
2350 		ret = EINVAL;
2351 	}
2352 
2353 
2354 	/*
2355 	 * Find FC port
2356 	 */
2357 	if (ret == 0) {
2358 		/*
2359 		 * Acquire global mutex
2360 		 */
2361 		mutex_enter(&fcp_global_mutex);
2362 
2363 		pptr = fcp_port_head;
2364 		while (pptr) {
2365 			if (pptr->port_instance ==
2366 			    (uint32_t)fscsi->scsi_fc_port_num) {
2367 				break;
2368 			} else {
2369 				pptr = pptr->port_next;
2370 			}
2371 		}
2372 
2373 		if (pptr == NULL) {
2374 			ret = ENXIO;
2375 		} else {
2376 			/*
2377 			 * fc_ulp_busy_port can raise power
2378 			 *  so, we must not hold any mutexes involved in PM
2379 			 */
2380 			mutex_exit(&fcp_global_mutex);
2381 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2382 		}
2383 
2384 		if (ret == 0) {
2385 
2386 			/* remember port is busy, so we will release later */
2387 			port_busy = TRUE;
2388 
2389 			/*
2390 			 * If there is a reconfiguration in progress, wait
2391 			 * for it to complete.
2392 			 */
2393 
2394 			fcp_reconfig_wait(pptr);
2395 
2396 			/* reacquire mutexes in order */
2397 			mutex_enter(&fcp_global_mutex);
2398 			mutex_enter(&pptr->port_mutex);
2399 
2400 			/*
2401 			 * Will port accept DMA?
2402 			 */
2403 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2404 			    ? 1 : 0;
2405 
2406 			/*
2407 			 * If init or offline, device not known
2408 			 *
2409 			 * If we are discovering (onlining), we can
2410 			 * NOT obviously provide reliable data about
2411 			 * devices until it is complete
2412 			 */
2413 			if (pptr->port_state &	  (FCP_STATE_INIT |
2414 			    FCP_STATE_OFFLINE)) {
2415 				ret = ENXIO;
2416 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2417 				ret = EBUSY;
2418 			} else {
2419 				/*
2420 				 * Find target from pwwn
2421 				 *
2422 				 * The wwn must be put into a local
2423 				 * variable to ensure alignment.
2424 				 */
2425 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2426 				ptgt = fcp_lookup_target(pptr,
2427 				    (uchar_t *)wwn_ptr);
2428 
2429 				/*
2430 				 * If target not found,
2431 				 */
2432 				if (ptgt == NULL) {
2433 					/*
2434 					 * Note: Still have global &
2435 					 * port mutexes
2436 					 */
2437 					mutex_exit(&pptr->port_mutex);
2438 					ptgt = fcp_port_create_tgt(pptr,
2439 					    wwn_ptr, &ret, &fc_status,
2440 					    &pkt_state, &pkt_action,
2441 					    &pkt_reason);
2442 					mutex_enter(&pptr->port_mutex);
2443 
2444 					fscsi->scsi_fc_status  = fc_status;
2445 					fscsi->scsi_pkt_state  =
2446 					    (uchar_t)pkt_state;
2447 					fscsi->scsi_pkt_reason = pkt_reason;
2448 					fscsi->scsi_pkt_action =
2449 					    (uchar_t)pkt_action;
2450 
2451 					if (ptgt != NULL) {
2452 						target_created = TRUE;
2453 					} else if (ret == 0) {
2454 						ret = ENOMEM;
2455 					}
2456 				}
2457 
2458 				if (ret == 0) {
2459 					/*
2460 					 * Acquire target
2461 					 */
2462 					mutex_enter(&ptgt->tgt_mutex);
2463 
2464 					/*
2465 					 * If target is mark or busy,
2466 					 * then target can not be used
2467 					 */
2468 					if (ptgt->tgt_state &
2469 					    (FCP_TGT_MARK |
2470 					    FCP_TGT_BUSY)) {
2471 						ret = EBUSY;
2472 					} else {
2473 						/*
2474 						 * Mark target as busy
2475 						 */
2476 						ptgt->tgt_state |=
2477 						    FCP_TGT_BUSY;
2478 					}
2479 
2480 					/*
2481 					 * Release target
2482 					 */
2483 					lcount = pptr->port_link_cnt;
2484 					tcount = ptgt->tgt_change_cnt;
2485 					mutex_exit(&ptgt->tgt_mutex);
2486 				}
2487 			}
2488 
2489 			/*
2490 			 * Release port
2491 			 */
2492 			mutex_exit(&pptr->port_mutex);
2493 		}
2494 
2495 		/*
2496 		 * Release global mutex
2497 		 */
2498 		mutex_exit(&fcp_global_mutex);
2499 	}
2500 
2501 	if (ret == 0) {
2502 		uint64_t belun = BE_64(fscsi->scsi_lun);
2503 
2504 		/*
2505 		 * If it's a target device, find lun from pwwn
2506 		 * The wwn must be put into a local
2507 		 * variable to ensure alignment.
2508 		 */
2509 		mutex_enter(&pptr->port_mutex);
2510 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2511 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2512 			/* this is not a target */
2513 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2514 			ret = ENXIO;
2515 		} else if ((belun << 16) != 0) {
2516 			/*
2517 			 * Since fcp only support PD and LU addressing method
2518 			 * so far, the last 6 bytes of a valid LUN are expected
2519 			 * to be filled with 00h.
2520 			 */
2521 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2522 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2523 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2524 			    (uint8_t)(belun >> 62), belun);
2525 			ret = ENXIO;
2526 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2527 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2528 			/*
2529 			 * This is a SCSI target, but no LUN at this
2530 			 * address.
2531 			 *
2532 			 * In the future, we may want to send this to
2533 			 * the target, and let it respond
2534 			 * appropriately
2535 			 */
2536 			ret = ENXIO;
2537 		}
2538 		mutex_exit(&pptr->port_mutex);
2539 	}
2540 
2541 	/*
2542 	 * Finished grabbing external resources
2543 	 * Allocate internal packet (icmd)
2544 	 */
2545 	if (ret == 0) {
2546 		/*
2547 		 * Calc rsp len assuming rsp info included
2548 		 */
2549 		rsp_len = sizeof (struct fcp_rsp) +
2550 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2551 
2552 		icmd = fcp_icmd_alloc(pptr, ptgt,
2553 		    sizeof (struct fcp_cmd),
2554 		    rsp_len,
2555 		    fscsi->scsi_buflen,
2556 		    nodma,
2557 		    lcount,			/* ipkt_link_cnt */
2558 		    tcount,			/* ipkt_change_cnt */
2559 		    0,				/* cause */
2560 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2561 
2562 		if (icmd == NULL) {
2563 			ret = ENOMEM;
2564 		} else {
2565 			/*
2566 			 * Setup internal packet as sema sync
2567 			 */
2568 			fcp_ipkt_sema_init(icmd);
2569 		}
2570 	}
2571 
2572 	if (ret == 0) {
2573 		/*
2574 		 * Init fpkt pointer for use.
2575 		 */
2576 
2577 		fpkt = icmd->ipkt_fpkt;
2578 
2579 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2580 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2581 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2582 
2583 		/*
2584 		 * Init fcmd pointer for use by SCSI command
2585 		 */
2586 
2587 		if (nodma) {
2588 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2589 		} else {
2590 			fcmd = &fcp_cmd;
2591 		}
2592 		bzero(fcmd, sizeof (struct fcp_cmd));
2593 		ptgt = plun->lun_tgt;
2594 
2595 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2596 
2597 		fcmd->fcp_ent_addr.ent_addr_0 =
2598 		    BE_16(*(uint16_t *)&(lun_string[0]));
2599 		fcmd->fcp_ent_addr.ent_addr_1 =
2600 		    BE_16(*(uint16_t *)&(lun_string[2]));
2601 		fcmd->fcp_ent_addr.ent_addr_2 =
2602 		    BE_16(*(uint16_t *)&(lun_string[4]));
2603 		fcmd->fcp_ent_addr.ent_addr_3 =
2604 		    BE_16(*(uint16_t *)&(lun_string[6]));
2605 
2606 		/*
2607 		 * Setup internal packet(icmd)
2608 		 */
2609 		icmd->ipkt_lun		= plun;
2610 		icmd->ipkt_restart	= 0;
2611 		icmd->ipkt_retries	= 0;
2612 		icmd->ipkt_opcode	= 0;
2613 
2614 		/*
2615 		 * Init the frame HEADER Pointer for use
2616 		 */
2617 		hp = &fpkt->pkt_cmd_fhdr;
2618 
2619 		hp->s_id	= pptr->port_id;
2620 		hp->d_id	= ptgt->tgt_d_id;
2621 		hp->r_ctl	= R_CTL_COMMAND;
2622 		hp->type	= FC_TYPE_SCSI_FCP;
2623 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2624 		hp->rsvd	= 0;
2625 		hp->seq_id	= 0;
2626 		hp->seq_cnt	= 0;
2627 		hp->ox_id	= 0xffff;
2628 		hp->rx_id	= 0xffff;
2629 		hp->ro		= 0;
2630 
2631 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2632 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2633 		fcmd->fcp_cntl.cntl_write_data	= 0;
2634 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2635 
2636 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2637 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2638 		    fscsi->scsi_cdblen);
2639 
2640 		if (!nodma) {
2641 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2642 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2643 		}
2644 
2645 		/*
2646 		 * Send SCSI command to FC transport
2647 		 */
2648 
2649 		if (ret == 0) {
2650 			mutex_enter(&ptgt->tgt_mutex);
2651 
2652 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2653 				mutex_exit(&ptgt->tgt_mutex);
2654 				fscsi->scsi_fc_status = xport_retval =
2655 				    fc_ulp_transport(pptr->port_fp_handle,
2656 				    fpkt);
2657 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2658 					ret = EIO;
2659 				}
2660 			} else {
2661 				mutex_exit(&ptgt->tgt_mutex);
2662 				ret = EBUSY;
2663 			}
2664 		}
2665 	}
2666 
2667 	/*
2668 	 * Wait for completion only if fc_ulp_transport was called and it
2669 	 * returned a success. This is the only time callback will happen.
2670 	 * Otherwise, there is no point in waiting
2671 	 */
2672 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2673 		ret = fcp_ipkt_sema_wait(icmd);
2674 	}
2675 
2676 	/*
2677 	 * Copy data to IOCTL data structures
2678 	 */
2679 	rsp = NULL;
2680 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2681 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2682 
2683 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2684 			fcp_log(CE_WARN, pptr->port_dip,
2685 			    "!SCSI command to d_id=0x%x lun=0x%x"
2686 			    " failed, Bad FCP response values:"
2687 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2688 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2689 			    ptgt->tgt_d_id, plun->lun_num,
2690 			    rsp->reserved_0, rsp->reserved_1,
2691 			    rsp->fcp_u.fcp_status.reserved_0,
2692 			    rsp->fcp_u.fcp_status.reserved_1,
2693 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2694 
2695 			ret = EIO;
2696 		}
2697 	}
2698 
2699 	if ((ret == 0) && (rsp != NULL)) {
2700 		/*
2701 		 * Calc response lengths
2702 		 */
2703 		sense_len = 0;
2704 		info_len = 0;
2705 
2706 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2707 			info_len = rsp->fcp_response_len;
2708 		}
2709 
2710 		rsp_info   = (struct fcp_rsp_info *)
2711 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2712 
2713 		/*
2714 		 * Get SCSI status
2715 		 */
2716 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2717 		/*
2718 		 * If a lun was just added or removed and the next command
2719 		 * comes through this interface, we need to capture the check
2720 		 * condition so we can discover the new topology.
2721 		 */
2722 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2723 		    rsp->fcp_u.fcp_status.sense_len_set) {
2724 			sense_len = rsp->fcp_sense_len;
2725 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2726 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2727 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2728 			    (FCP_SENSE_NO_LUN(sense_to))) {
2729 				reconfig_lun = TRUE;
2730 			}
2731 		}
2732 
2733 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2734 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2735 			if (reconfig_lun == FALSE) {
2736 				reconfig_status =
2737 				    fcp_is_reconfig_needed(ptgt, fpkt);
2738 			}
2739 
2740 			if ((reconfig_lun == TRUE) ||
2741 			    (reconfig_status == TRUE)) {
2742 				mutex_enter(&ptgt->tgt_mutex);
2743 				if (ptgt->tgt_tid == NULL) {
2744 					/*
2745 					 * Either we've been notified the
2746 					 * REPORT_LUN data has changed, or
2747 					 * we've determined on our own that
2748 					 * we're out of date.  Kick off
2749 					 * rediscovery.
2750 					 */
2751 					tid = timeout(fcp_reconfigure_luns,
2752 					    (caddr_t)ptgt, drv_usectohz(1));
2753 
2754 					ptgt->tgt_tid = tid;
2755 					ptgt->tgt_state |= FCP_TGT_BUSY;
2756 					ret = EBUSY;
2757 					reconfig_pending = TRUE;
2758 				}
2759 				mutex_exit(&ptgt->tgt_mutex);
2760 			}
2761 		}
2762 
2763 		/*
2764 		 * Calc residuals and buffer lengths
2765 		 */
2766 
2767 		if (ret == 0) {
2768 			buf_len = fscsi->scsi_buflen;
2769 			fscsi->scsi_bufresid	= 0;
2770 			if (rsp->fcp_u.fcp_status.resid_under) {
2771 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2772 					fscsi->scsi_bufresid = rsp->fcp_resid;
2773 				} else {
2774 					cmn_err(CE_WARN, "fcp: bad residue %x "
2775 					    "for txfer len %x", rsp->fcp_resid,
2776 					    fscsi->scsi_buflen);
2777 					fscsi->scsi_bufresid =
2778 					    fscsi->scsi_buflen;
2779 				}
2780 				buf_len -= fscsi->scsi_bufresid;
2781 			}
2782 			if (rsp->fcp_u.fcp_status.resid_over) {
2783 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2784 			}
2785 
2786 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2787 			if (fscsi->scsi_rqlen < sense_len) {
2788 				sense_len = fscsi->scsi_rqlen;
2789 			}
2790 
2791 			fscsi->scsi_fc_rspcode	= 0;
2792 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2793 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2794 			}
2795 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2796 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2797 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2798 
2799 			/*
2800 			 * Copy data and request sense
2801 			 *
2802 			 * Data must be copied by using the FCP_CP_IN macro.
2803 			 * This will ensure the proper byte order since the data
2804 			 * is being copied directly from the memory mapped
2805 			 * device register.
2806 			 *
2807 			 * The response (and request sense) will be in the
2808 			 * correct byte order.	No special copy is necessary.
2809 			 */
2810 
2811 			if (buf_len) {
2812 				FCP_CP_IN(fpkt->pkt_data,
2813 				    fscsi->scsi_bufaddr,
2814 				    fpkt->pkt_data_acc,
2815 				    buf_len);
2816 			}
2817 			bcopy((void *)rsp_sense,
2818 			    (void *)fscsi->scsi_rqbufaddr,
2819 			    sense_len);
2820 		}
2821 	}
2822 
2823 	/*
2824 	 * Cleanup transport data structures if icmd was alloc-ed
2825 	 * So, cleanup happens in the same thread that icmd was alloc-ed
2826 	 */
2827 	if (icmd != NULL) {
2828 		fcp_ipkt_sema_cleanup(icmd);
2829 	}
2830 
2831 	/* restore pm busy/idle status */
2832 	if (port_busy) {
2833 		fc_ulp_idle_port(pptr->port_fp_handle);
2834 	}
2835 
2836 	/*
2837 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2838 	 * flag, it'll be cleared when the reconfig is complete.
2839 	 */
2840 	if ((ptgt != NULL) && !reconfig_pending) {
2841 		/*
2842 		 * If target was created,
2843 		 */
2844 		if (target_created) {
2845 			mutex_enter(&ptgt->tgt_mutex);
2846 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2847 			mutex_exit(&ptgt->tgt_mutex);
2848 		} else {
2849 			/*
2850 			 * De-mark target as busy
2851 			 */
2852 			mutex_enter(&ptgt->tgt_mutex);
2853 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2854 			mutex_exit(&ptgt->tgt_mutex);
2855 		}
2856 	}
2857 	return (ret);
2858 }
2859 
2860 
2861 static int
2862 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2863     fc_packet_t	*fpkt)
2864 {
2865 	uchar_t			*lun_string;
2866 	uint16_t		lun_num, i;
2867 	int			num_luns;
2868 	int			actual_luns;
2869 	int			num_masked_luns;
2870 	int			lun_buflen;
2871 	struct fcp_lun	*plun	= NULL;
2872 	struct fcp_reportlun_resp	*report_lun;
2873 	uint8_t			reconfig_needed = FALSE;
2874 	uint8_t			lun_exists = FALSE;
2875 	fcp_port_t			*pptr		 = ptgt->tgt_port;
2876 
2877 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2878 
2879 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2880 	    fpkt->pkt_datalen);
2881 
2882 	/* get number of luns (which is supplied as LUNS * 8) */
2883 	num_luns = BE_32(report_lun->num_lun) >> 3;
2884 
2885 	/*
2886 	 * Figure out exactly how many lun strings our response buffer
2887 	 * can hold.
2888 	 */
2889 	lun_buflen = (fpkt->pkt_datalen -
2890 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2891 
2892 	/*
2893 	 * Is our response buffer full or not? We don't want to
2894 	 * potentially walk beyond the number of luns we have.
2895 	 */
2896 	if (num_luns <= lun_buflen) {
2897 		actual_luns = num_luns;
2898 	} else {
2899 		actual_luns = lun_buflen;
2900 	}
2901 
2902 	mutex_enter(&ptgt->tgt_mutex);
2903 
2904 	/* Scan each lun to see if we have masked it. */
2905 	num_masked_luns = 0;
2906 	if (fcp_lun_blacklist != NULL) {
2907 		for (i = 0; i < actual_luns; i++) {
2908 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2909 			switch (lun_string[0] & 0xC0) {
2910 			case FCP_LUN_ADDRESSING:
2911 			case FCP_PD_ADDRESSING:
2912 			case FCP_VOLUME_ADDRESSING:
2913 				lun_num = ((lun_string[0] & 0x3F) << 8)
2914 				    | lun_string[1];
2915 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2916 				    lun_num) == TRUE) {
2917 					num_masked_luns++;
2918 				}
2919 				break;
2920 			default:
2921 				break;
2922 			}
2923 		}
2924 	}
2925 
2926 	/*
2927 	 * The quick and easy check.  If the number of LUNs reported
2928 	 * doesn't match the number we currently know about, we need
2929 	 * to reconfigure.
2930 	 */
2931 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2932 		mutex_exit(&ptgt->tgt_mutex);
2933 		kmem_free(report_lun, fpkt->pkt_datalen);
2934 		return (TRUE);
2935 	}
2936 
2937 	/*
2938 	 * If the quick and easy check doesn't turn up anything, we walk
2939 	 * the list of luns from the REPORT_LUN response and look for
2940 	 * any luns we don't know about.  If we find one, we know we need
2941 	 * to reconfigure. We will skip LUNs that are masked because of the
2942 	 * blacklist.
2943 	 */
2944 	for (i = 0; i < actual_luns; i++) {
2945 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2946 		lun_exists = FALSE;
2947 		switch (lun_string[0] & 0xC0) {
2948 		case FCP_LUN_ADDRESSING:
2949 		case FCP_PD_ADDRESSING:
2950 		case FCP_VOLUME_ADDRESSING:
2951 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2952 
2953 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2954 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2955 				lun_exists = TRUE;
2956 				break;
2957 			}
2958 
2959 			for (plun = ptgt->tgt_lun; plun;
2960 			    plun = plun->lun_next) {
2961 				if (plun->lun_num == lun_num) {
2962 					lun_exists = TRUE;
2963 					break;
2964 				}
2965 			}
2966 			break;
2967 		default:
2968 			break;
2969 		}
2970 
2971 		if (lun_exists == FALSE) {
2972 			reconfig_needed = TRUE;
2973 			break;
2974 		}
2975 	}
2976 
2977 	mutex_exit(&ptgt->tgt_mutex);
2978 	kmem_free(report_lun, fpkt->pkt_datalen);
2979 
2980 	return (reconfig_needed);
2981 }
2982 
2983 /*
2984  * This function is called by fcp_handle_page83 and uses inquiry response data
2985  * stored in plun->lun_inq to determine whether or not a device is a member of
2986  * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
2987  * otherwise 1.
2988  */
2989 static int
2990 fcp_symmetric_device_probe(struct fcp_lun *plun)
2991 {
2992 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2993 	char			*devidptr;
2994 	int			i, len;
2995 
2996 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2997 		devidptr = fcp_symmetric_disk_table[i];
2998 		len = (int)strlen(devidptr);
2999 
3000 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3001 			return (0);
3002 		}
3003 	}
3004 	return (1);
3005 }
3006 
3007 
3008 /*
3009  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3010  * It basically returns the current count of # of state change callbacks
3011  * i.e the value of tgt_change_cnt.
3012  *
3013  * INPUT:
3014  *   fcp_ioctl.fp_minor -> The minor # of the fp port
3015  *   fcp_ioctl.listlen	-> 1
3016  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
3017  */
3018 /*ARGSUSED2*/
3019 static int
3020 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3021 {
3022 	int			ret;
3023 	uint32_t		link_cnt;
3024 	struct fcp_ioctl	fioctl;
3025 	struct fcp_port	*pptr = NULL;
3026 
3027 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3028 	    &pptr)) != 0) {
3029 		return (ret);
3030 	}
3031 
3032 	ASSERT(pptr != NULL);
3033 
3034 	if (fioctl.listlen != 1) {
3035 		return (EINVAL);
3036 	}
3037 
3038 	mutex_enter(&pptr->port_mutex);
3039 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3040 		mutex_exit(&pptr->port_mutex);
3041 		return (ENXIO);
3042 	}
3043 
3044 	/*
3045 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3046 	 * When the fcp initially attaches to the port and there are nothing
3047 	 * hanging out of the port or if there was a repeat offline state change
3048 	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3049 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3050 	 * will differentiate the 2 cases.
3051 	 */
3052 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3053 		mutex_exit(&pptr->port_mutex);
3054 		return (ENXIO);
3055 	}
3056 
3057 	link_cnt = pptr->port_link_cnt;
3058 	mutex_exit(&pptr->port_mutex);
3059 
3060 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3061 		return (EFAULT);
3062 	}
3063 
3064 #ifdef	_MULTI_DATAMODEL
3065 	switch (ddi_model_convert_from(mode & FMODELS)) {
3066 	case DDI_MODEL_ILP32: {
3067 		struct fcp32_ioctl f32_ioctl;
3068 
3069 		f32_ioctl.fp_minor = fioctl.fp_minor;
3070 		f32_ioctl.listlen = fioctl.listlen;
3071 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3072 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3073 		    sizeof (struct fcp32_ioctl), mode)) {
3074 			return (EFAULT);
3075 		}
3076 		break;
3077 	}
3078 	case DDI_MODEL_NONE:
3079 		if (ddi_copyout((void *)&fioctl, (void *)data,
3080 		    sizeof (struct fcp_ioctl), mode)) {
3081 			return (EFAULT);
3082 		}
3083 		break;
3084 	}
3085 #else	/* _MULTI_DATAMODEL */
3086 
3087 	if (ddi_copyout((void *)&fioctl, (void *)data,
3088 	    sizeof (struct fcp_ioctl), mode)) {
3089 		return (EFAULT);
3090 	}
3091 #endif	/* _MULTI_DATAMODEL */
3092 
3093 	return (0);
3094 }
3095 
3096 /*
3097  * This function copies the fcp_ioctl structure passed in from user land
3098  * into kernel land. Handles 32 bit applications.
3099  */
3100 /*ARGSUSED*/
3101 static int
3102 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3103     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3104 {
3105 	struct fcp_port	*t_pptr;
3106 
3107 #ifdef	_MULTI_DATAMODEL
3108 	switch (ddi_model_convert_from(mode & FMODELS)) {
3109 	case DDI_MODEL_ILP32: {
3110 		struct fcp32_ioctl f32_ioctl;
3111 
3112 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3113 		    sizeof (struct fcp32_ioctl), mode)) {
3114 			return (EFAULT);
3115 		}
3116 		fioctl->fp_minor = f32_ioctl.fp_minor;
3117 		fioctl->listlen = f32_ioctl.listlen;
3118 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3119 		break;
3120 	}
3121 	case DDI_MODEL_NONE:
3122 		if (ddi_copyin((void *)data, (void *)fioctl,
3123 		    sizeof (struct fcp_ioctl), mode)) {
3124 			return (EFAULT);
3125 		}
3126 		break;
3127 	}
3128 
3129 #else	/* _MULTI_DATAMODEL */
3130 	if (ddi_copyin((void *)data, (void *)fioctl,
3131 	    sizeof (struct fcp_ioctl), mode)) {
3132 		return (EFAULT);
3133 	}
3134 #endif	/* _MULTI_DATAMODEL */
3135 
3136 	/*
3137 	 * Right now we can assume that the minor number matches with
3138 	 * this instance of fp. If this changes we will need to
3139 	 * revisit this logic.
3140 	 */
3141 	mutex_enter(&fcp_global_mutex);
3142 	t_pptr = fcp_port_head;
3143 	while (t_pptr) {
3144 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3145 			break;
3146 		} else {
3147 			t_pptr = t_pptr->port_next;
3148 		}
3149 	}
3150 	*pptr = t_pptr;
3151 	mutex_exit(&fcp_global_mutex);
3152 	if (t_pptr == NULL) {
3153 		return (ENXIO);
3154 	}
3155 
3156 	return (0);
3157 }
3158 
3159 /*
3160  *     Function: fcp_port_create_tgt
3161  *
3162  *  Description: As the name suggest this function creates the target context
3163  *		 specified by the the WWN provided by the caller.  If the
3164  *		 creation goes well and the target is known by fp/fctl a PLOGI
3165  *		 followed by a PRLI are issued.
3166  *
3167  *     Argument: pptr		fcp port structure
3168  *		 pwwn		WWN of the target
3169  *		 ret_val	Address of the return code.  It could be:
3170  *				EIO, ENOMEM or 0.
3171  *		 fc_status	PLOGI or PRLI status completion
3172  *		 fc_pkt_state	PLOGI or PRLI state completion
3173  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3174  *		 fc_pkt_action	PLOGI or PRLI action completion
3175  *
3176  * Return Value: NULL if it failed
3177  *		 Target structure address if it succeeds
3178  */
3179 static struct fcp_tgt *
3180 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3181     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3182 {
3183 	struct fcp_tgt	*ptgt = NULL;
3184 	fc_portmap_t		devlist;
3185 	int			lcount;
3186 	int			error;
3187 
3188 	*ret_val = 0;
3189 
3190 	/*
3191 	 * Check FC port device & get port map
3192 	 */
3193 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3194 	    &error, 1) == NULL) {
3195 		*ret_val = EIO;
3196 	} else {
3197 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3198 		    &devlist) != FC_SUCCESS) {
3199 			*ret_val = EIO;
3200 		}
3201 	}
3202 
3203 	/* Set port map flags */
3204 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3205 
3206 	/* Allocate target */
3207 	if (*ret_val == 0) {
3208 		lcount = pptr->port_link_cnt;
3209 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3210 		if (ptgt == NULL) {
3211 			fcp_log(CE_WARN, pptr->port_dip,
3212 			    "!FC target allocation failed");
3213 			*ret_val = ENOMEM;
3214 		} else {
3215 			/* Setup target */
3216 			mutex_enter(&ptgt->tgt_mutex);
3217 
3218 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3219 			ptgt->tgt_tmp_cnt	= 1;
3220 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3221 			ptgt->tgt_hard_addr	=
3222 			    devlist.map_hard_addr.hard_addr;
3223 			ptgt->tgt_pd_handle	= devlist.map_pd;
3224 			ptgt->tgt_fca_dev	= NULL;
3225 
3226 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3227 			    FC_WWN_SIZE);
3228 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3229 			    FC_WWN_SIZE);
3230 
3231 			mutex_exit(&ptgt->tgt_mutex);
3232 		}
3233 	}
3234 
3235 	/* Release global mutex for PLOGI and PRLI */
3236 	mutex_exit(&fcp_global_mutex);
3237 
3238 	/* Send PLOGI (If necessary) */
3239 	if (*ret_val == 0) {
3240 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3241 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3242 	}
3243 
3244 	/* Send PRLI (If necessary) */
3245 	if (*ret_val == 0) {
3246 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3247 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3248 	}
3249 
3250 	mutex_enter(&fcp_global_mutex);
3251 
3252 	return (ptgt);
3253 }
3254 
3255 /*
3256  *     Function: fcp_tgt_send_plogi
3257  *
3258  *  Description: This function sends a PLOGI to the target specified by the
3259  *		 caller and waits till it completes.
3260  *
3261  *     Argument: ptgt		Target to send the plogi to.
3262  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3263  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3264  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3265  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3266  *
3267  * Return Value: 0
3268  *		 ENOMEM
3269  *		 EIO
3270  *
3271  *	Context: User context.
3272  */
3273 static int
3274 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3275     int *fc_pkt_reason, int *fc_pkt_action)
3276 {
3277 	struct fcp_port	*pptr;
3278 	struct fcp_ipkt	*icmd;
3279 	struct fc_packet	*fpkt;
3280 	fc_frame_hdr_t		*hp;
3281 	struct la_els_logi	logi;
3282 	int			tcount;
3283 	int			lcount;
3284 	int			ret, login_retval = ~FC_SUCCESS;
3285 
3286 	ret = 0;
3287 
3288 	pptr = ptgt->tgt_port;
3289 
3290 	lcount = pptr->port_link_cnt;
3291 	tcount = ptgt->tgt_change_cnt;
3292 
3293 	/* Alloc internal packet */
3294 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3295 	    sizeof (la_els_logi_t), 0,
3296 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3297 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3298 
3299 	if (icmd == NULL) {
3300 		ret = ENOMEM;
3301 	} else {
3302 		/*
3303 		 * Setup internal packet as sema sync
3304 		 */
3305 		fcp_ipkt_sema_init(icmd);
3306 
3307 		/*
3308 		 * Setup internal packet (icmd)
3309 		 */
3310 		icmd->ipkt_lun		= NULL;
3311 		icmd->ipkt_restart	= 0;
3312 		icmd->ipkt_retries	= 0;
3313 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3314 
3315 		/*
3316 		 * Setup fc_packet
3317 		 */
3318 		fpkt = icmd->ipkt_fpkt;
3319 
3320 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3321 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3322 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3323 
3324 		/*
3325 		 * Setup FC frame header
3326 		 */
3327 		hp = &fpkt->pkt_cmd_fhdr;
3328 
3329 		hp->s_id	= pptr->port_id;	/* source ID */
3330 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3331 		hp->r_ctl	= R_CTL_ELS_REQ;
3332 		hp->type	= FC_TYPE_EXTENDED_LS;
3333 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3334 		hp->seq_id	= 0;
3335 		hp->rsvd	= 0;
3336 		hp->df_ctl	= 0;
3337 		hp->seq_cnt	= 0;
3338 		hp->ox_id	= 0xffff;		/* i.e. none */
3339 		hp->rx_id	= 0xffff;		/* i.e. none */
3340 		hp->ro		= 0;
3341 
3342 		/*
3343 		 * Setup PLOGI
3344 		 */
3345 		bzero(&logi, sizeof (struct la_els_logi));
3346 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3347 
3348 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3349 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3350 
3351 		/*
3352 		 * Send PLOGI
3353 		 */
3354 		*fc_status = login_retval =
3355 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3356 		if (*fc_status != FC_SUCCESS) {
3357 			ret = EIO;
3358 		}
3359 	}
3360 
3361 	/*
3362 	 * Wait for completion
3363 	 */
3364 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3365 		ret = fcp_ipkt_sema_wait(icmd);
3366 
3367 		*fc_pkt_state	= fpkt->pkt_state;
3368 		*fc_pkt_reason	= fpkt->pkt_reason;
3369 		*fc_pkt_action	= fpkt->pkt_action;
3370 	}
3371 
3372 	/*
3373 	 * Cleanup transport data structures if icmd was alloc-ed AND if there
3374 	 * is going to be no callback (i.e if fc_ulp_login() failed).
3375 	 * Otherwise, cleanup happens in callback routine.
3376 	 */
3377 	if (icmd != NULL) {
3378 		fcp_ipkt_sema_cleanup(icmd);
3379 	}
3380 
3381 	return (ret);
3382 }
3383 
3384 /*
3385  *     Function: fcp_tgt_send_prli
3386  *
3387  *  Description: Does nothing as of today.
3388  *
3389  *     Argument: ptgt		Target to send the prli to.
3390  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3391  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3392  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3393  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3394  *
3395  * Return Value: 0
3396  */
3397 /*ARGSUSED*/
3398 static int
3399 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3400     int *fc_pkt_reason, int *fc_pkt_action)
3401 {
3402 	return (0);
3403 }
3404 
3405 /*
3406  *     Function: fcp_ipkt_sema_init
3407  *
3408  *  Description: Initializes the semaphore contained in the internal packet.
3409  *
3410  *     Argument: icmd	Internal packet the semaphore of which must be
3411  *			initialized.
3412  *
3413  * Return Value: None
3414  *
3415  *	Context: User context only.
3416  */
3417 static void
3418 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3419 {
3420 	struct fc_packet	*fpkt;
3421 
3422 	fpkt = icmd->ipkt_fpkt;
3423 
3424 	/* Create semaphore for sync */
3425 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3426 
3427 	/* Setup the completion callback */
3428 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3429 }
3430 
3431 /*
3432  *     Function: fcp_ipkt_sema_wait
3433  *
3434  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3435  *		 semaphore is released in the callback.
3436  *
3437  *     Argument: icmd	Internal packet to wait on for completion.
3438  *
3439  * Return Value: 0
3440  *		 EIO
3441  *		 EBUSY
3442  *		 EAGAIN
3443  *
3444  *	Context: User context only.
3445  *
3446  * This function does a conversion between the field pkt_state of the fc_packet
3447  * embedded in the internal packet (icmd) and the code it returns.
3448  */
3449 static int
3450 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3451 {
3452 	struct fc_packet	*fpkt;
3453 	int	ret;
3454 
3455 	ret = EIO;
3456 	fpkt = icmd->ipkt_fpkt;
3457 
3458 	/*
3459 	 * Wait on semaphore
3460 	 */
3461 	sema_p(&(icmd->ipkt_sema));
3462 
3463 	/*
3464 	 * Check the status of the FC packet
3465 	 */
3466 	switch (fpkt->pkt_state) {
3467 	case FC_PKT_SUCCESS:
3468 		ret = 0;
3469 		break;
3470 	case FC_PKT_LOCAL_RJT:
3471 		switch (fpkt->pkt_reason) {
3472 		case FC_REASON_SEQ_TIMEOUT:
3473 		case FC_REASON_RX_BUF_TIMEOUT:
3474 			ret = EAGAIN;
3475 			break;
3476 		case FC_REASON_PKT_BUSY:
3477 			ret = EBUSY;
3478 			break;
3479 		}
3480 		break;
3481 	case FC_PKT_TIMEOUT:
3482 		ret = EAGAIN;
3483 		break;
3484 	case FC_PKT_LOCAL_BSY:
3485 	case FC_PKT_TRAN_BSY:
3486 	case FC_PKT_NPORT_BSY:
3487 	case FC_PKT_FABRIC_BSY:
3488 		ret = EBUSY;
3489 		break;
3490 	case FC_PKT_LS_RJT:
3491 	case FC_PKT_BA_RJT:
3492 		switch (fpkt->pkt_reason) {
3493 		case FC_REASON_LOGICAL_BSY:
3494 			ret = EBUSY;
3495 			break;
3496 		}
3497 		break;
3498 	case FC_PKT_FS_RJT:
3499 		switch (fpkt->pkt_reason) {
3500 		case FC_REASON_FS_LOGICAL_BUSY:
3501 			ret = EBUSY;
3502 			break;
3503 		}
3504 		break;
3505 	}
3506 
3507 	return (ret);
3508 }
3509 
3510 /*
3511  *     Function: fcp_ipkt_sema_callback
3512  *
3513  *  Description: Registered as the completion callback function for the FC
3514  *		 transport when the ipkt semaphore is used for sync. This will
3515  *		 cleanup the used data structures, if necessary and wake up
3516  *		 the user thread to complete the transaction.
3517  *
3518  *     Argument: fpkt	FC packet (points to the icmd)
3519  *
3520  * Return Value: None
3521  *
3522  *	Context: User context only
3523  */
3524 static void
3525 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3526 {
3527 	struct fcp_ipkt	*icmd;
3528 
3529 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3530 
3531 	/*
3532 	 * Wake up user thread
3533 	 */
3534 	sema_v(&(icmd->ipkt_sema));
3535 }
3536 
3537 /*
3538  *     Function: fcp_ipkt_sema_cleanup
3539  *
3540  *  Description: Called to cleanup (if necessary) the data structures used
3541  *		 when ipkt sema is used for sync.  This function will detect
3542  *		 whether the caller is the last thread (via counter) and
3543  *		 cleanup only if necessary.
3544  *
3545  *     Argument: icmd	Internal command packet
3546  *
3547  * Return Value: None
3548  *
3549  *	Context: User context only
3550  */
3551 static void
3552 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3553 {
3554 	struct fcp_tgt	*ptgt;
3555 	struct fcp_port	*pptr;
3556 
3557 	ptgt = icmd->ipkt_tgt;
3558 	pptr = icmd->ipkt_port;
3559 
3560 	/*
3561 	 * Acquire data structure
3562 	 */
3563 	mutex_enter(&ptgt->tgt_mutex);
3564 
3565 	/*
3566 	 * Destroy semaphore
3567 	 */
3568 	sema_destroy(&(icmd->ipkt_sema));
3569 
3570 	/*
3571 	 * Cleanup internal packet
3572 	 */
3573 	mutex_exit(&ptgt->tgt_mutex);
3574 	fcp_icmd_free(pptr, icmd);
3575 }
3576 
3577 /*
3578  *     Function: fcp_port_attach
3579  *
3580  *  Description: Called by the transport framework to resume, suspend or
3581  *		 attach a new port.
3582  *
3583  *     Argument: ulph		Port handle
3584  *		 *pinfo		Port information
3585  *		 cmd		Command
3586  *		 s_id		Port ID
3587  *
3588  * Return Value: FC_FAILURE or FC_SUCCESS
3589  */
3590 /*ARGSUSED*/
3591 static int
3592 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3593     fc_attach_cmd_t cmd, uint32_t s_id)
3594 {
3595 	int	instance;
3596 	int	res = FC_FAILURE; /* default result */
3597 
3598 	ASSERT(pinfo != NULL);
3599 
3600 	instance = ddi_get_instance(pinfo->port_dip);
3601 
3602 	switch (cmd) {
3603 	case FC_CMD_ATTACH:
3604 		/*
3605 		 * this port instance attaching for the first time (or after
3606 		 * being detached before)
3607 		 */
3608 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3609 		    instance) == DDI_SUCCESS) {
3610 			res = FC_SUCCESS;
3611 		} else {
3612 			ASSERT(ddi_get_soft_state(fcp_softstate,
3613 			    instance) == NULL);
3614 		}
3615 		break;
3616 
3617 	case FC_CMD_RESUME:
3618 	case FC_CMD_POWER_UP:
3619 		/*
3620 		 * this port instance was attached and the suspended and
3621 		 * will now be resumed
3622 		 */
3623 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3624 		    instance) == DDI_SUCCESS) {
3625 			res = FC_SUCCESS;
3626 		}
3627 		break;
3628 
3629 	default:
3630 		/* shouldn't happen */
3631 		FCP_TRACE(fcp_logq, "fcp",
3632 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3633 		    "port_attach: unknown cmdcommand: %d", cmd);
3634 		break;
3635 	}
3636 
3637 	/* return result */
3638 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3639 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3640 
3641 	return (res);
3642 }
3643 
3644 
3645 /*
3646  * detach or suspend this port instance
3647  *
3648  * acquires and releases the global mutex
3649  *
3650  * acquires and releases the mutex for this port
3651  *
3652  * acquires and releases the hotplug mutex for this port
3653  */
3654 /*ARGSUSED*/
3655 static int
3656 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3657     fc_detach_cmd_t cmd)
3658 {
3659 	int			flag;
3660 	int			instance;
3661 	struct fcp_port		*pptr;
3662 
3663 	instance = ddi_get_instance(info->port_dip);
3664 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3665 
3666 	switch (cmd) {
3667 	case FC_CMD_SUSPEND:
3668 		FCP_DTRACE(fcp_logq, "fcp",
3669 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3670 		    "port suspend called for port %d", instance);
3671 		flag = FCP_STATE_SUSPENDED;
3672 		break;
3673 
3674 	case FC_CMD_POWER_DOWN:
3675 		FCP_DTRACE(fcp_logq, "fcp",
3676 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3677 		    "port power down called for port %d", instance);
3678 		flag = FCP_STATE_POWER_DOWN;
3679 		break;
3680 
3681 	case FC_CMD_DETACH:
3682 		FCP_DTRACE(fcp_logq, "fcp",
3683 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3684 		    "port detach called for port %d", instance);
3685 		flag = FCP_STATE_DETACHING;
3686 		break;
3687 
3688 	default:
3689 		/* shouldn't happen */
3690 		return (FC_FAILURE);
3691 	}
3692 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3693 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3694 
3695 	return (fcp_handle_port_detach(pptr, flag, instance));
3696 }
3697 
3698 
3699 /*
3700  * called for ioctls on the transport's devctl interface, and the transport
3701  * has passed it to us
3702  *
3703  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3704  *
3705  * return FC_SUCCESS if we decide to claim the ioctl,
3706  * else return FC_UNCLAIMED
3707  *
3708  * *rval is set iff we decide to claim the ioctl
3709  */
3710 /*ARGSUSED*/
3711 static int
3712 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3713     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3714 {
3715 	int			retval = FC_UNCLAIMED;	/* return value */
3716 	struct fcp_port		*pptr = NULL;		/* our soft state */
3717 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3718 	dev_info_t		*cdip;
3719 	mdi_pathinfo_t		*pip = NULL;
3720 	char			*ndi_nm;		/* NDI name */
3721 	char			*ndi_addr;		/* NDI addr */
3722 	int			is_mpxio, circ;
3723 	int			devi_entered = 0;
3724 	time_t			end_time;
3725 
3726 	ASSERT(rval != NULL);
3727 
3728 	FCP_DTRACE(fcp_logq, "fcp",
3729 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3730 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3731 
3732 	/* if already claimed then forget it */
3733 	if (claimed) {
3734 		/*
3735 		 * for now, if this ioctl has already been claimed, then
3736 		 * we just ignore it
3737 		 */
3738 		return (retval);
3739 	}
3740 
3741 	/* get our port info */
3742 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3743 		fcp_log(CE_WARN, NULL,
3744 		    "!fcp:Invalid port handle handle in ioctl");
3745 		*rval = ENXIO;
3746 		return (retval);
3747 	}
3748 	is_mpxio = pptr->port_mpxio;
3749 
3750 	switch (cmd) {
3751 	case DEVCTL_BUS_GETSTATE:
3752 	case DEVCTL_BUS_QUIESCE:
3753 	case DEVCTL_BUS_UNQUIESCE:
3754 	case DEVCTL_BUS_RESET:
3755 	case DEVCTL_BUS_RESETALL:
3756 
3757 	case DEVCTL_BUS_DEV_CREATE:
3758 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3759 			return (retval);
3760 		}
3761 		break;
3762 
3763 	case DEVCTL_DEVICE_GETSTATE:
3764 	case DEVCTL_DEVICE_OFFLINE:
3765 	case DEVCTL_DEVICE_ONLINE:
3766 	case DEVCTL_DEVICE_REMOVE:
3767 	case DEVCTL_DEVICE_RESET:
3768 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3769 			return (retval);
3770 		}
3771 
3772 		ASSERT(dcp != NULL);
3773 
3774 		/* ensure we have a name and address */
3775 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3776 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3777 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3778 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3779 			    "ioctl: can't get name (%s) or addr (%s)",
3780 			    ndi_nm ? ndi_nm : "<null ptr>",
3781 			    ndi_addr ? ndi_addr : "<null ptr>");
3782 			ndi_dc_freehdl(dcp);
3783 			return (retval);
3784 		}
3785 
3786 
3787 		/* get our child's DIP */
3788 		ASSERT(pptr != NULL);
3789 		if (is_mpxio) {
3790 			mdi_devi_enter(pptr->port_dip, &circ);
3791 		} else {
3792 			ndi_devi_enter(pptr->port_dip, &circ);
3793 		}
3794 		devi_entered = 1;
3795 
3796 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3797 		    ndi_addr)) == NULL) {
3798 			/* Look for virtually enumerated devices. */
3799 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3800 			if (pip == NULL ||
3801 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3802 				*rval = ENXIO;
3803 				goto out;
3804 			}
3805 		}
3806 		break;
3807 
3808 	default:
3809 		*rval = ENOTTY;
3810 		return (retval);
3811 	}
3812 
3813 	/* this ioctl is ours -- process it */
3814 
3815 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3816 
3817 	/* we assume it will be a success; else we'll set error value */
3818 	*rval = 0;
3819 
3820 
3821 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3822 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3823 	    "ioctl: claiming this one");
3824 
3825 	/* handle ioctls now */
3826 	switch (cmd) {
3827 	case DEVCTL_DEVICE_GETSTATE:
3828 		ASSERT(cdip != NULL);
3829 		ASSERT(dcp != NULL);
3830 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3831 			*rval = EFAULT;
3832 		}
3833 		break;
3834 
3835 	case DEVCTL_DEVICE_REMOVE:
3836 	case DEVCTL_DEVICE_OFFLINE: {
3837 		int			flag = 0;
3838 		int			lcount;
3839 		int			tcount;
3840 		struct fcp_pkt	*head = NULL;
3841 		struct fcp_lun	*plun;
3842 		child_info_t		*cip = CIP(cdip);
3843 		int			all = 1;
3844 		struct fcp_lun	*tplun;
3845 		struct fcp_tgt	*ptgt;
3846 
3847 		ASSERT(pptr != NULL);
3848 		ASSERT(cdip != NULL);
3849 
3850 		mutex_enter(&pptr->port_mutex);
3851 		if (pip != NULL) {
3852 			cip = CIP(pip);
3853 		}
3854 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3855 			mutex_exit(&pptr->port_mutex);
3856 			*rval = ENXIO;
3857 			break;
3858 		}
3859 
3860 		head = fcp_scan_commands(plun);
3861 		if (head != NULL) {
3862 			fcp_abort_commands(head, LUN_PORT);
3863 		}
3864 		lcount = pptr->port_link_cnt;
3865 		tcount = plun->lun_tgt->tgt_change_cnt;
3866 		mutex_exit(&pptr->port_mutex);
3867 
3868 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3869 			flag = NDI_DEVI_REMOVE;
3870 		}
3871 
3872 		if (is_mpxio) {
3873 			mdi_devi_exit(pptr->port_dip, circ);
3874 		} else {
3875 			ndi_devi_exit(pptr->port_dip, circ);
3876 		}
3877 		devi_entered = 0;
3878 
3879 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3880 		    FCP_OFFLINE, lcount, tcount, flag);
3881 
3882 		if (*rval != NDI_SUCCESS) {
3883 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3884 			break;
3885 		}
3886 
3887 		fcp_update_offline_flags(plun);
3888 
3889 		ptgt = plun->lun_tgt;
3890 		mutex_enter(&ptgt->tgt_mutex);
3891 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3892 		    tplun->lun_next) {
3893 			mutex_enter(&tplun->lun_mutex);
3894 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3895 				all = 0;
3896 			}
3897 			mutex_exit(&tplun->lun_mutex);
3898 		}
3899 
3900 		if (all) {
3901 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3902 			/*
3903 			 * The user is unconfiguring/offlining the device.
3904 			 * If fabric and the auto configuration is set
3905 			 * then make sure the user is the only one who
3906 			 * can reconfigure the device.
3907 			 */
3908 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3909 			    fcp_enable_auto_configuration) {
3910 				ptgt->tgt_manual_config_only = 1;
3911 			}
3912 		}
3913 		mutex_exit(&ptgt->tgt_mutex);
3914 		break;
3915 	}
3916 
3917 	case DEVCTL_DEVICE_ONLINE: {
3918 		int			lcount;
3919 		int			tcount;
3920 		struct fcp_lun	*plun;
3921 		child_info_t		*cip = CIP(cdip);
3922 
3923 		ASSERT(cdip != NULL);
3924 		ASSERT(pptr != NULL);
3925 
3926 		mutex_enter(&pptr->port_mutex);
3927 		if (pip != NULL) {
3928 			cip = CIP(pip);
3929 		}
3930 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3931 			mutex_exit(&pptr->port_mutex);
3932 			*rval = ENXIO;
3933 			break;
3934 		}
3935 		lcount = pptr->port_link_cnt;
3936 		tcount = plun->lun_tgt->tgt_change_cnt;
3937 		mutex_exit(&pptr->port_mutex);
3938 
3939 		/*
3940 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3941 		 * to allow the device attach to occur when the device is
3942 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3943 		 * from the scsi_probe()).
3944 		 */
3945 		mutex_enter(&LUN_TGT->tgt_mutex);
3946 		plun->lun_state |= FCP_LUN_ONLINING;
3947 		mutex_exit(&LUN_TGT->tgt_mutex);
3948 
3949 		if (is_mpxio) {
3950 			mdi_devi_exit(pptr->port_dip, circ);
3951 		} else {
3952 			ndi_devi_exit(pptr->port_dip, circ);
3953 		}
3954 		devi_entered = 0;
3955 
3956 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3957 		    FCP_ONLINE, lcount, tcount, 0);
3958 
3959 		if (*rval != NDI_SUCCESS) {
3960 			/* Reset the FCP_LUN_ONLINING bit */
3961 			mutex_enter(&LUN_TGT->tgt_mutex);
3962 			plun->lun_state &= ~FCP_LUN_ONLINING;
3963 			mutex_exit(&LUN_TGT->tgt_mutex);
3964 			*rval = EIO;
3965 			break;
3966 		}
3967 		mutex_enter(&LUN_TGT->tgt_mutex);
3968 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3969 		    FCP_LUN_ONLINING);
3970 		mutex_exit(&LUN_TGT->tgt_mutex);
3971 		break;
3972 	}
3973 
3974 	case DEVCTL_BUS_DEV_CREATE: {
3975 		uchar_t			*bytes = NULL;
3976 		uint_t			nbytes;
3977 		struct fcp_tgt		*ptgt = NULL;
3978 		struct fcp_lun		*plun = NULL;
3979 		dev_info_t		*useless_dip = NULL;
3980 
3981 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3982 		    DEVCTL_CONSTRUCT, &useless_dip);
3983 		if (*rval != 0 || useless_dip == NULL) {
3984 			break;
3985 		}
3986 
3987 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3988 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3989 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3990 			*rval = EINVAL;
3991 			(void) ndi_devi_free(useless_dip);
3992 			if (bytes != NULL) {
3993 				ddi_prop_free(bytes);
3994 			}
3995 			break;
3996 		}
3997 
3998 		*rval = fcp_create_on_demand(pptr, bytes);
3999 		if (*rval == 0) {
4000 			mutex_enter(&pptr->port_mutex);
4001 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4002 			if (ptgt) {
4003 				/*
4004 				 * We now have a pointer to the target that
4005 				 * was created. Lets point to the first LUN on
4006 				 * this new target.
4007 				 */
4008 				mutex_enter(&ptgt->tgt_mutex);
4009 
4010 				plun = ptgt->tgt_lun;
4011 				/*
4012 				 * There may be stale/offline LUN entries on
4013 				 * this list (this is by design) and so we have
4014 				 * to make sure we point to the first online
4015 				 * LUN
4016 				 */
4017 				while (plun &&
4018 				    plun->lun_state & FCP_LUN_OFFLINE) {
4019 					plun = plun->lun_next;
4020 				}
4021 
4022 				mutex_exit(&ptgt->tgt_mutex);
4023 			}
4024 			mutex_exit(&pptr->port_mutex);
4025 		}
4026 
4027 		if (*rval == 0 && ptgt && plun) {
4028 			mutex_enter(&plun->lun_mutex);
4029 			/*
4030 			 * Allow up to fcp_lun_ready_retry seconds to
4031 			 * configure all the luns behind the target.
4032 			 *
4033 			 * The intent here is to allow targets with long
4034 			 * reboot/reset-recovery times to become available
4035 			 * while limiting the maximum wait time for an
4036 			 * unresponsive target.
4037 			 */
4038 			end_time = ddi_get_lbolt() +
4039 			    SEC_TO_TICK(fcp_lun_ready_retry);
4040 
4041 			while (ddi_get_lbolt() < end_time) {
4042 				retval = FC_SUCCESS;
4043 
4044 				/*
4045 				 * The new ndi interfaces for on-demand creation
4046 				 * are inflexible, Do some more work to pass on
4047 				 * a path name of some LUN (design is broken !)
4048 				 */
4049 				if (plun->lun_cip) {
4050 					if (plun->lun_mpxio == 0) {
4051 						cdip = DIP(plun->lun_cip);
4052 					} else {
4053 						cdip = mdi_pi_get_client(
4054 						    PIP(plun->lun_cip));
4055 					}
4056 					if (cdip == NULL) {
4057 						*rval = ENXIO;
4058 						break;
4059 					}
4060 
4061 					if (!i_ddi_devi_attached(cdip)) {
4062 						mutex_exit(&plun->lun_mutex);
4063 						delay(drv_usectohz(1000000));
4064 						mutex_enter(&plun->lun_mutex);
4065 					} else {
4066 						/*
4067 						 * This Lun is ready, lets
4068 						 * check the next one.
4069 						 */
4070 						mutex_exit(&plun->lun_mutex);
4071 						plun = plun->lun_next;
4072 						while (plun && (plun->lun_state
4073 						    & FCP_LUN_OFFLINE)) {
4074 							plun = plun->lun_next;
4075 						}
4076 						if (!plun) {
4077 							break;
4078 						}
4079 						mutex_enter(&plun->lun_mutex);
4080 					}
4081 				} else {
4082 					/*
4083 					 * lun_cip field for a valid lun
4084 					 * should never be NULL. Fail the
4085 					 * command.
4086 					 */
4087 					*rval = ENXIO;
4088 					break;
4089 				}
4090 			}
4091 			if (plun) {
4092 				mutex_exit(&plun->lun_mutex);
4093 			} else {
4094 				char devnm[MAXNAMELEN];
4095 				int nmlen;
4096 
4097 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4098 				    ddi_node_name(cdip),
4099 				    ddi_get_name_addr(cdip));
4100 
4101 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4102 				    0) {
4103 					*rval = EFAULT;
4104 				}
4105 			}
4106 		} else {
4107 			int	i;
4108 			char	buf[25];
4109 
4110 			for (i = 0; i < FC_WWN_SIZE; i++) {
4111 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4112 			}
4113 
4114 			fcp_log(CE_WARN, pptr->port_dip,
4115 			    "!Failed to create nodes for pwwn=%s; error=%x",
4116 			    buf, *rval);
4117 		}
4118 
4119 		(void) ndi_devi_free(useless_dip);
4120 		ddi_prop_free(bytes);
4121 		break;
4122 	}
4123 
4124 	case DEVCTL_DEVICE_RESET: {
4125 		struct fcp_lun		*plun;
4126 		child_info_t		*cip = CIP(cdip);
4127 
4128 		ASSERT(cdip != NULL);
4129 		ASSERT(pptr != NULL);
4130 		mutex_enter(&pptr->port_mutex);
4131 		if (pip != NULL) {
4132 			cip = CIP(pip);
4133 		}
4134 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4135 			mutex_exit(&pptr->port_mutex);
4136 			*rval = ENXIO;
4137 			break;
4138 		}
4139 		mutex_exit(&pptr->port_mutex);
4140 
4141 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4142 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4143 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4144 
4145 			*rval = ENXIO;
4146 			break;
4147 		}
4148 
4149 		if (plun->lun_sd == NULL) {
4150 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4151 
4152 			*rval = ENXIO;
4153 			break;
4154 		}
4155 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4156 
4157 		/*
4158 		 * set up ap so that fcp_reset can figure out
4159 		 * which target to reset
4160 		 */
4161 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4162 		    RESET_TARGET) == FALSE) {
4163 			*rval = EIO;
4164 		}
4165 		break;
4166 	}
4167 
4168 	case DEVCTL_BUS_GETSTATE:
4169 		ASSERT(dcp != NULL);
4170 		ASSERT(pptr != NULL);
4171 		ASSERT(pptr->port_dip != NULL);
4172 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4173 		    NDI_SUCCESS) {
4174 			*rval = EFAULT;
4175 		}
4176 		break;
4177 
4178 	case DEVCTL_BUS_QUIESCE:
4179 	case DEVCTL_BUS_UNQUIESCE:
4180 		*rval = ENOTSUP;
4181 		break;
4182 
4183 	case DEVCTL_BUS_RESET:
4184 	case DEVCTL_BUS_RESETALL:
4185 		ASSERT(pptr != NULL);
4186 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4187 		break;
4188 
4189 	default:
4190 		ASSERT(dcp != NULL);
4191 		*rval = ENOTTY;
4192 		break;
4193 	}
4194 
4195 	/* all done -- clean up and return */
4196 out:	if (devi_entered) {
4197 		if (is_mpxio) {
4198 			mdi_devi_exit(pptr->port_dip, circ);
4199 		} else {
4200 			ndi_devi_exit(pptr->port_dip, circ);
4201 		}
4202 	}
4203 
4204 	if (dcp != NULL) {
4205 		ndi_dc_freehdl(dcp);
4206 	}
4207 
4208 	return (retval);
4209 }
4210 
4211 
4212 /*ARGSUSED*/
4213 static int
4214 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4215     uint32_t claimed)
4216 {
4217 	uchar_t			r_ctl;
4218 	uchar_t			ls_code;
4219 	struct fcp_port	*pptr;
4220 
4221 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4222 		return (FC_UNCLAIMED);
4223 	}
4224 
4225 	mutex_enter(&pptr->port_mutex);
4226 	if (pptr->port_state & (FCP_STATE_DETACHING |
4227 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4228 		mutex_exit(&pptr->port_mutex);
4229 		return (FC_UNCLAIMED);
4230 	}
4231 	mutex_exit(&pptr->port_mutex);
4232 
4233 	r_ctl = buf->ub_frame.r_ctl;
4234 
4235 	switch (r_ctl & R_CTL_ROUTING) {
4236 	case R_CTL_EXTENDED_SVC:
4237 		if (r_ctl == R_CTL_ELS_REQ) {
4238 			ls_code = buf->ub_buffer[0];
4239 
4240 			switch (ls_code) {
4241 			case LA_ELS_PRLI:
4242 				/*
4243 				 * We really don't care if something fails.
4244 				 * If the PRLI was not sent out, then the
4245 				 * other end will time it out.
4246 				 */
4247 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4248 					return (FC_SUCCESS);
4249 				}
4250 				return (FC_UNCLAIMED);
4251 				/* NOTREACHED */
4252 
4253 			default:
4254 				break;
4255 			}
4256 		}
4257 		/* FALLTHROUGH */
4258 
4259 	default:
4260 		return (FC_UNCLAIMED);
4261 	}
4262 }
4263 
4264 
4265 /*ARGSUSED*/
4266 static int
4267 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4268     uint32_t claimed)
4269 {
4270 	return (FC_UNCLAIMED);
4271 }
4272 
4273 /*
4274  *     Function: fcp_statec_callback
4275  *
4276  *  Description: The purpose of this function is to handle a port state change.
4277  *		 It is called from fp/fctl and, in a few instances, internally.
4278  *
4279  *     Argument: ulph		fp/fctl port handle
4280  *		 port_handle	fcp_port structure
4281  *		 port_state	Physical state of the port
4282  *		 port_top	Topology
4283  *		 *devlist	Pointer to the first entry of a table
4284  *				containing the remote ports that can be
4285  *				reached.
4286  *		 dev_cnt	Number of entries pointed by devlist.
4287  *		 port_sid	Port ID of the local port.
4288  *
4289  * Return Value: None
4290  */
4291 /*ARGSUSED*/
4292 static void
4293 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4294     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4295     uint32_t dev_cnt, uint32_t port_sid)
4296 {
4297 	uint32_t		link_count;
4298 	int			map_len = 0;
4299 	struct fcp_port	*pptr;
4300 	fcp_map_tag_t		*map_tag = NULL;
4301 
4302 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4303 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4304 		return;			/* nothing to work with! */
4305 	}
4306 
4307 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4308 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4309 	    "fcp_statec_callback: port state/dev_cnt/top ="
4310 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4311 	    dev_cnt, port_top);
4312 
4313 	mutex_enter(&pptr->port_mutex);
4314 
4315 	/*
4316 	 * If a thread is in detach, don't do anything.
4317 	 */
4318 	if (pptr->port_state & (FCP_STATE_DETACHING |
4319 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4320 		mutex_exit(&pptr->port_mutex);
4321 		return;
4322 	}
4323 
4324 	/*
4325 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4326 	 * init_pkt is called, it knows whether or not the target's status
4327 	 * (or pd) might be changing.
4328 	 */
4329 
4330 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4331 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4332 	}
4333 
4334 	/*
4335 	 * the transport doesn't allocate or probe unless being
4336 	 * asked to by either the applications or ULPs
4337 	 *
4338 	 * in cases where the port is OFFLINE at the time of port
4339 	 * attach callback and the link comes ONLINE later, for
4340 	 * easier automatic node creation (i.e. without you having to
4341 	 * go out and run the utility to perform LOGINs) the
4342 	 * following conditional is helpful
4343 	 */
4344 	pptr->port_phys_state = port_state;
4345 
4346 	if (dev_cnt) {
4347 		mutex_exit(&pptr->port_mutex);
4348 
4349 		map_len = sizeof (*map_tag) * dev_cnt;
4350 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4351 		if (map_tag == NULL) {
4352 			fcp_log(CE_WARN, pptr->port_dip,
4353 			    "!fcp%d: failed to allocate for map tags; "
4354 			    " state change will not be processed",
4355 			    pptr->port_instance);
4356 
4357 			mutex_enter(&pptr->port_mutex);
4358 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4359 			mutex_exit(&pptr->port_mutex);
4360 
4361 			return;
4362 		}
4363 
4364 		mutex_enter(&pptr->port_mutex);
4365 	}
4366 
4367 	if (pptr->port_id != port_sid) {
4368 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4369 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4370 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4371 		    port_sid);
4372 		/*
4373 		 * The local port changed ID. It is the first time a port ID
4374 		 * is assigned or something drastic happened.  We might have
4375 		 * been unplugged and replugged on another loop or fabric port
4376 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4377 		 * the fabric we were plugged into.
4378 		 */
4379 		pptr->port_id = port_sid;
4380 	}
4381 
4382 	switch (FC_PORT_STATE_MASK(port_state)) {
4383 	case FC_STATE_OFFLINE:
4384 	case FC_STATE_RESET_REQUESTED:
4385 		/*
4386 		 * link has gone from online to offline -- just update the
4387 		 * state of this port to BUSY and MARKed to go offline
4388 		 */
4389 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4390 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4391 		    "link went offline");
4392 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4393 			/*
4394 			 * We were offline a while ago and this one
4395 			 * seems to indicate that the loop has gone
4396 			 * dead forever.
4397 			 */
4398 			pptr->port_tmp_cnt += dev_cnt;
4399 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4400 			pptr->port_state |= FCP_STATE_INIT;
4401 			link_count = pptr->port_link_cnt;
4402 			fcp_handle_devices(pptr, devlist, dev_cnt,
4403 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4404 		} else {
4405 			pptr->port_link_cnt++;
4406 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4407 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4408 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4409 			if (pptr->port_mpxio) {
4410 				fcp_update_mpxio_path_verifybusy(pptr);
4411 			}
4412 			pptr->port_state |= FCP_STATE_OFFLINE;
4413 			pptr->port_state &=
4414 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4415 			pptr->port_tmp_cnt = 0;
4416 		}
4417 		mutex_exit(&pptr->port_mutex);
4418 		break;
4419 
4420 	case FC_STATE_ONLINE:
4421 	case FC_STATE_LIP:
4422 	case FC_STATE_LIP_LBIT_SET:
4423 		/*
4424 		 * link has gone from offline to online
4425 		 */
4426 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4427 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4428 		    "link went online");
4429 
4430 		pptr->port_link_cnt++;
4431 
4432 		while (pptr->port_ipkt_cnt) {
4433 			mutex_exit(&pptr->port_mutex);
4434 			delay(drv_usectohz(1000000));
4435 			mutex_enter(&pptr->port_mutex);
4436 		}
4437 
4438 		pptr->port_topology = port_top;
4439 
4440 		/*
4441 		 * The state of the targets and luns accessible through this
4442 		 * port is updated.
4443 		 */
4444 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4445 		    FCP_CAUSE_LINK_CHANGE);
4446 
4447 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4448 		pptr->port_state |= FCP_STATE_ONLINING;
4449 		pptr->port_tmp_cnt = dev_cnt;
4450 		link_count = pptr->port_link_cnt;
4451 
4452 		pptr->port_deadline = fcp_watchdog_time +
4453 		    FCP_ICMD_DEADLINE;
4454 
4455 		if (!dev_cnt) {
4456 			/*
4457 			 * We go directly to the online state if no remote
4458 			 * ports were discovered.
4459 			 */
4460 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4461 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4462 			    "No remote ports discovered");
4463 
4464 			pptr->port_state &= ~FCP_STATE_ONLINING;
4465 			pptr->port_state |= FCP_STATE_ONLINE;
4466 		}
4467 
4468 		switch (port_top) {
4469 		case FC_TOP_FABRIC:
4470 		case FC_TOP_PUBLIC_LOOP:
4471 		case FC_TOP_PRIVATE_LOOP:
4472 		case FC_TOP_PT_PT:
4473 
4474 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4475 				fcp_retry_ns_registry(pptr, port_sid);
4476 			}
4477 
4478 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4479 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4480 			break;
4481 
4482 		default:
4483 			/*
4484 			 * We got here because we were provided with an unknown
4485 			 * topology.
4486 			 */
4487 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4488 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4489 			}
4490 
4491 			pptr->port_tmp_cnt -= dev_cnt;
4492 			fcp_log(CE_WARN, pptr->port_dip,
4493 			    "!unknown/unsupported topology (0x%x)", port_top);
4494 			break;
4495 		}
4496 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4497 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4498 		    "Notify ssd of the reset to reinstate the reservations");
4499 
4500 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4501 		    &pptr->port_reset_notify_listf);
4502 
4503 		mutex_exit(&pptr->port_mutex);
4504 
4505 		break;
4506 
4507 	case FC_STATE_RESET:
4508 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4509 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4510 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4511 		    "RESET state, waiting for Offline/Online state_cb");
4512 		mutex_exit(&pptr->port_mutex);
4513 		break;
4514 
4515 	case FC_STATE_DEVICE_CHANGE:
4516 		/*
4517 		 * We come here when an application has requested
4518 		 * Dynamic node creation/deletion in Fabric connectivity.
4519 		 */
4520 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4521 		    FCP_STATE_INIT)) {
4522 			/*
4523 			 * This case can happen when the FCTL is in the
4524 			 * process of giving us on online and the host on
4525 			 * the other side issues a PLOGI/PLOGO. Ideally
4526 			 * the state changes should be serialized unless
4527 			 * they are opposite (online-offline).
4528 			 * The transport will give us a final state change
4529 			 * so we can ignore this for the time being.
4530 			 */
4531 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4532 			mutex_exit(&pptr->port_mutex);
4533 			break;
4534 		}
4535 
4536 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4537 			fcp_retry_ns_registry(pptr, port_sid);
4538 		}
4539 
4540 		/*
4541 		 * Extend the deadline under steady state conditions
4542 		 * to provide more time for the device-change-commands
4543 		 */
4544 		if (!pptr->port_ipkt_cnt) {
4545 			pptr->port_deadline = fcp_watchdog_time +
4546 			    FCP_ICMD_DEADLINE;
4547 		}
4548 
4549 		/*
4550 		 * There is another race condition here, where if we were
4551 		 * in ONLINEING state and a devices in the map logs out,
4552 		 * fp will give another state change as DEVICE_CHANGE
4553 		 * and OLD. This will result in that target being offlined.
4554 		 * The pd_handle is freed. If from the first statec callback
4555 		 * we were going to fire a PLOGI/PRLI, the system will
4556 		 * panic in fc_ulp_transport with invalid pd_handle.
4557 		 * The fix is to check for the link_cnt before issuing
4558 		 * any command down.
4559 		 */
4560 		fcp_update_targets(pptr, devlist, dev_cnt,
4561 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4562 
4563 		link_count = pptr->port_link_cnt;
4564 
4565 		fcp_handle_devices(pptr, devlist, dev_cnt,
4566 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4567 
4568 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4569 
4570 		mutex_exit(&pptr->port_mutex);
4571 		break;
4572 
4573 	case FC_STATE_TARGET_PORT_RESET:
4574 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4575 			fcp_retry_ns_registry(pptr, port_sid);
4576 		}
4577 
4578 		/* Do nothing else */
4579 		mutex_exit(&pptr->port_mutex);
4580 		break;
4581 
4582 	default:
4583 		fcp_log(CE_WARN, pptr->port_dip,
4584 		    "!Invalid state change=0x%x", port_state);
4585 		mutex_exit(&pptr->port_mutex);
4586 		break;
4587 	}
4588 
4589 	if (map_tag) {
4590 		kmem_free(map_tag, map_len);
4591 	}
4592 }
4593 
4594 /*
4595  *     Function: fcp_handle_devices
4596  *
4597  *  Description: This function updates the devices currently known by
4598  *		 walking the list provided by the caller.  The list passed
4599  *		 by the caller is supposed to be the list of reachable
4600  *		 devices.
4601  *
4602  *     Argument: *pptr		Fcp port structure.
4603  *		 *devlist	Pointer to the first entry of a table
4604  *				containing the remote ports that can be
4605  *				reached.
4606  *		 dev_cnt	Number of entries pointed by devlist.
4607  *		 link_cnt	Link state count.
4608  *		 *map_tag	Array of fcp_map_tag_t structures.
4609  *		 cause		What caused this function to be called.
4610  *
4611  * Return Value: None
4612  *
4613  *	  Notes: The pptr->port_mutex must be held.
4614  */
4615 static void
4616 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4617     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4618 {
4619 	int			i;
4620 	int			check_finish_init = 0;
4621 	fc_portmap_t		*map_entry;
4622 	struct fcp_tgt	*ptgt = NULL;
4623 
4624 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4625 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4626 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4627 
4628 	if (dev_cnt) {
4629 		ASSERT(map_tag != NULL);
4630 	}
4631 
4632 	/*
4633 	 * The following code goes through the list of remote ports that are
4634 	 * accessible through this (pptr) local port (The list walked is the
4635 	 * one provided by the caller which is the list of the remote ports
4636 	 * currently reachable).  It checks if any of them was already
4637 	 * known by looking for the corresponding target structure based on
4638 	 * the world wide name.	 If a target is part of the list it is tagged
4639 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4640 	 *
4641 	 * Old comment
4642 	 * -----------
4643 	 * Before we drop port mutex; we MUST get the tags updated; This
4644 	 * two step process is somewhat slow, but more reliable.
4645 	 */
4646 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4647 		map_entry = &(devlist[i]);
4648 
4649 		/*
4650 		 * get ptr to this map entry in our port's
4651 		 * list (if any)
4652 		 */
4653 		ptgt = fcp_lookup_target(pptr,
4654 		    (uchar_t *)&(map_entry->map_pwwn));
4655 
4656 		if (ptgt) {
4657 			map_tag[i] = ptgt->tgt_change_cnt;
4658 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4659 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4660 			}
4661 		}
4662 	}
4663 
4664 	/*
4665 	 * At this point we know which devices of the new list were already
4666 	 * known (The field tgt_aux_state of the target structure has been
4667 	 * set to FCP_TGT_TAGGED).
4668 	 *
4669 	 * The following code goes through the list of targets currently known
4670 	 * by the local port (the list is actually a hashing table).  If a
4671 	 * target is found and is not tagged, it means the target cannot
4672 	 * be reached anymore through the local port (pptr).  It is offlined.
4673 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4674 	 */
4675 	for (i = 0; i < FCP_NUM_HASH; i++) {
4676 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4677 		    ptgt = ptgt->tgt_next) {
4678 			mutex_enter(&ptgt->tgt_mutex);
4679 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4680 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4681 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4682 				fcp_offline_target_now(pptr, ptgt,
4683 				    link_cnt, ptgt->tgt_change_cnt, 0);
4684 			}
4685 			mutex_exit(&ptgt->tgt_mutex);
4686 		}
4687 	}
4688 
4689 	/*
4690 	 * At this point, the devices that were known but cannot be reached
4691 	 * anymore, have most likely been offlined.
4692 	 *
4693 	 * The following section of code seems to go through the list of
4694 	 * remote ports that can now be reached.  For every single one it
4695 	 * checks if it is already known or if it is a new port.
4696 	 */
4697 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4698 
4699 		if (check_finish_init) {
4700 			ASSERT(i > 0);
4701 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4702 			    map_tag[i - 1], cause);
4703 			check_finish_init = 0;
4704 		}
4705 
4706 		/* get a pointer to this map entry */
4707 		map_entry = &(devlist[i]);
4708 
4709 		/*
4710 		 * Check for the duplicate map entry flag. If we have marked
4711 		 * this entry as a duplicate we skip it since the correct
4712 		 * (perhaps even same) state change will be encountered
4713 		 * later in the list.
4714 		 */
4715 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4716 			continue;
4717 		}
4718 
4719 		/* get ptr to this map entry in our port's list (if any) */
4720 		ptgt = fcp_lookup_target(pptr,
4721 		    (uchar_t *)&(map_entry->map_pwwn));
4722 
4723 		if (ptgt) {
4724 			/*
4725 			 * This device was already known.  The field
4726 			 * tgt_aux_state is reset (was probably set to
4727 			 * FCP_TGT_TAGGED previously in this routine).
4728 			 */
4729 			ptgt->tgt_aux_state = 0;
4730 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4731 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4732 			    "handle_devices: map did/state/type/flags = "
4733 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4734 			    "tgt_state=%d",
4735 			    map_entry->map_did.port_id, map_entry->map_state,
4736 			    map_entry->map_type, map_entry->map_flags,
4737 			    ptgt->tgt_d_id, ptgt->tgt_state);
4738 		}
4739 
4740 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4741 		    map_entry->map_type == PORT_DEVICE_NEW ||
4742 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4743 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4744 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4745 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4746 			    "map_type=%x, did = %x",
4747 			    map_entry->map_type,
4748 			    map_entry->map_did.port_id);
4749 		}
4750 
4751 		switch (map_entry->map_type) {
4752 		case PORT_DEVICE_NOCHANGE:
4753 		case PORT_DEVICE_USER_CREATE:
4754 		case PORT_DEVICE_USER_LOGIN:
4755 		case PORT_DEVICE_NEW:
4756 		case PORT_DEVICE_REPORTLUN_CHANGED:
4757 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4758 
4759 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4760 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4761 			    cause) == TRUE) {
4762 
4763 				FCP_TGT_TRACE(ptgt, map_tag[i],
4764 				    FCP_TGT_TRACE_2);
4765 				check_finish_init++;
4766 			}
4767 			break;
4768 
4769 		case PORT_DEVICE_OLD:
4770 			if (ptgt != NULL) {
4771 				FCP_TGT_TRACE(ptgt, map_tag[i],
4772 				    FCP_TGT_TRACE_3);
4773 
4774 				mutex_enter(&ptgt->tgt_mutex);
4775 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4776 					/*
4777 					 * Must do an in-line wait for I/Os
4778 					 * to get drained
4779 					 */
4780 					mutex_exit(&ptgt->tgt_mutex);
4781 					mutex_exit(&pptr->port_mutex);
4782 
4783 					mutex_enter(&ptgt->tgt_mutex);
4784 					while (ptgt->tgt_ipkt_cnt ||
4785 					    fcp_outstanding_lun_cmds(ptgt)
4786 					    == FC_SUCCESS) {
4787 						mutex_exit(&ptgt->tgt_mutex);
4788 						delay(drv_usectohz(1000000));
4789 						mutex_enter(&ptgt->tgt_mutex);
4790 					}
4791 					mutex_exit(&ptgt->tgt_mutex);
4792 
4793 					mutex_enter(&pptr->port_mutex);
4794 					mutex_enter(&ptgt->tgt_mutex);
4795 
4796 					(void) fcp_offline_target(pptr, ptgt,
4797 					    link_cnt, map_tag[i], 0, 0);
4798 				}
4799 				mutex_exit(&ptgt->tgt_mutex);
4800 			}
4801 			check_finish_init++;
4802 			break;
4803 
4804 		case PORT_DEVICE_USER_DELETE:
4805 		case PORT_DEVICE_USER_LOGOUT:
4806 			if (ptgt != NULL) {
4807 				FCP_TGT_TRACE(ptgt, map_tag[i],
4808 				    FCP_TGT_TRACE_4);
4809 
4810 				mutex_enter(&ptgt->tgt_mutex);
4811 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4812 					(void) fcp_offline_target(pptr, ptgt,
4813 					    link_cnt, map_tag[i], 1, 0);
4814 				}
4815 				mutex_exit(&ptgt->tgt_mutex);
4816 			}
4817 			check_finish_init++;
4818 			break;
4819 
4820 		case PORT_DEVICE_CHANGED:
4821 			if (ptgt != NULL) {
4822 				FCP_TGT_TRACE(ptgt, map_tag[i],
4823 				    FCP_TGT_TRACE_5);
4824 
4825 				if (fcp_device_changed(pptr, ptgt,
4826 				    map_entry, link_cnt, map_tag[i],
4827 				    cause) == TRUE) {
4828 					check_finish_init++;
4829 				}
4830 			} else {
4831 				if (fcp_handle_mapflags(pptr, ptgt,
4832 				    map_entry, link_cnt, 0, cause) == TRUE) {
4833 					check_finish_init++;
4834 				}
4835 			}
4836 			break;
4837 
4838 		default:
4839 			fcp_log(CE_WARN, pptr->port_dip,
4840 			    "!Invalid map_type=0x%x", map_entry->map_type);
4841 			check_finish_init++;
4842 			break;
4843 		}
4844 	}
4845 
4846 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4847 		ASSERT(i > 0);
4848 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4849 		    map_tag[i-1], cause);
4850 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4851 		fcp_offline_all(pptr, link_cnt, cause);
4852 	}
4853 }
4854 
4855 static int
4856 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4857 {
4858 	struct fcp_lun	*plun;
4859 	struct fcp_port *pptr;
4860 	int		 rscn_count;
4861 	int		 lun0_newalloc;
4862 	int		 ret  = TRUE;
4863 
4864 	ASSERT(ptgt);
4865 	pptr = ptgt->tgt_port;
4866 	lun0_newalloc = 0;
4867 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4868 		/*
4869 		 * no LUN struct for LUN 0 yet exists,
4870 		 * so create one
4871 		 */
4872 		plun = fcp_alloc_lun(ptgt);
4873 		if (plun == NULL) {
4874 			fcp_log(CE_WARN, pptr->port_dip,
4875 			    "!Failed to allocate lun 0 for"
4876 			    " D_ID=%x", ptgt->tgt_d_id);
4877 			return (ret);
4878 		}
4879 		lun0_newalloc = 1;
4880 	}
4881 
4882 	mutex_enter(&ptgt->tgt_mutex);
4883 	/*
4884 	 * consider lun 0 as device not connected if it is
4885 	 * offlined or newly allocated
4886 	 */
4887 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4888 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4889 	}
4890 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4891 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4892 	ptgt->tgt_lun_cnt = 1;
4893 	ptgt->tgt_report_lun_cnt = 0;
4894 	mutex_exit(&ptgt->tgt_mutex);
4895 
4896 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4897 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4898 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4899 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4900 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4901 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4902 		    "to D_ID=%x", ptgt->tgt_d_id);
4903 	} else {
4904 		ret = FALSE;
4905 	}
4906 
4907 	return (ret);
4908 }
4909 
4910 /*
4911  *     Function: fcp_handle_mapflags
4912  *
4913  *  Description: This function creates a target structure if the ptgt passed
4914  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4915  *		 into the target yet or the PRLI if we are logged into the
4916  *		 target already.  The rest of the treatment is done in the
4917  *		 callbacks of the PLOGI or PRLI.
4918  *
4919  *     Argument: *pptr		FCP Port structure.
4920  *		 *ptgt		Target structure.
4921  *		 *map_entry	Array of fc_portmap_t structures.
4922  *		 link_cnt	Link state count.
4923  *		 tgt_cnt	Target state count.
4924  *		 cause		What caused this function to be called.
4925  *
4926  * Return Value: TRUE	Failed
4927  *		 FALSE	Succeeded
4928  *
4929  *	  Notes: pptr->port_mutex must be owned.
4930  */
4931 static int
4932 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4933     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4934 {
4935 	int			lcount;
4936 	int			tcount;
4937 	int			ret = TRUE;
4938 	int			alloc;
4939 	struct fcp_ipkt	*icmd;
4940 	struct fcp_lun	*pseq_lun = NULL;
4941 	uchar_t			opcode;
4942 	int			valid_ptgt_was_passed = FALSE;
4943 
4944 	ASSERT(mutex_owned(&pptr->port_mutex));
4945 
4946 	/*
4947 	 * This case is possible where the FCTL has come up and done discovery
4948 	 * before FCP was loaded and attached. FCTL would have discovered the
4949 	 * devices and later the ULP came online. In this case ULP's would get
4950 	 * PORT_DEVICE_NOCHANGE but target would be NULL.
4951 	 */
4952 	if (ptgt == NULL) {
4953 		/* don't already have a target */
4954 		mutex_exit(&pptr->port_mutex);
4955 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4956 		mutex_enter(&pptr->port_mutex);
4957 
4958 		if (ptgt == NULL) {
4959 			fcp_log(CE_WARN, pptr->port_dip,
4960 			    "!FC target allocation failed");
4961 			return (ret);
4962 		}
4963 		mutex_enter(&ptgt->tgt_mutex);
4964 		ptgt->tgt_statec_cause = cause;
4965 		ptgt->tgt_tmp_cnt = 1;
4966 		mutex_exit(&ptgt->tgt_mutex);
4967 	} else {
4968 		valid_ptgt_was_passed = TRUE;
4969 	}
4970 
4971 	/*
4972 	 * Copy in the target parameters
4973 	 */
4974 	mutex_enter(&ptgt->tgt_mutex);
4975 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4976 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4977 	ptgt->tgt_pd_handle = map_entry->map_pd;
4978 	ptgt->tgt_fca_dev = NULL;
4979 
4980 	/* Copy port and node WWNs */
4981 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4982 	    FC_WWN_SIZE);
4983 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4984 	    FC_WWN_SIZE);
4985 
4986 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4987 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4988 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4989 	    valid_ptgt_was_passed) {
4990 		/*
4991 		 * determine if there are any tape LUNs on this target
4992 		 */
4993 		for (pseq_lun = ptgt->tgt_lun;
4994 		    pseq_lun != NULL;
4995 		    pseq_lun = pseq_lun->lun_next) {
4996 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4997 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4998 				fcp_update_tgt_state(ptgt, FCP_RESET,
4999 				    FCP_LUN_MARK);
5000 				mutex_exit(&ptgt->tgt_mutex);
5001 				return (ret);
5002 			}
5003 		}
5004 	}
5005 
5006 	/*
5007 	 * if UA'REPORT_LUN_CHANGED received,
5008 	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
5009 	 */
5010 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5011 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5012 		mutex_exit(&ptgt->tgt_mutex);
5013 		mutex_exit(&pptr->port_mutex);
5014 
5015 		ret = fcp_handle_reportlun_changed(ptgt, cause);
5016 
5017 		mutex_enter(&pptr->port_mutex);
5018 		return (ret);
5019 	}
5020 
5021 	/*
5022 	 * If ptgt was NULL when this function was entered, then tgt_node_state
5023 	 * was never specifically initialized but zeroed out which means
5024 	 * FCP_TGT_NODE_NONE.
5025 	 */
5026 	switch (ptgt->tgt_node_state) {
5027 	case FCP_TGT_NODE_NONE:
5028 	case FCP_TGT_NODE_ON_DEMAND:
5029 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5030 		    !fcp_enable_auto_configuration &&
5031 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5032 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5033 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5034 		    fcp_enable_auto_configuration &&
5035 		    (ptgt->tgt_manual_config_only == 1) &&
5036 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5037 			/*
5038 			 * If auto configuration is set and
5039 			 * the tgt_manual_config_only flag is set then
5040 			 * we only want the user to be able to change
5041 			 * the state through create_on_demand.
5042 			 */
5043 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5044 		} else {
5045 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5046 		}
5047 		break;
5048 
5049 	case FCP_TGT_NODE_PRESENT:
5050 		break;
5051 	}
5052 	/*
5053 	 * If we are booting from a fabric device, make sure we
5054 	 * mark the node state appropriately for this target to be
5055 	 * enumerated
5056 	 */
5057 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5058 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5059 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5060 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5061 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5062 		}
5063 	}
5064 	mutex_exit(&ptgt->tgt_mutex);
5065 
5066 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5067 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5068 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5069 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5070 	    map_entry->map_rscn_info.ulp_rscn_count);
5071 
5072 	mutex_enter(&ptgt->tgt_mutex);
5073 
5074 	/*
5075 	 * Reset target OFFLINE state and mark the target BUSY
5076 	 */
5077 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5078 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5079 
5080 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5081 	lcount = link_cnt;
5082 
5083 	mutex_exit(&ptgt->tgt_mutex);
5084 	mutex_exit(&pptr->port_mutex);
5085 
5086 	/*
5087 	 * if we are already logged in, then we do a PRLI, else
5088 	 * we do a PLOGI first (to get logged in)
5089 	 *
5090 	 * We will not check if we are the PLOGI initiator
5091 	 */
5092 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5093 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5094 
5095 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5096 
5097 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5098 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5099 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5100 
5101 	if (icmd == NULL) {
5102 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5103 		/*
5104 		 * We've exited port_mutex before calling fcp_icmd_alloc,
5105 		 * we need to make sure we reacquire it before returning.
5106 		 */
5107 		mutex_enter(&pptr->port_mutex);
5108 		return (FALSE);
5109 	}
5110 
5111 	/* TRUE is only returned while target is intended skipped */
5112 	ret = FALSE;
5113 	/* discover info about this target */
5114 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5115 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5116 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5117 	} else {
5118 		fcp_icmd_free(pptr, icmd);
5119 		ret = TRUE;
5120 	}
5121 	mutex_enter(&pptr->port_mutex);
5122 
5123 	return (ret);
5124 }
5125 
5126 /*
5127  *     Function: fcp_send_els
5128  *
5129  *  Description: Sends an ELS to the target specified by the caller.  Supports
5130  *		 PLOGI and PRLI.
5131  *
5132  *     Argument: *pptr		Fcp port.
5133  *		 *ptgt		Target to send the ELS to.
5134  *		 *icmd		Internal packet
5135  *		 opcode		ELS opcode
5136  *		 lcount		Link state change counter
5137  *		 tcount		Target state change counter
5138  *		 cause		What caused the call
5139  *
5140  * Return Value: DDI_SUCCESS
5141  *		 Others
5142  */
5143 static int
5144 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5145     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5146 {
5147 	fc_packet_t		*fpkt;
5148 	fc_frame_hdr_t		*hp;
5149 	int			internal = 0;
5150 	int			alloc;
5151 	int			cmd_len;
5152 	int			resp_len;
5153 	int			res = DDI_FAILURE; /* default result */
5154 	int			rval = DDI_FAILURE;
5155 
5156 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5157 	ASSERT(ptgt->tgt_port == pptr);
5158 
5159 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5160 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5161 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5162 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5163 
5164 	if (opcode == LA_ELS_PLOGI) {
5165 		cmd_len = sizeof (la_els_logi_t);
5166 		resp_len = sizeof (la_els_logi_t);
5167 	} else {
5168 		ASSERT(opcode == LA_ELS_PRLI);
5169 		cmd_len = sizeof (la_els_prli_t);
5170 		resp_len = sizeof (la_els_prli_t);
5171 	}
5172 
5173 	if (icmd == NULL) {
5174 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5175 		    sizeof (la_els_prli_t));
5176 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5177 		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5178 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5179 		if (icmd == NULL) {
5180 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5181 			return (res);
5182 		}
5183 		internal++;
5184 	}
5185 	fpkt = icmd->ipkt_fpkt;
5186 
5187 	fpkt->pkt_cmdlen = cmd_len;
5188 	fpkt->pkt_rsplen = resp_len;
5189 	fpkt->pkt_datalen = 0;
5190 	icmd->ipkt_retries = 0;
5191 
5192 	/* fill in fpkt info */
5193 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5194 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5195 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5196 
5197 	/* get ptr to frame hdr in fpkt */
5198 	hp = &fpkt->pkt_cmd_fhdr;
5199 
5200 	/*
5201 	 * fill in frame hdr
5202 	 */
5203 	hp->r_ctl = R_CTL_ELS_REQ;
5204 	hp->s_id = pptr->port_id;	/* source ID */
5205 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5206 	hp->type = FC_TYPE_EXTENDED_LS;
5207 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5208 	hp->seq_id = 0;
5209 	hp->rsvd = 0;
5210 	hp->df_ctl  = 0;
5211 	hp->seq_cnt = 0;
5212 	hp->ox_id = 0xffff;		/* i.e. none */
5213 	hp->rx_id = 0xffff;		/* i.e. none */
5214 	hp->ro = 0;
5215 
5216 	/*
5217 	 * at this point we have a filled in cmd pkt
5218 	 *
5219 	 * fill in the respective info, then use the transport to send
5220 	 * the packet
5221 	 *
5222 	 * for a PLOGI call fc_ulp_login(), and
5223 	 * for a PRLI call fc_ulp_issue_els()
5224 	 */
5225 	switch (opcode) {
5226 	case LA_ELS_PLOGI: {
5227 		struct la_els_logi logi;
5228 
5229 		bzero(&logi, sizeof (struct la_els_logi));
5230 
5231 		hp = &fpkt->pkt_cmd_fhdr;
5232 		hp->r_ctl = R_CTL_ELS_REQ;
5233 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5234 		logi.ls_code.mbz = 0;
5235 
5236 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5237 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5238 
5239 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5240 
5241 		mutex_enter(&pptr->port_mutex);
5242 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5243 
5244 			mutex_exit(&pptr->port_mutex);
5245 
5246 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5247 			if (rval == FC_SUCCESS) {
5248 				res = DDI_SUCCESS;
5249 				break;
5250 			}
5251 
5252 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5253 
5254 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5255 			    rval, "PLOGI");
5256 		} else {
5257 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5258 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5259 			    "fcp_send_els1: state change occured"
5260 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5261 			mutex_exit(&pptr->port_mutex);
5262 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5263 		}
5264 		break;
5265 	}
5266 
5267 	case LA_ELS_PRLI: {
5268 		struct la_els_prli	prli;
5269 		struct fcp_prli		*fprli;
5270 
5271 		bzero(&prli, sizeof (struct la_els_prli));
5272 
5273 		hp = &fpkt->pkt_cmd_fhdr;
5274 		hp->r_ctl = R_CTL_ELS_REQ;
5275 
5276 		/* fill in PRLI cmd ELS fields */
5277 		prli.ls_code = LA_ELS_PRLI;
5278 		prli.page_length = 0x10;	/* huh? */
5279 		prli.payload_length = sizeof (struct la_els_prli);
5280 
5281 		icmd->ipkt_opcode = LA_ELS_PRLI;
5282 
5283 		/* get ptr to PRLI service params */
5284 		fprli = (struct fcp_prli *)prli.service_params;
5285 
5286 		/* fill in service params */
5287 		fprli->type = 0x08;
5288 		fprli->resvd1 = 0;
5289 		fprli->orig_process_assoc_valid = 0;
5290 		fprli->resp_process_assoc_valid = 0;
5291 		fprli->establish_image_pair = 1;
5292 		fprli->resvd2 = 0;
5293 		fprli->resvd3 = 0;
5294 		fprli->obsolete_1 = 0;
5295 		fprli->obsolete_2 = 0;
5296 		fprli->data_overlay_allowed = 0;
5297 		fprli->initiator_fn = 1;
5298 		fprli->confirmed_compl_allowed = 1;
5299 
5300 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5301 			fprli->target_fn = 1;
5302 		} else {
5303 			fprli->target_fn = 0;
5304 		}
5305 
5306 		fprli->retry = 1;
5307 		fprli->read_xfer_rdy_disabled = 1;
5308 		fprli->write_xfer_rdy_disabled = 0;
5309 
5310 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5311 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5312 
5313 		/* issue the PRLI request */
5314 
5315 		mutex_enter(&pptr->port_mutex);
5316 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5317 
5318 			mutex_exit(&pptr->port_mutex);
5319 
5320 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5321 			if (rval == FC_SUCCESS) {
5322 				res = DDI_SUCCESS;
5323 				break;
5324 			}
5325 
5326 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5327 
5328 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5329 			    rval, "PRLI");
5330 		} else {
5331 			mutex_exit(&pptr->port_mutex);
5332 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5333 		}
5334 		break;
5335 	}
5336 
5337 	default:
5338 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5339 		break;
5340 	}
5341 
5342 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5343 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5344 	    "fcp_send_els: returning %d", res);
5345 
5346 	if (res != DDI_SUCCESS) {
5347 		if (internal) {
5348 			fcp_icmd_free(pptr, icmd);
5349 		}
5350 	}
5351 
5352 	return (res);
5353 }
5354 
5355 
5356 /*
5357  * called internally update the state of all of the tgts and each LUN
5358  * for this port (i.e. each target  known to be attached to this port)
5359  * if they are not already offline
5360  *
5361  * must be called with the port mutex owned
5362  *
5363  * acquires and releases the target mutexes for each target attached
5364  * to this port
5365  */
5366 void
5367 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5368 {
5369 	int i;
5370 	struct fcp_tgt *ptgt;
5371 
5372 	ASSERT(mutex_owned(&pptr->port_mutex));
5373 
5374 	for (i = 0; i < FCP_NUM_HASH; i++) {
5375 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5376 		    ptgt = ptgt->tgt_next) {
5377 			mutex_enter(&ptgt->tgt_mutex);
5378 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5379 			ptgt->tgt_change_cnt++;
5380 			ptgt->tgt_statec_cause = cause;
5381 			ptgt->tgt_tmp_cnt = 1;
5382 			ptgt->tgt_done = 0;
5383 			mutex_exit(&ptgt->tgt_mutex);
5384 		}
5385 	}
5386 }
5387 
5388 
5389 static void
5390 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5391 {
5392 	int i;
5393 	int ndevs;
5394 	struct fcp_tgt *ptgt;
5395 
5396 	ASSERT(mutex_owned(&pptr->port_mutex));
5397 
5398 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5399 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5400 		    ptgt = ptgt->tgt_next) {
5401 			ndevs++;
5402 		}
5403 	}
5404 
5405 	if (ndevs == 0) {
5406 		return;
5407 	}
5408 	pptr->port_tmp_cnt = ndevs;
5409 
5410 	for (i = 0; i < FCP_NUM_HASH; i++) {
5411 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5412 		    ptgt = ptgt->tgt_next) {
5413 			(void) fcp_call_finish_init_held(pptr, ptgt,
5414 			    lcount, ptgt->tgt_change_cnt, cause);
5415 		}
5416 	}
5417 }
5418 
5419 /*
5420  *     Function: fcp_update_tgt_state
5421  *
5422  *  Description: This function updates the field tgt_state of a target.	 That
5423  *		 field is a bitmap and which bit can be set or reset
5424  *		 individually.	The action applied to the target state is also
5425  *		 applied to all the LUNs belonging to the target (provided the
5426  *		 LUN is not offline).  A side effect of applying the state
5427  *		 modification to the target and the LUNs is the field tgt_trace
5428  *		 of the target and lun_trace of the LUNs is set to zero.
5429  *
5430  *
5431  *     Argument: *ptgt	Target structure.
5432  *		 flag	Flag indication what action to apply (set/reset).
5433  *		 state	State bits to update.
5434  *
5435  * Return Value: None
5436  *
5437  *	Context: Interrupt, Kernel or User context.
5438  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5439  *		 calling this function.
5440  */
5441 void
5442 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5443 {
5444 	struct fcp_lun *plun;
5445 
5446 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5447 
5448 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5449 		/* The target is not offline. */
5450 		if (flag == FCP_SET) {
5451 			ptgt->tgt_state |= state;
5452 			ptgt->tgt_trace = 0;
5453 		} else {
5454 			ptgt->tgt_state &= ~state;
5455 		}
5456 
5457 		for (plun = ptgt->tgt_lun; plun != NULL;
5458 		    plun = plun->lun_next) {
5459 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5460 				/* The LUN is not offline. */
5461 				if (flag == FCP_SET) {
5462 					plun->lun_state |= state;
5463 					plun->lun_trace = 0;
5464 				} else {
5465 					plun->lun_state &= ~state;
5466 				}
5467 			}
5468 		}
5469 	}
5470 }
5471 
5472 /*
5473  *     Function: fcp_update_tgt_state
5474  *
5475  *  Description: This function updates the field lun_state of a LUN.  That
5476  *		 field is a bitmap and which bit can be set or reset
5477  *		 individually.
5478  *
5479  *     Argument: *plun	LUN structure.
5480  *		 flag	Flag indication what action to apply (set/reset).
5481  *		 state	State bits to update.
5482  *
5483  * Return Value: None
5484  *
5485  *	Context: Interrupt, Kernel or User context.
5486  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5487  *		 calling this function.
5488  */
5489 void
5490 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5491 {
5492 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5493 
5494 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5495 
5496 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5497 		if (flag == FCP_SET) {
5498 			plun->lun_state |= state;
5499 		} else {
5500 			plun->lun_state &= ~state;
5501 		}
5502 	}
5503 }
5504 
5505 /*
5506  *     Function: fcp_get_port
5507  *
5508  *  Description: This function returns the fcp_port structure from the opaque
5509  *		 handle passed by the caller.  That opaque handle is the handle
5510  *		 used by fp/fctl to identify a particular local port.  That
5511  *		 handle has been stored in the corresponding fcp_port
5512  *		 structure.  This function is going to walk the global list of
5513  *		 fcp_port structures till one has a port_fp_handle that matches
5514  *		 the handle passed by the caller.  This function enters the
5515  *		 mutex fcp_global_mutex while walking the global list and then
5516  *		 releases it.
5517  *
5518  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5519  *				particular port.
5520  *
5521  * Return Value: NULL		Not found.
5522  *		 Not NULL	Pointer to the fcp_port structure.
5523  *
5524  *	Context: Interrupt, Kernel or User context.
5525  */
5526 static struct fcp_port *
5527 fcp_get_port(opaque_t port_handle)
5528 {
5529 	struct fcp_port *pptr;
5530 
5531 	ASSERT(port_handle != NULL);
5532 
5533 	mutex_enter(&fcp_global_mutex);
5534 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5535 		if (pptr->port_fp_handle == port_handle) {
5536 			break;
5537 		}
5538 	}
5539 	mutex_exit(&fcp_global_mutex);
5540 
5541 	return (pptr);
5542 }
5543 
5544 
5545 static void
5546 fcp_unsol_callback(fc_packet_t *fpkt)
5547 {
5548 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5549 	struct fcp_port *pptr = icmd->ipkt_port;
5550 
5551 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5552 		caddr_t state, reason, action, expln;
5553 
5554 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5555 		    &action, &expln);
5556 
5557 		fcp_log(CE_WARN, pptr->port_dip,
5558 		    "!couldn't post response to unsolicited request: "
5559 		    " state=%s reason=%s rx_id=%x ox_id=%x",
5560 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5561 		    fpkt->pkt_cmd_fhdr.rx_id);
5562 	}
5563 	fcp_icmd_free(pptr, icmd);
5564 }
5565 
5566 
5567 /*
5568  * Perform general purpose preparation of a response to an unsolicited request
5569  */
5570 static void
5571 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5572     uchar_t r_ctl, uchar_t type)
5573 {
5574 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5575 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5576 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5577 	pkt->pkt_cmd_fhdr.type = type;
5578 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5579 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5580 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5581 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5582 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5583 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5584 	pkt->pkt_cmd_fhdr.ro = 0;
5585 	pkt->pkt_cmd_fhdr.rsvd = 0;
5586 	pkt->pkt_comp = fcp_unsol_callback;
5587 	pkt->pkt_pd = NULL;
5588 	pkt->pkt_ub_resp_token = (opaque_t)buf;
5589 }
5590 
5591 
5592 /*ARGSUSED*/
5593 static int
5594 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5595 {
5596 	fc_packet_t		*fpkt;
5597 	struct la_els_prli	prli;
5598 	struct fcp_prli		*fprli;
5599 	struct fcp_ipkt	*icmd;
5600 	struct la_els_prli	*from;
5601 	struct fcp_prli		*orig;
5602 	struct fcp_tgt	*ptgt;
5603 	int			tcount = 0;
5604 	int			lcount;
5605 
5606 	from = (struct la_els_prli *)buf->ub_buffer;
5607 	orig = (struct fcp_prli *)from->service_params;
5608 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5609 	    NULL) {
5610 		mutex_enter(&ptgt->tgt_mutex);
5611 		tcount = ptgt->tgt_change_cnt;
5612 		mutex_exit(&ptgt->tgt_mutex);
5613 	}
5614 
5615 	mutex_enter(&pptr->port_mutex);
5616 	lcount = pptr->port_link_cnt;
5617 	mutex_exit(&pptr->port_mutex);
5618 
5619 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5620 	    sizeof (la_els_prli_t), 0,
5621 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5622 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5623 		return (FC_FAILURE);
5624 	}
5625 
5626 	fpkt = icmd->ipkt_fpkt;
5627 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5628 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5629 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5630 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5631 	fpkt->pkt_rsplen = 0;
5632 	fpkt->pkt_datalen = 0;
5633 
5634 	icmd->ipkt_opcode = LA_ELS_PRLI;
5635 
5636 	bzero(&prli, sizeof (struct la_els_prli));
5637 	fprli = (struct fcp_prli *)prli.service_params;
5638 	prli.ls_code = LA_ELS_ACC;
5639 	prli.page_length = 0x10;
5640 	prli.payload_length = sizeof (struct la_els_prli);
5641 
5642 	/* fill in service params */
5643 	fprli->type = 0x08;
5644 	fprli->resvd1 = 0;
5645 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5646 	fprli->orig_process_associator = orig->orig_process_associator;
5647 	fprli->resp_process_assoc_valid = 0;
5648 	fprli->establish_image_pair = 1;
5649 	fprli->resvd2 = 0;
5650 	fprli->resvd3 = 0;
5651 	fprli->obsolete_1 = 0;
5652 	fprli->obsolete_2 = 0;
5653 	fprli->data_overlay_allowed = 0;
5654 	fprli->initiator_fn = 1;
5655 	fprli->confirmed_compl_allowed = 1;
5656 
5657 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5658 		fprli->target_fn = 1;
5659 	} else {
5660 		fprli->target_fn = 0;
5661 	}
5662 
5663 	fprli->retry = 1;
5664 	fprli->read_xfer_rdy_disabled = 1;
5665 	fprli->write_xfer_rdy_disabled = 0;
5666 
5667 	/* save the unsol prli payload first */
5668 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5669 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5670 
5671 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5672 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5673 
5674 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5675 
5676 	mutex_enter(&pptr->port_mutex);
5677 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5678 		int rval;
5679 		mutex_exit(&pptr->port_mutex);
5680 
5681 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5682 		    FC_SUCCESS) {
5683 			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5684 			    ptgt != NULL) {
5685 				fcp_queue_ipkt(pptr, fpkt);
5686 				return (FC_SUCCESS);
5687 			}
5688 			/* Let it timeout */
5689 			fcp_icmd_free(pptr, icmd);
5690 			return (FC_FAILURE);
5691 		}
5692 	} else {
5693 		mutex_exit(&pptr->port_mutex);
5694 		fcp_icmd_free(pptr, icmd);
5695 		return (FC_FAILURE);
5696 	}
5697 
5698 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5699 
5700 	return (FC_SUCCESS);
5701 }
5702 
5703 /*
5704  *     Function: fcp_icmd_alloc
5705  *
5706  *  Description: This function allocated a fcp_ipkt structure.	The pkt_comp
5707  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5708  *		 modified by the caller (such as fcp_send_scsi).  The
5709  *		 structure is also tied to the state of the line and of the
5710  *		 target at a particular time.  That link is established by
5711  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5712  *		 and tcount which came respectively from pptr->link_cnt and
5713  *		 ptgt->tgt_change_cnt.
5714  *
5715  *     Argument: *pptr		Fcp port.
5716  *		 *ptgt		Target (destination of the command).
5717  *		 cmd_len	Length of the command.
5718  *		 resp_len	Length of the expected response.
5719  *		 data_len	Length of the data.
5720  *		 nodma		Indicates weither the command and response.
5721  *				will be transfer through DMA or not.
5722  *		 lcount		Link state change counter.
5723  *		 tcount		Target state change counter.
5724  *		 cause		Reason that lead to this call.
5725  *
5726  * Return Value: NULL		Failed.
5727  *		 Not NULL	Internal packet address.
5728  */
5729 static struct fcp_ipkt *
5730 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5731     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5732     uint32_t rscn_count)
5733 {
5734 	int			dma_setup = 0;
5735 	fc_packet_t		*fpkt;
5736 	struct fcp_ipkt	*icmd = NULL;
5737 
5738 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5739 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5740 	    KM_NOSLEEP);
5741 	if (icmd == NULL) {
5742 		fcp_log(CE_WARN, pptr->port_dip,
5743 		    "!internal packet allocation failed");
5744 		return (NULL);
5745 	}
5746 
5747 	/*
5748 	 * initialize the allocated packet
5749 	 */
5750 	icmd->ipkt_nodma = nodma;
5751 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5752 	icmd->ipkt_lun = NULL;
5753 
5754 	icmd->ipkt_link_cnt = lcount;
5755 	icmd->ipkt_change_cnt = tcount;
5756 	icmd->ipkt_cause = cause;
5757 
5758 	mutex_enter(&pptr->port_mutex);
5759 	icmd->ipkt_port = pptr;
5760 	mutex_exit(&pptr->port_mutex);
5761 
5762 	/* keep track of amt of data to be sent in pkt */
5763 	icmd->ipkt_cmdlen = cmd_len;
5764 	icmd->ipkt_resplen = resp_len;
5765 	icmd->ipkt_datalen = data_len;
5766 
5767 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5768 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5769 
5770 	/* set pkt's private ptr to point to cmd pkt */
5771 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5772 
5773 	/* set FCA private ptr to memory just beyond */
5774 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5775 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5776 	    pptr->port_dmacookie_sz);
5777 
5778 	/* get ptr to fpkt substruct and fill it in */
5779 	fpkt = icmd->ipkt_fpkt;
5780 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5781 	    sizeof (struct fcp_ipkt));
5782 
5783 	if (ptgt != NULL) {
5784 		icmd->ipkt_tgt = ptgt;
5785 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5786 	}
5787 
5788 	fpkt->pkt_comp = fcp_icmd_callback;
5789 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5790 	fpkt->pkt_cmdlen = cmd_len;
5791 	fpkt->pkt_rsplen = resp_len;
5792 	fpkt->pkt_datalen = data_len;
5793 
5794 	/*
5795 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5796 	 * rscn_count as fcp knows down to the transport. If a valid count was
5797 	 * passed into this function, we allocate memory to actually pass down
5798 	 * this info.
5799 	 *
5800 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5801 	 * basically mean that fcp will not be able to help transport
5802 	 * distinguish if a new RSCN has come after fcp was last informed about
5803 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5804 	 * 5068068 where the device might end up going offline in case of RSCN
5805 	 * storms.
5806 	 */
5807 	fpkt->pkt_ulp_rscn_infop = NULL;
5808 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5809 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5810 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5811 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5812 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5813 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5814 			    "Failed to alloc memory to pass rscn info");
5815 		}
5816 	}
5817 
5818 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5819 		fc_ulp_rscn_info_t	*rscnp;
5820 
5821 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5822 		rscnp->ulp_rscn_count = rscn_count;
5823 	}
5824 
5825 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5826 		goto fail;
5827 	}
5828 	dma_setup++;
5829 
5830 	/*
5831 	 * Must hold target mutex across setting of pkt_pd and call to
5832 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5833 	 * away while we're not looking.
5834 	 */
5835 	if (ptgt != NULL) {
5836 		mutex_enter(&ptgt->tgt_mutex);
5837 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5838 
5839 		/* ask transport to do its initialization on this pkt */
5840 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5841 		    != FC_SUCCESS) {
5842 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5843 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5844 			    "fc_ulp_init_packet failed");
5845 			mutex_exit(&ptgt->tgt_mutex);
5846 			goto fail;
5847 		}
5848 		mutex_exit(&ptgt->tgt_mutex);
5849 	} else {
5850 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5851 		    != FC_SUCCESS) {
5852 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5853 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5854 			    "fc_ulp_init_packet failed");
5855 			goto fail;
5856 		}
5857 	}
5858 
5859 	mutex_enter(&pptr->port_mutex);
5860 	if (pptr->port_state & (FCP_STATE_DETACHING |
5861 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5862 		int rval;
5863 
5864 		mutex_exit(&pptr->port_mutex);
5865 
5866 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5867 		ASSERT(rval == FC_SUCCESS);
5868 
5869 		goto fail;
5870 	}
5871 
5872 	if (ptgt != NULL) {
5873 		mutex_enter(&ptgt->tgt_mutex);
5874 		ptgt->tgt_ipkt_cnt++;
5875 		mutex_exit(&ptgt->tgt_mutex);
5876 	}
5877 
5878 	pptr->port_ipkt_cnt++;
5879 
5880 	mutex_exit(&pptr->port_mutex);
5881 
5882 	return (icmd);
5883 
5884 fail:
5885 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5886 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5887 		    sizeof (fc_ulp_rscn_info_t));
5888 		fpkt->pkt_ulp_rscn_infop = NULL;
5889 	}
5890 
5891 	if (dma_setup) {
5892 		fcp_free_dma(pptr, icmd);
5893 	}
5894 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5895 	    (size_t)pptr->port_dmacookie_sz);
5896 
5897 	return (NULL);
5898 }
5899 
5900 /*
5901  *     Function: fcp_icmd_free
5902  *
5903  *  Description: Frees the internal command passed by the caller.
5904  *
5905  *     Argument: *pptr		Fcp port.
5906  *		 *icmd		Internal packet to free.
5907  *
5908  * Return Value: None
5909  */
5910 static void
5911 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5912 {
5913 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5914 
5915 	/* Let the underlying layers do their cleanup. */
5916 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5917 	    icmd->ipkt_fpkt);
5918 
5919 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5920 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5921 		    sizeof (fc_ulp_rscn_info_t));
5922 	}
5923 
5924 	fcp_free_dma(pptr, icmd);
5925 
5926 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5927 	    (size_t)pptr->port_dmacookie_sz);
5928 
5929 	mutex_enter(&pptr->port_mutex);
5930 
5931 	if (ptgt) {
5932 		mutex_enter(&ptgt->tgt_mutex);
5933 		ptgt->tgt_ipkt_cnt--;
5934 		mutex_exit(&ptgt->tgt_mutex);
5935 	}
5936 
5937 	pptr->port_ipkt_cnt--;
5938 	mutex_exit(&pptr->port_mutex);
5939 }
5940 
5941 /*
5942  *     Function: fcp_alloc_dma
5943  *
5944  *  Description: Allocated the DMA resources required for the internal
5945  *		 packet.
5946  *
5947  *     Argument: *pptr	FCP port.
5948  *		 *icmd	Internal FCP packet.
5949  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5950  *		 flags	Allocation flags (Sleep or NoSleep).
5951  *
5952  * Return Value: FC_SUCCESS
5953  *		 FC_NOMEM
5954  */
5955 static int
5956 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5957     int nodma, int flags)
5958 {
5959 	int		rval;
5960 	size_t		real_size;
5961 	uint_t		ccount;
5962 	int		bound = 0;
5963 	int		cmd_resp = 0;
5964 	fc_packet_t	*fpkt;
5965 	ddi_dma_cookie_t	pkt_data_cookie;
5966 	ddi_dma_cookie_t	*cp;
5967 	uint32_t		cnt;
5968 
5969 	fpkt = &icmd->ipkt_fc_packet;
5970 
5971 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5972 	    fpkt->pkt_resp_dma == NULL);
5973 
5974 	icmd->ipkt_nodma = nodma;
5975 
5976 	if (nodma) {
5977 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5978 		if (fpkt->pkt_cmd == NULL) {
5979 			goto fail;
5980 		}
5981 
5982 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5983 		if (fpkt->pkt_resp == NULL) {
5984 			goto fail;
5985 		}
5986 	} else {
5987 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5988 
5989 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5990 		if (rval == FC_FAILURE) {
5991 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5992 			    fpkt->pkt_resp_dma == NULL);
5993 			goto fail;
5994 		}
5995 		cmd_resp++;
5996 	}
5997 
5998 	if ((fpkt->pkt_datalen != 0) &&
5999 	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
6000 		/*
6001 		 * set up DMA handle and memory for the data in this packet
6002 		 */
6003 		if (ddi_dma_alloc_handle(pptr->port_dip,
6004 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6005 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6006 			goto fail;
6007 		}
6008 
6009 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6010 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6011 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6012 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6013 			goto fail;
6014 		}
6015 
6016 		/* was DMA mem size gotten < size asked for/needed ?? */
6017 		if (real_size < fpkt->pkt_datalen) {
6018 			goto fail;
6019 		}
6020 
6021 		/* bind DMA address and handle together */
6022 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6023 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6024 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6025 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6026 			goto fail;
6027 		}
6028 		bound++;
6029 
6030 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6031 			goto fail;
6032 		}
6033 
6034 		fpkt->pkt_data_cookie_cnt = ccount;
6035 
6036 		cp = fpkt->pkt_data_cookie;
6037 		*cp = pkt_data_cookie;
6038 		cp++;
6039 
6040 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
6041 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
6042 			    &pkt_data_cookie);
6043 			*cp = pkt_data_cookie;
6044 		}
6045 
6046 	} else if (fpkt->pkt_datalen != 0) {
6047 		/*
6048 		 * If it's a pseudo FCA, then it can't support DMA even in
6049 		 * SCSI data phase.
6050 		 */
6051 		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6052 		if (fpkt->pkt_data == NULL) {
6053 			goto fail;
6054 		}
6055 
6056 	}
6057 
6058 	return (FC_SUCCESS);
6059 
6060 fail:
6061 	if (bound) {
6062 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6063 	}
6064 
6065 	if (fpkt->pkt_data_dma) {
6066 		if (fpkt->pkt_data) {
6067 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6068 		}
6069 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6070 	} else {
6071 		if (fpkt->pkt_data) {
6072 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6073 		}
6074 	}
6075 
6076 	if (nodma) {
6077 		if (fpkt->pkt_cmd) {
6078 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6079 		}
6080 		if (fpkt->pkt_resp) {
6081 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6082 		}
6083 	} else {
6084 		if (cmd_resp) {
6085 			fcp_free_cmd_resp(pptr, fpkt);
6086 		}
6087 	}
6088 
6089 	return (FC_NOMEM);
6090 }
6091 
6092 
6093 static void
6094 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6095 {
6096 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6097 
6098 	if (fpkt->pkt_data_dma) {
6099 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6100 		if (fpkt->pkt_data) {
6101 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6102 		}
6103 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6104 	} else {
6105 		if (fpkt->pkt_data) {
6106 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6107 		}
6108 		/*
6109 		 * Need we reset pkt_* to zero???
6110 		 */
6111 	}
6112 
6113 	if (icmd->ipkt_nodma) {
6114 		if (fpkt->pkt_cmd) {
6115 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6116 		}
6117 		if (fpkt->pkt_resp) {
6118 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6119 		}
6120 	} else {
6121 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6122 
6123 		fcp_free_cmd_resp(pptr, fpkt);
6124 	}
6125 }
6126 
6127 /*
6128  *     Function: fcp_lookup_target
6129  *
6130  *  Description: Finds a target given a WWN.
6131  *
6132  *     Argument: *pptr	FCP port.
6133  *		 *wwn	World Wide Name of the device to look for.
6134  *
6135  * Return Value: NULL		No target found
6136  *		 Not NULL	Target structure
6137  *
6138  *	Context: Interrupt context.
6139  *		 The mutex pptr->port_mutex must be owned.
6140  */
6141 /* ARGSUSED */
6142 static struct fcp_tgt *
6143 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6144 {
6145 	int			hash;
6146 	struct fcp_tgt	*ptgt;
6147 
6148 	ASSERT(mutex_owned(&pptr->port_mutex));
6149 
6150 	hash = FCP_HASH(wwn);
6151 
6152 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6153 	    ptgt = ptgt->tgt_next) {
6154 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6155 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6156 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6157 			break;
6158 		}
6159 	}
6160 
6161 	return (ptgt);
6162 }
6163 
6164 
6165 /*
6166  * Find target structure given a port identifier
6167  */
6168 static struct fcp_tgt *
6169 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6170 {
6171 	fc_portid_t		port_id;
6172 	la_wwn_t		pwwn;
6173 	struct fcp_tgt	*ptgt = NULL;
6174 
6175 	port_id.priv_lilp_posit = 0;
6176 	port_id.port_id = d_id;
6177 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6178 	    &pwwn) == FC_SUCCESS) {
6179 		mutex_enter(&pptr->port_mutex);
6180 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6181 		mutex_exit(&pptr->port_mutex);
6182 	}
6183 
6184 	return (ptgt);
6185 }
6186 
6187 
6188 /*
6189  * the packet completion callback routine for info cmd pkts
6190  *
6191  * this means fpkt pts to a response to either a PLOGI or a PRLI
6192  *
6193  * if there is an error an attempt is made to call a routine to resend
6194  * the command that failed
6195  */
6196 static void
6197 fcp_icmd_callback(fc_packet_t *fpkt)
6198 {
6199 	struct fcp_ipkt	*icmd;
6200 	struct fcp_port	*pptr;
6201 	struct fcp_tgt	*ptgt;
6202 	struct la_els_prli	*prli;
6203 	struct la_els_prli	prli_s;
6204 	struct fcp_prli		*fprli;
6205 	struct fcp_lun	*plun;
6206 	int		free_pkt = 1;
6207 	int		rval;
6208 	ls_code_t	resp;
6209 	uchar_t		prli_acc = 0;
6210 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6211 	int		lun0_newalloc;
6212 
6213 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6214 
6215 	/* get ptrs to the port and target structs for the cmd */
6216 	pptr = icmd->ipkt_port;
6217 	ptgt = icmd->ipkt_tgt;
6218 
6219 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6220 
6221 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6222 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6223 		    sizeof (prli_s));
6224 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6225 	}
6226 
6227 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6228 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6229 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6230 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6231 	    ptgt->tgt_d_id);
6232 
6233 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6234 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6235 
6236 		mutex_enter(&ptgt->tgt_mutex);
6237 		if (ptgt->tgt_pd_handle == NULL) {
6238 			/*
6239 			 * in a fabric environment the port device handles
6240 			 * get created only after successful LOGIN into the
6241 			 * transport, so the transport makes this port
6242 			 * device (pd) handle available in this packet, so
6243 			 * save it now
6244 			 */
6245 			ASSERT(fpkt->pkt_pd != NULL);
6246 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6247 		}
6248 		mutex_exit(&ptgt->tgt_mutex);
6249 
6250 		/* which ELS cmd is this response for ?? */
6251 		switch (icmd->ipkt_opcode) {
6252 		case LA_ELS_PLOGI:
6253 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6254 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6255 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6256 			    ptgt->tgt_d_id,
6257 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6258 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6259 
6260 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6261 			    FCP_TGT_TRACE_15);
6262 
6263 			/* Note that we are not allocating a new icmd */
6264 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6265 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6266 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6267 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6268 				    FCP_TGT_TRACE_16);
6269 				goto fail;
6270 			}
6271 			break;
6272 
6273 		case LA_ELS_PRLI:
6274 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6275 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6276 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6277 
6278 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6279 			    FCP_TGT_TRACE_17);
6280 
6281 			prli = &prli_s;
6282 
6283 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6284 			    sizeof (prli_s));
6285 
6286 			fprli = (struct fcp_prli *)prli->service_params;
6287 
6288 			mutex_enter(&ptgt->tgt_mutex);
6289 			ptgt->tgt_icap = fprli->initiator_fn;
6290 			ptgt->tgt_tcap = fprli->target_fn;
6291 			mutex_exit(&ptgt->tgt_mutex);
6292 
6293 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6294 				/*
6295 				 * this FCP device does not support target mode
6296 				 */
6297 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6298 				    FCP_TGT_TRACE_18);
6299 				goto fail;
6300 			}
6301 			if (fprli->retry == 1) {
6302 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6303 				    &ptgt->tgt_port_wwn);
6304 			}
6305 
6306 			/* target is no longer offline */
6307 			mutex_enter(&pptr->port_mutex);
6308 			mutex_enter(&ptgt->tgt_mutex);
6309 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6310 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6311 				    FCP_TGT_MARK);
6312 			} else {
6313 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6314 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6315 				    "fcp_icmd_callback,1: state change "
6316 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6317 				mutex_exit(&ptgt->tgt_mutex);
6318 				mutex_exit(&pptr->port_mutex);
6319 				goto fail;
6320 			}
6321 			mutex_exit(&ptgt->tgt_mutex);
6322 			mutex_exit(&pptr->port_mutex);
6323 
6324 			/*
6325 			 * lun 0 should always respond to inquiry, so
6326 			 * get the LUN struct for LUN 0
6327 			 *
6328 			 * Currently we deal with first level of addressing.
6329 			 * If / when we start supporting 0x device types
6330 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6331 			 * this logic will need revisiting.
6332 			 */
6333 			lun0_newalloc = 0;
6334 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6335 				/*
6336 				 * no LUN struct for LUN 0 yet exists,
6337 				 * so create one
6338 				 */
6339 				plun = fcp_alloc_lun(ptgt);
6340 				if (plun == NULL) {
6341 					fcp_log(CE_WARN, pptr->port_dip,
6342 					    "!Failed to allocate lun 0 for"
6343 					    " D_ID=%x", ptgt->tgt_d_id);
6344 					goto fail;
6345 				}
6346 				lun0_newalloc = 1;
6347 			}
6348 
6349 			/* fill in LUN info */
6350 			mutex_enter(&ptgt->tgt_mutex);
6351 			/*
6352 			 * consider lun 0 as device not connected if it is
6353 			 * offlined or newly allocated
6354 			 */
6355 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6356 			    lun0_newalloc) {
6357 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6358 			}
6359 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6360 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6361 			ptgt->tgt_lun_cnt = 1;
6362 			ptgt->tgt_report_lun_cnt = 0;
6363 			mutex_exit(&ptgt->tgt_mutex);
6364 
6365 			/* Retrieve the rscn count (if a valid one exists) */
6366 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6367 				rscn_count = ((fc_ulp_rscn_info_t *)
6368 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6369 				    ->ulp_rscn_count;
6370 			} else {
6371 				rscn_count = FC_INVALID_RSCN_COUNT;
6372 			}
6373 
6374 			/* send Report Lun request to target */
6375 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6376 			    sizeof (struct fcp_reportlun_resp),
6377 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6378 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6379 				mutex_enter(&pptr->port_mutex);
6380 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6381 					fcp_log(CE_WARN, pptr->port_dip,
6382 					    "!Failed to send REPORT LUN to"
6383 					    "  D_ID=%x", ptgt->tgt_d_id);
6384 				} else {
6385 					FCP_TRACE(fcp_logq,
6386 					    pptr->port_instbuf, fcp_trace,
6387 					    FCP_BUF_LEVEL_5, 0,
6388 					    "fcp_icmd_callback,2:state change"
6389 					    " occured for D_ID=0x%x",
6390 					    ptgt->tgt_d_id);
6391 				}
6392 				mutex_exit(&pptr->port_mutex);
6393 
6394 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6395 				    FCP_TGT_TRACE_19);
6396 
6397 				goto fail;
6398 			} else {
6399 				free_pkt = 0;
6400 				fcp_icmd_free(pptr, icmd);
6401 			}
6402 			break;
6403 
6404 		default:
6405 			fcp_log(CE_WARN, pptr->port_dip,
6406 			    "!fcp_icmd_callback Invalid opcode");
6407 			goto fail;
6408 		}
6409 
6410 		return;
6411 	}
6412 
6413 
6414 	/*
6415 	 * Other PLOGI failures are not retried as the
6416 	 * transport does it already
6417 	 */
6418 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6419 		if (fcp_is_retryable(icmd) &&
6420 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6421 
6422 			if (FCP_MUST_RETRY(fpkt)) {
6423 				fcp_queue_ipkt(pptr, fpkt);
6424 				return;
6425 			}
6426 
6427 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6428 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6429 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6430 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6431 			    fpkt->pkt_reason);
6432 
6433 			/*
6434 			 * Retry by recalling the routine that
6435 			 * originally queued this packet
6436 			 */
6437 			mutex_enter(&pptr->port_mutex);
6438 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6439 				caddr_t msg;
6440 
6441 				mutex_exit(&pptr->port_mutex);
6442 
6443 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6444 
6445 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6446 					fpkt->pkt_timeout +=
6447 					    FCP_TIMEOUT_DELTA;
6448 				}
6449 
6450 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6451 				    fpkt);
6452 				if (rval == FC_SUCCESS) {
6453 					return;
6454 				}
6455 
6456 				if (rval == FC_STATEC_BUSY ||
6457 				    rval == FC_OFFLINE) {
6458 					fcp_queue_ipkt(pptr, fpkt);
6459 					return;
6460 				}
6461 				(void) fc_ulp_error(rval, &msg);
6462 
6463 				fcp_log(CE_NOTE, pptr->port_dip,
6464 				    "!ELS 0x%x failed to d_id=0x%x;"
6465 				    " %s", icmd->ipkt_opcode,
6466 				    ptgt->tgt_d_id, msg);
6467 			} else {
6468 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6469 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6470 				    "fcp_icmd_callback,3: state change "
6471 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6472 				mutex_exit(&pptr->port_mutex);
6473 			}
6474 		}
6475 	} else {
6476 		if (fcp_is_retryable(icmd) &&
6477 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6478 			if (FCP_MUST_RETRY(fpkt)) {
6479 				fcp_queue_ipkt(pptr, fpkt);
6480 				return;
6481 			}
6482 		}
6483 		mutex_enter(&pptr->port_mutex);
6484 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6485 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6486 			mutex_exit(&pptr->port_mutex);
6487 			fcp_print_error(fpkt);
6488 		} else {
6489 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6490 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6491 			    "fcp_icmd_callback,4: state change occured"
6492 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6493 			mutex_exit(&pptr->port_mutex);
6494 		}
6495 	}
6496 
6497 fail:
6498 	if (free_pkt) {
6499 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6500 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6501 		fcp_icmd_free(pptr, icmd);
6502 	}
6503 }
6504 
6505 
6506 /*
6507  * called internally to send an info cmd using the transport
6508  *
6509  * sends either an INQ or a REPORT_LUN
6510  *
6511  * when the packet is completed fcp_scsi_callback is called
6512  */
6513 static int
6514 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6515     int lcount, int tcount, int cause, uint32_t rscn_count)
6516 {
6517 	int			nodma;
6518 	struct fcp_ipkt		*icmd;
6519 	struct fcp_tgt		*ptgt;
6520 	struct fcp_port		*pptr;
6521 	fc_frame_hdr_t		*hp;
6522 	fc_packet_t		*fpkt;
6523 	struct fcp_cmd		fcp_cmd;
6524 	struct fcp_cmd		*fcmd;
6525 	union scsi_cdb		*scsi_cdb;
6526 
6527 	ASSERT(plun != NULL);
6528 
6529 	ptgt = plun->lun_tgt;
6530 	ASSERT(ptgt != NULL);
6531 
6532 	pptr = ptgt->tgt_port;
6533 	ASSERT(pptr != NULL);
6534 
6535 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6536 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6537 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6538 
6539 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6540 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6541 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6542 	    rscn_count);
6543 
6544 	if (icmd == NULL) {
6545 		return (DDI_FAILURE);
6546 	}
6547 
6548 	fpkt = icmd->ipkt_fpkt;
6549 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6550 	icmd->ipkt_retries = 0;
6551 	icmd->ipkt_opcode = opcode;
6552 	icmd->ipkt_lun = plun;
6553 
6554 	if (nodma) {
6555 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6556 	} else {
6557 		fcmd = &fcp_cmd;
6558 	}
6559 	bzero(fcmd, sizeof (struct fcp_cmd));
6560 
6561 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6562 
6563 	hp = &fpkt->pkt_cmd_fhdr;
6564 
6565 	hp->s_id = pptr->port_id;
6566 	hp->d_id = ptgt->tgt_d_id;
6567 	hp->r_ctl = R_CTL_COMMAND;
6568 	hp->type = FC_TYPE_SCSI_FCP;
6569 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6570 	hp->rsvd = 0;
6571 	hp->seq_id = 0;
6572 	hp->seq_cnt = 0;
6573 	hp->ox_id = 0xffff;
6574 	hp->rx_id = 0xffff;
6575 	hp->ro = 0;
6576 
6577 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6578 
6579 	/*
6580 	 * Request SCSI target for expedited processing
6581 	 */
6582 
6583 	/*
6584 	 * Set up for untagged queuing because we do not
6585 	 * know if the fibre device supports queuing.
6586 	 */
6587 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6588 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6589 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6590 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6591 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6592 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6593 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6594 
6595 	switch (opcode) {
6596 	case SCMD_INQUIRY_PAGE83:
6597 		/*
6598 		 * Prepare to get the Inquiry VPD page 83 information
6599 		 */
6600 		fcmd->fcp_cntl.cntl_read_data = 1;
6601 		fcmd->fcp_cntl.cntl_write_data = 0;
6602 		fcmd->fcp_data_len = alloc_len;
6603 
6604 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6605 		fpkt->pkt_comp = fcp_scsi_callback;
6606 
6607 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6608 		scsi_cdb->g0_addr2 = 0x01;
6609 		scsi_cdb->g0_addr1 = 0x83;
6610 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6611 		break;
6612 
6613 	case SCMD_INQUIRY:
6614 		fcmd->fcp_cntl.cntl_read_data = 1;
6615 		fcmd->fcp_cntl.cntl_write_data = 0;
6616 		fcmd->fcp_data_len = alloc_len;
6617 
6618 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6619 		fpkt->pkt_comp = fcp_scsi_callback;
6620 
6621 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6622 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6623 		break;
6624 
6625 	case SCMD_REPORT_LUN: {
6626 		fc_portid_t	d_id;
6627 		opaque_t	fca_dev;
6628 
6629 		ASSERT(alloc_len >= 16);
6630 
6631 		d_id.priv_lilp_posit = 0;
6632 		d_id.port_id = ptgt->tgt_d_id;
6633 
6634 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6635 
6636 		mutex_enter(&ptgt->tgt_mutex);
6637 		ptgt->tgt_fca_dev = fca_dev;
6638 		mutex_exit(&ptgt->tgt_mutex);
6639 
6640 		fcmd->fcp_cntl.cntl_read_data = 1;
6641 		fcmd->fcp_cntl.cntl_write_data = 0;
6642 		fcmd->fcp_data_len = alloc_len;
6643 
6644 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6645 		fpkt->pkt_comp = fcp_scsi_callback;
6646 
6647 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6648 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6649 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6650 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6651 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6652 		break;
6653 	}
6654 
6655 	default:
6656 		fcp_log(CE_WARN, pptr->port_dip,
6657 		    "!fcp_send_scsi Invalid opcode");
6658 		break;
6659 	}
6660 
6661 	if (!nodma) {
6662 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6663 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6664 	}
6665 
6666 	mutex_enter(&pptr->port_mutex);
6667 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6668 
6669 		mutex_exit(&pptr->port_mutex);
6670 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6671 		    FC_SUCCESS) {
6672 			fcp_icmd_free(pptr, icmd);
6673 			return (DDI_FAILURE);
6674 		}
6675 		return (DDI_SUCCESS);
6676 	} else {
6677 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6678 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6679 		    "fcp_send_scsi,1: state change occured"
6680 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6681 		mutex_exit(&pptr->port_mutex);
6682 		fcp_icmd_free(pptr, icmd);
6683 		return (DDI_FAILURE);
6684 	}
6685 }
6686 
6687 
6688 /*
6689  * called by fcp_scsi_callback to check to handle the case where
6690  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6691  */
6692 static int
6693 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6694 {
6695 	uchar_t				rqlen;
6696 	int				rval = DDI_FAILURE;
6697 	struct scsi_extended_sense	sense_info, *sense;
6698 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6699 	    fpkt->pkt_ulp_private;
6700 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6701 	struct fcp_port		*pptr = ptgt->tgt_port;
6702 
6703 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6704 
6705 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6706 		/*
6707 		 * SCSI-II Reserve Release support. Some older FC drives return
6708 		 * Reservation conflict for Report Luns command.
6709 		 */
6710 		if (icmd->ipkt_nodma) {
6711 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6712 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6713 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6714 		} else {
6715 			fcp_rsp_t	new_resp;
6716 
6717 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6718 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6719 
6720 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6721 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6722 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6723 
6724 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6725 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6726 		}
6727 
6728 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6729 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6730 
6731 		return (DDI_SUCCESS);
6732 	}
6733 
6734 	sense = &sense_info;
6735 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6736 		/* no need to continue if sense length is not set */
6737 		return (rval);
6738 	}
6739 
6740 	/* casting 64-bit integer to 8-bit */
6741 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6742 	    sizeof (struct scsi_extended_sense));
6743 
6744 	if (rqlen < 14) {
6745 		/* no need to continue if request length isn't long enough */
6746 		return (rval);
6747 	}
6748 
6749 	if (icmd->ipkt_nodma) {
6750 		/*
6751 		 * We can safely use fcp_response_len here since the
6752 		 * only path that calls fcp_check_reportlun,
6753 		 * fcp_scsi_callback, has already called
6754 		 * fcp_validate_fcp_response.
6755 		 */
6756 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6757 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6758 	} else {
6759 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6760 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6761 		    sizeof (struct scsi_extended_sense));
6762 	}
6763 
6764 	if (!FCP_SENSE_NO_LUN(sense)) {
6765 		mutex_enter(&ptgt->tgt_mutex);
6766 		/* clear the flag if any */
6767 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6768 		mutex_exit(&ptgt->tgt_mutex);
6769 	}
6770 
6771 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6772 	    (sense->es_add_code == 0x20)) {
6773 		if (icmd->ipkt_nodma) {
6774 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6775 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6776 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6777 		} else {
6778 			fcp_rsp_t	new_resp;
6779 
6780 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6781 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6782 
6783 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6784 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6785 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6786 
6787 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6788 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6789 		}
6790 
6791 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6792 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6793 
6794 		return (DDI_SUCCESS);
6795 	}
6796 
6797 	/*
6798 	 * This is for the STK library which returns a check condition,
6799 	 * to indicate device is not ready, manual assistance needed.
6800 	 * This is to a report lun command when the door is open.
6801 	 */
6802 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6803 		if (icmd->ipkt_nodma) {
6804 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6805 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6806 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6807 		} else {
6808 			fcp_rsp_t	new_resp;
6809 
6810 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6811 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6812 
6813 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6814 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6815 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6816 
6817 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6818 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6819 		}
6820 
6821 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6822 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6823 
6824 		return (DDI_SUCCESS);
6825 	}
6826 
6827 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6828 	    (FCP_SENSE_NO_LUN(sense))) {
6829 		mutex_enter(&ptgt->tgt_mutex);
6830 		if ((FCP_SENSE_NO_LUN(sense)) &&
6831 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6832 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6833 			mutex_exit(&ptgt->tgt_mutex);
6834 			/*
6835 			 * reconfig was triggred by ILLEGAL REQUEST but
6836 			 * got ILLEGAL REQUEST again
6837 			 */
6838 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6839 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6840 			    "!FCP: Unable to obtain Report Lun data"
6841 			    " target=%x", ptgt->tgt_d_id);
6842 		} else {
6843 			if (ptgt->tgt_tid == NULL) {
6844 				timeout_id_t	tid;
6845 				/*
6846 				 * REPORT LUN data has changed.	 Kick off
6847 				 * rediscovery
6848 				 */
6849 				tid = timeout(fcp_reconfigure_luns,
6850 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6851 
6852 				ptgt->tgt_tid = tid;
6853 				ptgt->tgt_state |= FCP_TGT_BUSY;
6854 			}
6855 			if (FCP_SENSE_NO_LUN(sense)) {
6856 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6857 			}
6858 			mutex_exit(&ptgt->tgt_mutex);
6859 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6860 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6861 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6862 				    "!FCP:Report Lun Has Changed"
6863 				    " target=%x", ptgt->tgt_d_id);
6864 			} else if (FCP_SENSE_NO_LUN(sense)) {
6865 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6866 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6867 				    "!FCP:LU Not Supported"
6868 				    " target=%x", ptgt->tgt_d_id);
6869 			}
6870 		}
6871 		rval = DDI_SUCCESS;
6872 	}
6873 
6874 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6875 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6876 	    "D_ID=%x, sense=%x, status=%x",
6877 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6878 	    rsp->fcp_u.fcp_status.scsi_status);
6879 
6880 	return (rval);
6881 }
6882 
6883 /*
6884  *     Function: fcp_scsi_callback
6885  *
6886  *  Description: This is the callback routine set by fcp_send_scsi() after
6887  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6888  *		 and autogenerated by FCP are:	REPORT_LUN, INQUIRY and
6889  *		 INQUIRY_PAGE83.
6890  *
6891  *     Argument: *fpkt	 FC packet used to convey the command
6892  *
6893  * Return Value: None
6894  */
6895 static void
6896 fcp_scsi_callback(fc_packet_t *fpkt)
6897 {
6898 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6899 	    fpkt->pkt_ulp_private;
6900 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6901 	struct fcp_port	*pptr;
6902 	struct fcp_tgt	*ptgt;
6903 	struct fcp_lun	*plun;
6904 	struct fcp_rsp		response, *rsp;
6905 
6906 	ptgt = icmd->ipkt_tgt;
6907 	pptr = ptgt->tgt_port;
6908 	plun = icmd->ipkt_lun;
6909 
6910 	if (icmd->ipkt_nodma) {
6911 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6912 	} else {
6913 		rsp = &response;
6914 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6915 		    sizeof (struct fcp_rsp));
6916 	}
6917 
6918 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6919 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6920 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6921 	    "status=%x, lun num=%x",
6922 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6923 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6924 
6925 	/*
6926 	 * Pre-init LUN GUID with NWWN if it is not a device that
6927 	 * supports multiple luns and we know it's not page83
6928 	 * compliant.  Although using a NWWN is not lun unique,
6929 	 * we will be fine since there is only one lun behind the taget
6930 	 * in this case.
6931 	 */
6932 	if ((plun->lun_guid_size == 0) &&
6933 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6934 	    (fcp_symmetric_device_probe(plun) == 0)) {
6935 
6936 		char ascii_wwn[FC_WWN_SIZE*2+1];
6937 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6938 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6939 	}
6940 
6941 	/*
6942 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6943 	 * when thay have more data than what is asked in CDB. An overrun
6944 	 * is really when FCP_DL is smaller than the data length in CDB.
6945 	 * In the case here we know that REPORT LUN command we formed within
6946 	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6947 	 * behavior. In reality this is FC_SUCCESS.
6948 	 */
6949 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6950 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6951 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6952 		fpkt->pkt_state = FC_PKT_SUCCESS;
6953 	}
6954 
6955 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6956 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6957 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6958 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6959 		    ptgt->tgt_d_id);
6960 
6961 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6962 			/*
6963 			 * Inquiry VPD page command on A5K SES devices would
6964 			 * result in data CRC errors.
6965 			 */
6966 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6967 				(void) fcp_handle_page83(fpkt, icmd, 1);
6968 				return;
6969 			}
6970 		}
6971 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6972 		    FCP_MUST_RETRY(fpkt)) {
6973 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6974 			fcp_retry_scsi_cmd(fpkt);
6975 			return;
6976 		}
6977 
6978 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6979 		    FCP_TGT_TRACE_20);
6980 
6981 		mutex_enter(&pptr->port_mutex);
6982 		mutex_enter(&ptgt->tgt_mutex);
6983 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6984 			mutex_exit(&ptgt->tgt_mutex);
6985 			mutex_exit(&pptr->port_mutex);
6986 			fcp_print_error(fpkt);
6987 		} else {
6988 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6989 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6990 			    "fcp_scsi_callback,1: state change occured"
6991 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6992 			mutex_exit(&ptgt->tgt_mutex);
6993 			mutex_exit(&pptr->port_mutex);
6994 		}
6995 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6996 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6997 		fcp_icmd_free(pptr, icmd);
6998 		return;
6999 	}
7000 
7001 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7002 
7003 	mutex_enter(&pptr->port_mutex);
7004 	mutex_enter(&ptgt->tgt_mutex);
7005 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7006 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7007 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7008 		    "fcp_scsi_callback,2: state change occured"
7009 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7010 		mutex_exit(&ptgt->tgt_mutex);
7011 		mutex_exit(&pptr->port_mutex);
7012 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7013 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7014 		fcp_icmd_free(pptr, icmd);
7015 		return;
7016 	}
7017 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7018 
7019 	mutex_exit(&ptgt->tgt_mutex);
7020 	mutex_exit(&pptr->port_mutex);
7021 
7022 	if (icmd->ipkt_nodma) {
7023 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7024 		    sizeof (struct fcp_rsp));
7025 	} else {
7026 		bep = &fcp_rsp_err;
7027 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7028 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7029 	}
7030 
7031 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7032 		fcp_retry_scsi_cmd(fpkt);
7033 		return;
7034 	}
7035 
7036 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7037 	    FCP_NO_FAILURE) {
7038 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7039 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7040 		    "rsp_code=0x%x, rsp_len_set=0x%x",
7041 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7042 		fcp_retry_scsi_cmd(fpkt);
7043 		return;
7044 	}
7045 
7046 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7047 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7048 		fcp_queue_ipkt(pptr, fpkt);
7049 		return;
7050 	}
7051 
7052 	/*
7053 	 * Devices that do not support INQUIRY_PAGE83, return check condition
7054 	 * with illegal request as per SCSI spec.
7055 	 * Crossbridge is one such device and Daktari's SES node is another.
7056 	 * We want to ideally enumerate these devices as a non-mpxio devices.
7057 	 * SES nodes (Daktari only currently) are an exception to this.
7058 	 */
7059 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7060 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7061 
7062 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7063 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
7064 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7065 		    "check condition. May enumerate as non-mpxio device",
7066 		    ptgt->tgt_d_id, plun->lun_type);
7067 
7068 		/*
7069 		 * If we let Daktari's SES be enumerated as a non-mpxio
7070 		 * device, there will be a discrepency in that the other
7071 		 * internal FC disks will get enumerated as mpxio devices.
7072 		 * Applications like luxadm expect this to be consistent.
7073 		 *
7074 		 * So, we put in a hack here to check if this is an SES device
7075 		 * and handle it here.
7076 		 */
7077 		if (plun->lun_type == DTYPE_ESI) {
7078 			/*
7079 			 * Since, pkt_state is actually FC_PKT_SUCCESS
7080 			 * at this stage, we fake a failure here so that
7081 			 * fcp_handle_page83 will create a device path using
7082 			 * the WWN instead of the GUID which is not there anyway
7083 			 */
7084 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7085 			(void) fcp_handle_page83(fpkt, icmd, 1);
7086 			return;
7087 		}
7088 
7089 		mutex_enter(&ptgt->tgt_mutex);
7090 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7091 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7092 		mutex_exit(&ptgt->tgt_mutex);
7093 
7094 		(void) fcp_call_finish_init(pptr, ptgt,
7095 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7096 		    icmd->ipkt_cause);
7097 		fcp_icmd_free(pptr, icmd);
7098 		return;
7099 	}
7100 
7101 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7102 		int rval = DDI_FAILURE;
7103 
7104 		/*
7105 		 * handle cases where report lun isn't supported
7106 		 * by faking up our own REPORT_LUN response or
7107 		 * UNIT ATTENTION
7108 		 */
7109 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7110 			rval = fcp_check_reportlun(rsp, fpkt);
7111 
7112 			/*
7113 			 * fcp_check_reportlun might have modified the
7114 			 * FCP response. Copy it in again to get an updated
7115 			 * FCP response
7116 			 */
7117 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7118 				rsp = &response;
7119 
7120 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7121 				    fpkt->pkt_resp_acc,
7122 				    sizeof (struct fcp_rsp));
7123 			}
7124 		}
7125 
7126 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7127 			if (rval == DDI_SUCCESS) {
7128 				(void) fcp_call_finish_init(pptr, ptgt,
7129 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7130 				    icmd->ipkt_cause);
7131 				fcp_icmd_free(pptr, icmd);
7132 			} else {
7133 				fcp_retry_scsi_cmd(fpkt);
7134 			}
7135 
7136 			return;
7137 		}
7138 	} else {
7139 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7140 			mutex_enter(&ptgt->tgt_mutex);
7141 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7142 			mutex_exit(&ptgt->tgt_mutex);
7143 		}
7144 	}
7145 
7146 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7147 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7148 		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7149 		    DDI_DMA_SYNC_FORCPU);
7150 	}
7151 
7152 	switch (icmd->ipkt_opcode) {
7153 	case SCMD_INQUIRY:
7154 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7155 		fcp_handle_inquiry(fpkt, icmd);
7156 		break;
7157 
7158 	case SCMD_REPORT_LUN:
7159 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7160 		    FCP_TGT_TRACE_22);
7161 		fcp_handle_reportlun(fpkt, icmd);
7162 		break;
7163 
7164 	case SCMD_INQUIRY_PAGE83:
7165 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7166 		(void) fcp_handle_page83(fpkt, icmd, 0);
7167 		break;
7168 
7169 	default:
7170 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7171 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7172 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7173 		fcp_icmd_free(pptr, icmd);
7174 		break;
7175 	}
7176 }
7177 
7178 
7179 static void
7180 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7181 {
7182 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7183 	    fpkt->pkt_ulp_private;
7184 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7185 	struct fcp_port	*pptr = ptgt->tgt_port;
7186 
7187 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7188 	    fcp_is_retryable(icmd)) {
7189 		mutex_enter(&pptr->port_mutex);
7190 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7191 			mutex_exit(&pptr->port_mutex);
7192 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7193 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7194 			    "Retrying %s to %x; state=%x, reason=%x",
7195 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7196 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7197 			    fpkt->pkt_state, fpkt->pkt_reason);
7198 
7199 			fcp_queue_ipkt(pptr, fpkt);
7200 		} else {
7201 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7202 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7203 			    "fcp_retry_scsi_cmd,1: state change occured"
7204 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7205 			mutex_exit(&pptr->port_mutex);
7206 			(void) fcp_call_finish_init(pptr, ptgt,
7207 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7208 			    icmd->ipkt_cause);
7209 			fcp_icmd_free(pptr, icmd);
7210 		}
7211 	} else {
7212 		fcp_print_error(fpkt);
7213 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7214 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7215 		fcp_icmd_free(pptr, icmd);
7216 	}
7217 }
7218 
7219 /*
7220  *     Function: fcp_handle_page83
7221  *
7222  *  Description: Treats the response to INQUIRY_PAGE83.
7223  *
7224  *     Argument: *fpkt	FC packet used to convey the command.
7225  *		 *icmd	Original fcp_ipkt structure.
7226  *		 ignore_page83_data
7227  *			if it's 1, that means it's a special devices's
7228  *			page83 response, it should be enumerated under mpxio
7229  *
7230  * Return Value: None
7231  */
7232 static void
7233 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7234     int ignore_page83_data)
7235 {
7236 	struct fcp_port	*pptr;
7237 	struct fcp_lun	*plun;
7238 	struct fcp_tgt	*ptgt;
7239 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7240 	int			fail = 0;
7241 	ddi_devid_t		devid;
7242 	char			*guid = NULL;
7243 	int			ret;
7244 
7245 	ASSERT(icmd != NULL && fpkt != NULL);
7246 
7247 	pptr = icmd->ipkt_port;
7248 	ptgt = icmd->ipkt_tgt;
7249 	plun = icmd->ipkt_lun;
7250 
7251 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7252 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7253 
7254 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7255 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7256 
7257 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7258 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7259 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7260 		    "dtype=0x%x, lun num=%x",
7261 		    pptr->port_instance, ptgt->tgt_d_id,
7262 		    dev_id_page[0], plun->lun_num);
7263 
7264 		ret = ddi_devid_scsi_encode(
7265 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7266 		    NULL,		/* driver name */
7267 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7268 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7269 		    NULL,		/* page 80 data */
7270 		    0,		/* page 80 len */
7271 		    dev_id_page,	/* page 83 data */
7272 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7273 		    &devid);
7274 
7275 		if (ret == DDI_SUCCESS) {
7276 
7277 			guid = ddi_devid_to_guid(devid);
7278 
7279 			if (guid) {
7280 				/*
7281 				 * Check our current guid.  If it's non null
7282 				 * and it has changed, we need to copy it into
7283 				 * lun_old_guid since we might still need it.
7284 				 */
7285 				if (plun->lun_guid &&
7286 				    strcmp(guid, plun->lun_guid)) {
7287 					unsigned int len;
7288 
7289 					/*
7290 					 * If the guid of the LUN changes,
7291 					 * reconfiguration should be triggered
7292 					 * to reflect the changes.
7293 					 * i.e. we should offline the LUN with
7294 					 * the old guid, and online the LUN with
7295 					 * the new guid.
7296 					 */
7297 					plun->lun_state |= FCP_LUN_CHANGED;
7298 
7299 					if (plun->lun_old_guid) {
7300 						kmem_free(plun->lun_old_guid,
7301 						    plun->lun_old_guid_size);
7302 					}
7303 
7304 					len = plun->lun_guid_size;
7305 					plun->lun_old_guid_size = len;
7306 
7307 					plun->lun_old_guid = kmem_zalloc(len,
7308 					    KM_NOSLEEP);
7309 
7310 					if (plun->lun_old_guid) {
7311 						/*
7312 						 * The alloc was successful then
7313 						 * let's do the copy.
7314 						 */
7315 						bcopy(plun->lun_guid,
7316 						    plun->lun_old_guid, len);
7317 					} else {
7318 						fail = 1;
7319 						plun->lun_old_guid_size = 0;
7320 					}
7321 				}
7322 				if (!fail) {
7323 					if (fcp_copy_guid_2_lun_block(
7324 					    plun, guid)) {
7325 						fail = 1;
7326 					}
7327 				}
7328 				ddi_devid_free_guid(guid);
7329 
7330 			} else {
7331 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7332 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7333 				    "fcp_handle_page83: unable to create "
7334 				    "GUID");
7335 
7336 				/* couldn't create good guid from devid */
7337 				fail = 1;
7338 			}
7339 			ddi_devid_free(devid);
7340 
7341 		} else if (ret == DDI_NOT_WELL_FORMED) {
7342 			/* NULL filled data for page 83 */
7343 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7344 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7345 			    "fcp_handle_page83: retry GUID");
7346 
7347 			icmd->ipkt_retries = 0;
7348 			fcp_retry_scsi_cmd(fpkt);
7349 			return;
7350 		} else {
7351 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7352 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7353 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7354 			    ret);
7355 			/*
7356 			 * Since the page83 validation
7357 			 * introduced late, we are being
7358 			 * tolerant to the existing devices
7359 			 * that already found to be working
7360 			 * under mpxio, like A5200's SES device,
7361 			 * its page83 response will not be standard-compliant,
7362 			 * but we still want it to be enumerated under mpxio.
7363 			 */
7364 			if (fcp_symmetric_device_probe(plun) != 0) {
7365 				fail = 1;
7366 			}
7367 		}
7368 
7369 	} else {
7370 		/* bad packet state */
7371 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7372 
7373 		/*
7374 		 * For some special devices (A5K SES and Daktari's SES devices),
7375 		 * they should be enumerated under mpxio
7376 		 * or "luxadm dis" will fail
7377 		 */
7378 		if (ignore_page83_data) {
7379 			fail = 0;
7380 		} else {
7381 			fail = 1;
7382 		}
7383 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7384 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7385 		    "!Devid page cmd failed. "
7386 		    "fpkt_state: %x fpkt_reason: %x",
7387 		    "ignore_page83: %d",
7388 		    fpkt->pkt_state, fpkt->pkt_reason,
7389 		    ignore_page83_data);
7390 	}
7391 
7392 	mutex_enter(&pptr->port_mutex);
7393 	mutex_enter(&plun->lun_mutex);
7394 	/*
7395 	 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7396 	 * mismatch between lun_cip and lun_mpxio.
7397 	 */
7398 	if (plun->lun_cip == NULL) {
7399 		/*
7400 		 * If we don't have a guid for this lun it's because we were
7401 		 * unable to glean one from the page 83 response.  Set the
7402 		 * control flag to 0 here to make sure that we don't attempt to
7403 		 * enumerate it under mpxio.
7404 		 */
7405 		if (fail || pptr->port_mpxio == 0) {
7406 			plun->lun_mpxio = 0;
7407 		} else {
7408 			plun->lun_mpxio = 1;
7409 		}
7410 	}
7411 	mutex_exit(&plun->lun_mutex);
7412 	mutex_exit(&pptr->port_mutex);
7413 
7414 	mutex_enter(&ptgt->tgt_mutex);
7415 	plun->lun_state &=
7416 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7417 	mutex_exit(&ptgt->tgt_mutex);
7418 
7419 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7420 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7421 
7422 	fcp_icmd_free(pptr, icmd);
7423 }
7424 
7425 /*
7426  *     Function: fcp_handle_inquiry
7427  *
7428  *  Description: Called by fcp_scsi_callback to handle the response to an
7429  *		 INQUIRY request.
7430  *
7431  *     Argument: *fpkt	FC packet used to convey the command.
7432  *		 *icmd	Original fcp_ipkt structure.
7433  *
7434  * Return Value: None
7435  */
7436 static void
7437 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7438 {
7439 	struct fcp_port	*pptr;
7440 	struct fcp_lun	*plun;
7441 	struct fcp_tgt	*ptgt;
7442 	uchar_t		dtype;
7443 	uchar_t		pqual;
7444 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7445 
7446 	ASSERT(icmd != NULL && fpkt != NULL);
7447 
7448 	pptr = icmd->ipkt_port;
7449 	ptgt = icmd->ipkt_tgt;
7450 	plun = icmd->ipkt_lun;
7451 
7452 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7453 	    sizeof (struct scsi_inquiry));
7454 
7455 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7456 	pqual = plun->lun_inq.inq_dtype >> 5;
7457 
7458 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7459 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7460 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7461 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7462 	    plun->lun_num, dtype, pqual);
7463 
7464 	if (pqual != 0) {
7465 		/*
7466 		 * Non-zero peripheral qualifier
7467 		 */
7468 		fcp_log(CE_CONT, pptr->port_dip,
7469 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7470 		    "Device type=0x%x Peripheral qual=0x%x\n",
7471 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7472 
7473 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7474 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7475 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7476 		    "Device type=0x%x Peripheral qual=0x%x\n",
7477 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7478 
7479 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7480 
7481 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7482 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7483 		fcp_icmd_free(pptr, icmd);
7484 		return;
7485 	}
7486 
7487 	/*
7488 	 * If the device is already initialized, check the dtype
7489 	 * for a change. If it has changed then update the flags
7490 	 * so the create_luns will offline the old device and
7491 	 * create the new device. Refer to bug: 4764752
7492 	 */
7493 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7494 		plun->lun_state |= FCP_LUN_CHANGED;
7495 	}
7496 	plun->lun_type = plun->lun_inq.inq_dtype;
7497 
7498 	/*
7499 	 * This code is setting/initializing the throttling in the FCA
7500 	 * driver.
7501 	 */
7502 	mutex_enter(&pptr->port_mutex);
7503 	if (!pptr->port_notify) {
7504 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7505 			uint32_t cmd = 0;
7506 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7507 			    ((cmd & 0xFFFFFF00 >> 8) |
7508 			    FCP_SVE_THROTTLE << 8));
7509 			pptr->port_notify = 1;
7510 			mutex_exit(&pptr->port_mutex);
7511 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7512 			mutex_enter(&pptr->port_mutex);
7513 		}
7514 	}
7515 
7516 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7517 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7518 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7519 		    "fcp_handle_inquiry,1:state change occured"
7520 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7521 		mutex_exit(&pptr->port_mutex);
7522 
7523 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7524 		(void) fcp_call_finish_init(pptr, ptgt,
7525 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7526 		    icmd->ipkt_cause);
7527 		fcp_icmd_free(pptr, icmd);
7528 		return;
7529 	}
7530 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7531 	mutex_exit(&pptr->port_mutex);
7532 
7533 	/* Retrieve the rscn count (if a valid one exists) */
7534 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7535 		rscn_count = ((fc_ulp_rscn_info_t *)
7536 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7537 	} else {
7538 		rscn_count = FC_INVALID_RSCN_COUNT;
7539 	}
7540 
7541 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7542 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7543 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7544 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7545 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7546 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7547 		(void) fcp_call_finish_init(pptr, ptgt,
7548 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7549 		    icmd->ipkt_cause);
7550 	}
7551 
7552 	/*
7553 	 * Read Inquiry VPD Page 0x83 to uniquely
7554 	 * identify this logical unit.
7555 	 */
7556 	fcp_icmd_free(pptr, icmd);
7557 }
7558 
7559 /*
7560  *     Function: fcp_handle_reportlun
7561  *
7562  *  Description: Called by fcp_scsi_callback to handle the response to a
7563  *		 REPORT_LUN request.
7564  *
7565  *     Argument: *fpkt	FC packet used to convey the command.
7566  *		 *icmd	Original fcp_ipkt structure.
7567  *
7568  * Return Value: None
7569  */
7570 static void
7571 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7572 {
7573 	int				i;
7574 	int				nluns_claimed;
7575 	int				nluns_bufmax;
7576 	int				len;
7577 	uint16_t			lun_num;
7578 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7579 	struct fcp_port			*pptr;
7580 	struct fcp_tgt			*ptgt;
7581 	struct fcp_lun			*plun;
7582 	struct fcp_reportlun_resp	*report_lun;
7583 
7584 	pptr = icmd->ipkt_port;
7585 	ptgt = icmd->ipkt_tgt;
7586 	len = fpkt->pkt_datalen;
7587 
7588 	if ((len < FCP_LUN_HEADER) ||
7589 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7590 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7591 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7592 		fcp_icmd_free(pptr, icmd);
7593 		return;
7594 	}
7595 
7596 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7597 	    fpkt->pkt_datalen);
7598 
7599 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7600 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7601 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7602 	    pptr->port_instance, ptgt->tgt_d_id);
7603 
7604 	/*
7605 	 * Get the number of luns (which is supplied as LUNS * 8) the
7606 	 * device claims it has.
7607 	 */
7608 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7609 
7610 	/*
7611 	 * Get the maximum number of luns the buffer submitted can hold.
7612 	 */
7613 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7614 
7615 	/*
7616 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7617 	 */
7618 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7619 		kmem_free(report_lun, len);
7620 
7621 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7622 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7623 		    ptgt->tgt_d_id);
7624 
7625 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7626 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7627 		fcp_icmd_free(pptr, icmd);
7628 		return;
7629 	}
7630 
7631 	/*
7632 	 * If there are more LUNs than we have allocated memory for,
7633 	 * allocate more space and send down yet another report lun if
7634 	 * the maximum number of attempts hasn't been reached.
7635 	 */
7636 	mutex_enter(&ptgt->tgt_mutex);
7637 
7638 	if ((nluns_claimed > nluns_bufmax) &&
7639 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7640 
7641 		struct fcp_lun *plun;
7642 
7643 		ptgt->tgt_report_lun_cnt++;
7644 		plun = ptgt->tgt_lun;
7645 		ASSERT(plun != NULL);
7646 		mutex_exit(&ptgt->tgt_mutex);
7647 
7648 		kmem_free(report_lun, len);
7649 
7650 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7651 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7652 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7653 		    nluns_claimed, ptgt->tgt_d_id);
7654 
7655 		/* Retrieve the rscn count (if a valid one exists) */
7656 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7657 			rscn_count = ((fc_ulp_rscn_info_t *)
7658 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7659 			    ulp_rscn_count;
7660 		} else {
7661 			rscn_count = FC_INVALID_RSCN_COUNT;
7662 		}
7663 
7664 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7665 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7666 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7667 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7668 			(void) fcp_call_finish_init(pptr, ptgt,
7669 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7670 			    icmd->ipkt_cause);
7671 		}
7672 
7673 		fcp_icmd_free(pptr, icmd);
7674 		return;
7675 	}
7676 
7677 	if (nluns_claimed > nluns_bufmax) {
7678 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7679 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7680 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7681 		    "	 Number of LUNs lost=%x",
7682 		    ptgt->tgt_port_wwn.raw_wwn[0],
7683 		    ptgt->tgt_port_wwn.raw_wwn[1],
7684 		    ptgt->tgt_port_wwn.raw_wwn[2],
7685 		    ptgt->tgt_port_wwn.raw_wwn[3],
7686 		    ptgt->tgt_port_wwn.raw_wwn[4],
7687 		    ptgt->tgt_port_wwn.raw_wwn[5],
7688 		    ptgt->tgt_port_wwn.raw_wwn[6],
7689 		    ptgt->tgt_port_wwn.raw_wwn[7],
7690 		    nluns_claimed - nluns_bufmax);
7691 
7692 		nluns_claimed = nluns_bufmax;
7693 	}
7694 	ptgt->tgt_lun_cnt = nluns_claimed;
7695 
7696 	/*
7697 	 * Identify missing LUNs and print warning messages
7698 	 */
7699 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7700 		int offline;
7701 		int exists = 0;
7702 
7703 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7704 
7705 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7706 			uchar_t		*lun_string;
7707 
7708 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7709 
7710 			switch (lun_string[0] & 0xC0) {
7711 			case FCP_LUN_ADDRESSING:
7712 			case FCP_PD_ADDRESSING:
7713 			case FCP_VOLUME_ADDRESSING:
7714 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7715 				    lun_string[1];
7716 				if (plun->lun_num == lun_num) {
7717 					exists++;
7718 					break;
7719 				}
7720 				break;
7721 
7722 			default:
7723 				break;
7724 			}
7725 		}
7726 
7727 		if (!exists && !offline) {
7728 			mutex_exit(&ptgt->tgt_mutex);
7729 
7730 			mutex_enter(&pptr->port_mutex);
7731 			mutex_enter(&ptgt->tgt_mutex);
7732 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7733 				/*
7734 				 * set disappear flag when device was connected
7735 				 */
7736 				if (!(plun->lun_state &
7737 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7738 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7739 				}
7740 				mutex_exit(&ptgt->tgt_mutex);
7741 				mutex_exit(&pptr->port_mutex);
7742 				if (!(plun->lun_state &
7743 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7744 					fcp_log(CE_NOTE, pptr->port_dip,
7745 					    "!Lun=%x for target=%x disappeared",
7746 					    plun->lun_num, ptgt->tgt_d_id);
7747 				}
7748 				mutex_enter(&ptgt->tgt_mutex);
7749 			} else {
7750 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7751 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7752 				    "fcp_handle_reportlun,1: state change"
7753 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
7754 				mutex_exit(&ptgt->tgt_mutex);
7755 				mutex_exit(&pptr->port_mutex);
7756 				kmem_free(report_lun, len);
7757 				(void) fcp_call_finish_init(pptr, ptgt,
7758 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7759 				    icmd->ipkt_cause);
7760 				fcp_icmd_free(pptr, icmd);
7761 				return;
7762 			}
7763 		} else if (exists) {
7764 			/*
7765 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7766 			 * actually exists in REPORT_LUN response
7767 			 */
7768 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7769 				plun->lun_state &=
7770 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7771 			}
7772 			if (offline || plun->lun_num == 0) {
7773 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7774 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7775 					mutex_exit(&ptgt->tgt_mutex);
7776 					fcp_log(CE_NOTE, pptr->port_dip,
7777 					    "!Lun=%x for target=%x reappeared",
7778 					    plun->lun_num, ptgt->tgt_d_id);
7779 					mutex_enter(&ptgt->tgt_mutex);
7780 				}
7781 			}
7782 		}
7783 	}
7784 
7785 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7786 	mutex_exit(&ptgt->tgt_mutex);
7787 
7788 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7789 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7790 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7791 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7792 
7793 	/* scan each lun */
7794 	for (i = 0; i < nluns_claimed; i++) {
7795 		uchar_t	*lun_string;
7796 
7797 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7798 
7799 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7800 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7801 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7802 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7803 		    lun_string[0]);
7804 
7805 		switch (lun_string[0] & 0xC0) {
7806 		case FCP_LUN_ADDRESSING:
7807 		case FCP_PD_ADDRESSING:
7808 		case FCP_VOLUME_ADDRESSING:
7809 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7810 
7811 			/* We will skip masked LUNs because of the blacklist. */
7812 			if (fcp_lun_blacklist != NULL) {
7813 				mutex_enter(&ptgt->tgt_mutex);
7814 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7815 				    lun_num) == TRUE) {
7816 					ptgt->tgt_lun_cnt--;
7817 					mutex_exit(&ptgt->tgt_mutex);
7818 					break;
7819 				}
7820 				mutex_exit(&ptgt->tgt_mutex);
7821 			}
7822 
7823 			/* see if this LUN is already allocated */
7824 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7825 				plun = fcp_alloc_lun(ptgt);
7826 				if (plun == NULL) {
7827 					fcp_log(CE_NOTE, pptr->port_dip,
7828 					    "!Lun allocation failed"
7829 					    " target=%x lun=%x",
7830 					    ptgt->tgt_d_id, lun_num);
7831 					break;
7832 				}
7833 			}
7834 
7835 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7836 			/* convert to LUN */
7837 			plun->lun_addr.ent_addr_0 =
7838 			    BE_16(*(uint16_t *)&(lun_string[0]));
7839 			plun->lun_addr.ent_addr_1 =
7840 			    BE_16(*(uint16_t *)&(lun_string[2]));
7841 			plun->lun_addr.ent_addr_2 =
7842 			    BE_16(*(uint16_t *)&(lun_string[4]));
7843 			plun->lun_addr.ent_addr_3 =
7844 			    BE_16(*(uint16_t *)&(lun_string[6]));
7845 
7846 			plun->lun_num = lun_num;
7847 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7848 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7849 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7850 
7851 			/* Retrieve the rscn count (if a valid one exists) */
7852 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7853 				rscn_count = ((fc_ulp_rscn_info_t *)
7854 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7855 				    ulp_rscn_count;
7856 			} else {
7857 				rscn_count = FC_INVALID_RSCN_COUNT;
7858 			}
7859 
7860 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7861 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7862 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7863 				mutex_enter(&pptr->port_mutex);
7864 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7865 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7866 					fcp_log(CE_NOTE, pptr->port_dip,
7867 					    "!failed to send INQUIRY"
7868 					    " target=%x lun=%x",
7869 					    ptgt->tgt_d_id, plun->lun_num);
7870 				} else {
7871 					FCP_TRACE(fcp_logq,
7872 					    pptr->port_instbuf, fcp_trace,
7873 					    FCP_BUF_LEVEL_5, 0,
7874 					    "fcp_handle_reportlun,2: state"
7875 					    " change occured for D_ID=0x%x",
7876 					    ptgt->tgt_d_id);
7877 				}
7878 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7879 				mutex_exit(&pptr->port_mutex);
7880 			} else {
7881 				continue;
7882 			}
7883 			break;
7884 
7885 		default:
7886 			fcp_log(CE_WARN, NULL,
7887 			    "!Unsupported LUN Addressing method %x "
7888 			    "in response to REPORT_LUN", lun_string[0]);
7889 			break;
7890 		}
7891 
7892 		/*
7893 		 * each time through this loop we should decrement
7894 		 * the tmp_cnt by one -- since we go through this loop
7895 		 * one time for each LUN, the tmp_cnt should never be <=0
7896 		 */
7897 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7898 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7899 	}
7900 
7901 	if (i == 0) {
7902 		fcp_log(CE_WARN, pptr->port_dip,
7903 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7904 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7905 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7906 	}
7907 
7908 	kmem_free(report_lun, len);
7909 	fcp_icmd_free(pptr, icmd);
7910 }
7911 
7912 
7913 /*
7914  * called internally to return a LUN given a target and a LUN number
7915  */
7916 static struct fcp_lun *
7917 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7918 {
7919 	struct fcp_lun	*plun;
7920 
7921 	mutex_enter(&ptgt->tgt_mutex);
7922 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7923 		if (plun->lun_num == lun_num) {
7924 			mutex_exit(&ptgt->tgt_mutex);
7925 			return (plun);
7926 		}
7927 	}
7928 	mutex_exit(&ptgt->tgt_mutex);
7929 
7930 	return (NULL);
7931 }
7932 
7933 
7934 /*
7935  * handle finishing one target for fcp_finish_init
7936  *
7937  * return true (non-zero) if we want finish_init to continue with the
7938  * next target
7939  *
7940  * called with the port mutex held
7941  */
7942 /*ARGSUSED*/
7943 static int
7944 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7945     int link_cnt, int tgt_cnt, int cause)
7946 {
7947 	int	rval = 1;
7948 	ASSERT(pptr != NULL);
7949 	ASSERT(ptgt != NULL);
7950 
7951 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7952 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7953 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7954 	    ptgt->tgt_state);
7955 
7956 	ASSERT(mutex_owned(&pptr->port_mutex));
7957 
7958 	if ((pptr->port_link_cnt != link_cnt) ||
7959 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7960 		/*
7961 		 * oh oh -- another link reset or target change
7962 		 * must have occurred while we are in here
7963 		 */
7964 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7965 
7966 		return (0);
7967 	} else {
7968 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7969 	}
7970 
7971 	mutex_enter(&ptgt->tgt_mutex);
7972 
7973 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7974 		/*
7975 		 * tgt is not offline -- is it marked (i.e. needs
7976 		 * to be offlined) ??
7977 		 */
7978 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7979 			/*
7980 			 * this target not offline *and*
7981 			 * marked
7982 			 */
7983 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7984 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7985 			    tgt_cnt, 0, 0);
7986 		} else {
7987 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7988 
7989 			/* create the LUNs */
7990 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7991 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7992 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7993 				    cause);
7994 				ptgt->tgt_device_created = 1;
7995 			} else {
7996 				fcp_update_tgt_state(ptgt, FCP_RESET,
7997 				    FCP_LUN_BUSY);
7998 			}
7999 		}
8000 	}
8001 
8002 	mutex_exit(&ptgt->tgt_mutex);
8003 
8004 	return (rval);
8005 }
8006 
8007 
8008 /*
8009  * this routine is called to finish port initialization
8010  *
8011  * Each port has a "temp" counter -- when a state change happens (e.g.
8012  * port online), the temp count is set to the number of devices in the map.
8013  * Then, as each device gets "discovered", the temp counter is decremented
8014  * by one.  When this count reaches zero we know that all of the devices
8015  * in the map have been discovered (or an error has occurred), so we can
8016  * then finish initialization -- which is done by this routine (well, this
8017  * and fcp-finish_tgt())
8018  *
8019  * acquires and releases the global mutex
8020  *
8021  * called with the port mutex owned
8022  */
8023 static void
8024 fcp_finish_init(struct fcp_port *pptr)
8025 {
8026 #ifdef	DEBUG
8027 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8028 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8029 	    FCP_STACK_DEPTH);
8030 #endif /* DEBUG */
8031 
8032 	ASSERT(mutex_owned(&pptr->port_mutex));
8033 
8034 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8035 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8036 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8037 
8038 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
8039 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
8040 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8041 		pptr->port_state &= ~FCP_STATE_ONLINING;
8042 		pptr->port_state |= FCP_STATE_ONLINE;
8043 	}
8044 
8045 	/* Wake up threads waiting on config done */
8046 	cv_broadcast(&pptr->port_config_cv);
8047 }
8048 
8049 
8050 /*
8051  * called from fcp_finish_init to create the LUNs for a target
8052  *
8053  * called with the port mutex owned
8054  */
8055 static void
8056 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8057 {
8058 	struct fcp_lun	*plun;
8059 	struct fcp_port	*pptr;
8060 	child_info_t		*cip = NULL;
8061 
8062 	ASSERT(ptgt != NULL);
8063 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8064 
8065 	pptr = ptgt->tgt_port;
8066 
8067 	ASSERT(pptr != NULL);
8068 
8069 	/* scan all LUNs for this target */
8070 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8071 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8072 			continue;
8073 		}
8074 
8075 		if (plun->lun_state & FCP_LUN_MARK) {
8076 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8077 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8078 			    "fcp_create_luns: offlining marked LUN!");
8079 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8080 			continue;
8081 		}
8082 
8083 		plun->lun_state &= ~FCP_LUN_BUSY;
8084 
8085 		/*
8086 		 * There are conditions in which FCP_LUN_INIT flag is cleared
8087 		 * but we have a valid plun->lun_cip. To cover this case also
8088 		 * CLEAR_BUSY whenever we have a valid lun_cip.
8089 		 */
8090 		if (plun->lun_mpxio && plun->lun_cip &&
8091 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8092 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8093 		    0, 0))) {
8094 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8095 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8096 			    "fcp_create_luns: enable lun %p failed!",
8097 			    plun);
8098 		}
8099 
8100 		if (plun->lun_state & FCP_LUN_INIT &&
8101 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8102 			continue;
8103 		}
8104 
8105 		if (cause == FCP_CAUSE_USER_CREATE) {
8106 			continue;
8107 		}
8108 
8109 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8110 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8111 		    "create_luns: passing ONLINE elem to HP thread");
8112 
8113 		/*
8114 		 * If lun has changed, prepare for offlining the old path.
8115 		 * Do not offline the old path right now, since it may be
8116 		 * still opened.
8117 		 */
8118 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8119 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8120 		}
8121 
8122 		/* pass an ONLINE element to the hotplug thread */
8123 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8124 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8125 
8126 			/*
8127 			 * We can not synchronous attach (i.e pass
8128 			 * NDI_ONLINE_ATTACH) here as we might be
8129 			 * coming from an interrupt or callback
8130 			 * thread.
8131 			 */
8132 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8133 			    link_cnt, tgt_cnt, 0, 0)) {
8134 				fcp_log(CE_CONT, pptr->port_dip,
8135 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8136 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8137 			}
8138 		}
8139 	}
8140 }
8141 
8142 
8143 /*
8144  * function to online/offline devices
8145  */
8146 static int
8147 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8148     int online, int lcount, int tcount, int flags)
8149 {
8150 	int			rval = NDI_FAILURE;
8151 	int			circ;
8152 	child_info_t		*ccip;
8153 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8154 	int			is_mpxio = pptr->port_mpxio;
8155 	dev_info_t		*cdip, *pdip;
8156 	char			*devname;
8157 
8158 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8159 		/*
8160 		 * When this event gets serviced, lun_cip and lun_mpxio
8161 		 * has changed, so it should be invalidated now.
8162 		 */
8163 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8164 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8165 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8166 		return (rval);
8167 	}
8168 
8169 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8170 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8171 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8172 	    "flags=%x mpxio=%x\n",
8173 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8174 	    plun->lun_mpxio);
8175 
8176 	/*
8177 	 * lun_mpxio needs checking here because we can end up in a race
8178 	 * condition where this task has been dispatched while lun_mpxio is
8179 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8180 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8181 	 * the flag. We rely on the serialization of the tasks here. We return
8182 	 * NDI_SUCCESS so any callers continue without reporting spurious
8183 	 * errors, and the still think we're an MPXIO LUN.
8184 	 */
8185 
8186 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8187 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8188 		if (plun->lun_mpxio) {
8189 			rval = fcp_update_mpxio_path(plun, cip, online);
8190 		} else {
8191 			rval = NDI_SUCCESS;
8192 		}
8193 		return (rval);
8194 	}
8195 
8196 	/*
8197 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8198 	 * executing devfs_clean() if parent lock is held.
8199 	 */
8200 	ASSERT(!servicing_interrupt());
8201 	if (online == FCP_OFFLINE) {
8202 		if (plun->lun_mpxio == 0) {
8203 			if (plun->lun_cip == cip) {
8204 				cdip = DIP(plun->lun_cip);
8205 			} else {
8206 				cdip = DIP(cip);
8207 			}
8208 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8209 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8210 		} else if ((plun->lun_cip != cip) && cip) {
8211 			/*
8212 			 * This means a DTYPE/GUID change, we shall get the
8213 			 * dip of the old cip instead of the current lun_cip.
8214 			 */
8215 			cdip = mdi_pi_get_client(PIP(cip));
8216 		}
8217 		if (cdip) {
8218 			if (i_ddi_devi_attached(cdip)) {
8219 				pdip = ddi_get_parent(cdip);
8220 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8221 				ndi_devi_enter(pdip, &circ);
8222 				(void) ddi_deviname(cdip, devname);
8223 				ndi_devi_exit(pdip, circ);
8224 				/*
8225 				 * Release parent lock before calling
8226 				 * devfs_clean().
8227 				 */
8228 				rval = devfs_clean(pdip, devname + 1,
8229 				    DV_CLEAN_FORCE);
8230 				kmem_free(devname, MAXNAMELEN + 1);
8231 				/*
8232 				 * Return if devfs_clean() fails for
8233 				 * non-MPXIO case.
8234 				 * For MPXIO case, another path could be
8235 				 * offlined.
8236 				 */
8237 				if (rval && plun->lun_mpxio == 0) {
8238 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8239 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8240 					    "fcp_trigger_lun: devfs_clean "
8241 					    "failed rval=%x  dip=%p",
8242 					    rval, pdip);
8243 					return (NDI_FAILURE);
8244 				}
8245 			}
8246 		}
8247 	}
8248 
8249 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8250 		return (NDI_FAILURE);
8251 	}
8252 
8253 	if (is_mpxio) {
8254 		mdi_devi_enter(pptr->port_dip, &circ);
8255 	} else {
8256 		ndi_devi_enter(pptr->port_dip, &circ);
8257 	}
8258 
8259 	mutex_enter(&pptr->port_mutex);
8260 	mutex_enter(&plun->lun_mutex);
8261 
8262 	if (online == FCP_ONLINE) {
8263 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8264 		if (ccip == NULL) {
8265 			goto fail;
8266 		}
8267 	} else {
8268 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8269 			goto fail;
8270 		}
8271 		ccip = cip;
8272 	}
8273 
8274 	if (online == FCP_ONLINE) {
8275 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8276 		    &circ);
8277 		fc_ulp_log_device_event(pptr->port_fp_handle,
8278 		    FC_ULP_DEVICE_ONLINE);
8279 	} else {
8280 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8281 		    &circ);
8282 		fc_ulp_log_device_event(pptr->port_fp_handle,
8283 		    FC_ULP_DEVICE_OFFLINE);
8284 	}
8285 
8286 fail:	mutex_exit(&plun->lun_mutex);
8287 	mutex_exit(&pptr->port_mutex);
8288 
8289 	if (is_mpxio) {
8290 		mdi_devi_exit(pptr->port_dip, circ);
8291 	} else {
8292 		ndi_devi_exit(pptr->port_dip, circ);
8293 	}
8294 
8295 	fc_ulp_idle_port(pptr->port_fp_handle);
8296 
8297 	return (rval);
8298 }
8299 
8300 
8301 /*
8302  * take a target offline by taking all of its LUNs offline
8303  */
8304 /*ARGSUSED*/
8305 static int
8306 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8307     int link_cnt, int tgt_cnt, int nowait, int flags)
8308 {
8309 	struct fcp_tgt_elem	*elem;
8310 
8311 	ASSERT(mutex_owned(&pptr->port_mutex));
8312 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8313 
8314 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8315 
8316 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8317 	    ptgt->tgt_change_cnt)) {
8318 		mutex_exit(&ptgt->tgt_mutex);
8319 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8320 		mutex_enter(&ptgt->tgt_mutex);
8321 
8322 		return (0);
8323 	}
8324 
8325 	ptgt->tgt_pd_handle = NULL;
8326 	mutex_exit(&ptgt->tgt_mutex);
8327 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8328 	mutex_enter(&ptgt->tgt_mutex);
8329 
8330 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8331 
8332 	if (ptgt->tgt_tcap &&
8333 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8334 		elem->flags = flags;
8335 		elem->time = fcp_watchdog_time;
8336 		if (nowait == 0) {
8337 			elem->time += fcp_offline_delay;
8338 		}
8339 		elem->ptgt = ptgt;
8340 		elem->link_cnt = link_cnt;
8341 		elem->tgt_cnt = tgt_cnt;
8342 		elem->next = pptr->port_offline_tgts;
8343 		pptr->port_offline_tgts = elem;
8344 	} else {
8345 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8346 	}
8347 
8348 	return (1);
8349 }
8350 
8351 
8352 static void
8353 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8354     int link_cnt, int tgt_cnt, int flags)
8355 {
8356 	ASSERT(mutex_owned(&pptr->port_mutex));
8357 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8358 
8359 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8360 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8361 	ptgt->tgt_pd_handle = NULL;
8362 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8363 }
8364 
8365 
8366 static void
8367 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8368     int flags)
8369 {
8370 	struct	fcp_lun	*plun;
8371 
8372 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8373 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8374 
8375 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8376 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8377 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8378 		}
8379 	}
8380 }
8381 
8382 
8383 /*
8384  * take a LUN offline
8385  *
8386  * enters and leaves with the target mutex held, releasing it in the process
8387  *
8388  * allocates memory in non-sleep mode
8389  */
8390 static void
8391 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8392     int nowait, int flags)
8393 {
8394 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8395 	struct fcp_lun_elem	*elem;
8396 
8397 	ASSERT(plun != NULL);
8398 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8399 
8400 	if (nowait) {
8401 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8402 		return;
8403 	}
8404 
8405 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8406 		elem->flags = flags;
8407 		elem->time = fcp_watchdog_time;
8408 		if (nowait == 0) {
8409 			elem->time += fcp_offline_delay;
8410 		}
8411 		elem->plun = plun;
8412 		elem->link_cnt = link_cnt;
8413 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8414 		elem->next = pptr->port_offline_luns;
8415 		pptr->port_offline_luns = elem;
8416 	} else {
8417 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8418 	}
8419 }
8420 
8421 
8422 static void
8423 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8424 {
8425 	struct fcp_pkt	*head = NULL;
8426 
8427 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8428 
8429 	mutex_exit(&LUN_TGT->tgt_mutex);
8430 
8431 	head = fcp_scan_commands(plun);
8432 	if (head != NULL) {
8433 		fcp_abort_commands(head, LUN_PORT);
8434 	}
8435 
8436 	mutex_enter(&LUN_TGT->tgt_mutex);
8437 
8438 	if (plun->lun_cip && plun->lun_mpxio) {
8439 		/*
8440 		 * Intimate MPxIO lun busy is cleared
8441 		 */
8442 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8443 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8444 		    0, 0)) {
8445 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8446 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8447 			    LUN_TGT->tgt_d_id, plun->lun_num);
8448 		}
8449 		/*
8450 		 * Intimate MPxIO that the lun is now marked for offline
8451 		 */
8452 		mutex_exit(&LUN_TGT->tgt_mutex);
8453 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8454 		mutex_enter(&LUN_TGT->tgt_mutex);
8455 	}
8456 }
8457 
8458 static void
8459 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8460     int flags)
8461 {
8462 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8463 
8464 	mutex_exit(&LUN_TGT->tgt_mutex);
8465 	fcp_update_offline_flags(plun);
8466 	mutex_enter(&LUN_TGT->tgt_mutex);
8467 
8468 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8469 
8470 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8471 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8472 	    "offline_lun: passing OFFLINE elem to HP thread");
8473 
8474 	if (plun->lun_cip) {
8475 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8476 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8477 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8478 		    LUN_TGT->tgt_trace);
8479 
8480 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8481 		    link_cnt, tgt_cnt, flags, 0)) {
8482 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8483 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8484 			    LUN_TGT->tgt_d_id, plun->lun_num);
8485 		}
8486 	}
8487 }
8488 
8489 static void
8490 fcp_scan_offline_luns(struct fcp_port *pptr)
8491 {
8492 	struct fcp_lun_elem	*elem;
8493 	struct fcp_lun_elem	*prev;
8494 	struct fcp_lun_elem	*next;
8495 
8496 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8497 
8498 	prev = NULL;
8499 	elem = pptr->port_offline_luns;
8500 	while (elem) {
8501 		next = elem->next;
8502 		if (elem->time <= fcp_watchdog_time) {
8503 			int			changed = 1;
8504 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8505 
8506 			mutex_enter(&ptgt->tgt_mutex);
8507 			if (pptr->port_link_cnt == elem->link_cnt &&
8508 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8509 				changed = 0;
8510 			}
8511 
8512 			if (!changed &&
8513 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8514 				fcp_offline_lun_now(elem->plun,
8515 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8516 			}
8517 			mutex_exit(&ptgt->tgt_mutex);
8518 
8519 			kmem_free(elem, sizeof (*elem));
8520 
8521 			if (prev) {
8522 				prev->next = next;
8523 			} else {
8524 				pptr->port_offline_luns = next;
8525 			}
8526 		} else {
8527 			prev = elem;
8528 		}
8529 		elem = next;
8530 	}
8531 }
8532 
8533 
8534 static void
8535 fcp_scan_offline_tgts(struct fcp_port *pptr)
8536 {
8537 	struct fcp_tgt_elem	*elem;
8538 	struct fcp_tgt_elem	*prev;
8539 	struct fcp_tgt_elem	*next;
8540 
8541 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8542 
8543 	prev = NULL;
8544 	elem = pptr->port_offline_tgts;
8545 	while (elem) {
8546 		next = elem->next;
8547 		if (elem->time <= fcp_watchdog_time) {
8548 			int		outdated = 1;
8549 			struct fcp_tgt	*ptgt = elem->ptgt;
8550 
8551 			mutex_enter(&ptgt->tgt_mutex);
8552 
8553 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8554 				/* No change on tgt since elem was created. */
8555 				outdated = 0;
8556 			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8557 			    pptr->port_link_cnt == elem->link_cnt + 1 &&
8558 			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8559 				/*
8560 				 * Exactly one thing happened to the target
8561 				 * inbetween: the local port went offline.
8562 				 * For fp the remote port is already gone so
8563 				 * it will not tell us again to offline the
8564 				 * target. We must offline it now.
8565 				 */
8566 				outdated = 0;
8567 			}
8568 
8569 			if (!outdated && !(ptgt->tgt_state &
8570 			    FCP_TGT_OFFLINE)) {
8571 				fcp_offline_target_now(pptr,
8572 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8573 				    elem->flags);
8574 			}
8575 
8576 			mutex_exit(&ptgt->tgt_mutex);
8577 
8578 			kmem_free(elem, sizeof (*elem));
8579 
8580 			if (prev) {
8581 				prev->next = next;
8582 			} else {
8583 				pptr->port_offline_tgts = next;
8584 			}
8585 		} else {
8586 			prev = elem;
8587 		}
8588 		elem = next;
8589 	}
8590 }
8591 
8592 
8593 static void
8594 fcp_update_offline_flags(struct fcp_lun *plun)
8595 {
8596 	struct fcp_port	*pptr = LUN_PORT;
8597 	ASSERT(plun != NULL);
8598 
8599 	mutex_enter(&LUN_TGT->tgt_mutex);
8600 	plun->lun_state |= FCP_LUN_OFFLINE;
8601 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8602 
8603 	mutex_enter(&plun->lun_mutex);
8604 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8605 		dev_info_t *cdip = NULL;
8606 
8607 		mutex_exit(&LUN_TGT->tgt_mutex);
8608 
8609 		if (plun->lun_mpxio == 0) {
8610 			cdip = DIP(plun->lun_cip);
8611 		} else if (plun->lun_cip) {
8612 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8613 		}
8614 
8615 		mutex_exit(&plun->lun_mutex);
8616 		if (cdip) {
8617 			(void) ndi_event_retrieve_cookie(
8618 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8619 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8620 			(void) ndi_event_run_callbacks(
8621 			    pptr->port_ndi_event_hdl, cdip,
8622 			    fcp_remove_eid, NULL);
8623 		}
8624 	} else {
8625 		mutex_exit(&plun->lun_mutex);
8626 		mutex_exit(&LUN_TGT->tgt_mutex);
8627 	}
8628 }
8629 
8630 
8631 /*
8632  * Scan all of the command pkts for this port, moving pkts that
8633  * match our LUN onto our own list (headed by "head")
8634  */
8635 static struct fcp_pkt *
8636 fcp_scan_commands(struct fcp_lun *plun)
8637 {
8638 	struct fcp_port	*pptr = LUN_PORT;
8639 
8640 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8641 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8642 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8643 
8644 	struct fcp_pkt	*head = NULL;	/* head of our list */
8645 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8646 
8647 	int			cmds_found = 0;
8648 
8649 	mutex_enter(&pptr->port_pkt_mutex);
8650 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8651 		struct fcp_lun *tlun =
8652 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8653 
8654 		ncmd = cmd->cmd_next;	/* set next command */
8655 
8656 		/*
8657 		 * if this pkt is for a different LUN  or the
8658 		 * command is sent down, skip it.
8659 		 */
8660 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8661 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8662 			pcmd = cmd;
8663 			continue;
8664 		}
8665 		cmds_found++;
8666 		if (pcmd != NULL) {
8667 			ASSERT(pptr->port_pkt_head != cmd);
8668 			pcmd->cmd_next = cmd->cmd_next;
8669 		} else {
8670 			ASSERT(cmd == pptr->port_pkt_head);
8671 			pptr->port_pkt_head = cmd->cmd_next;
8672 		}
8673 
8674 		if (cmd == pptr->port_pkt_tail) {
8675 			pptr->port_pkt_tail = pcmd;
8676 			if (pcmd) {
8677 				pcmd->cmd_next = NULL;
8678 			}
8679 		}
8680 
8681 		if (head == NULL) {
8682 			head = tail = cmd;
8683 		} else {
8684 			ASSERT(tail != NULL);
8685 
8686 			tail->cmd_next = cmd;
8687 			tail = cmd;
8688 		}
8689 		cmd->cmd_next = NULL;
8690 	}
8691 	mutex_exit(&pptr->port_pkt_mutex);
8692 
8693 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8694 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8695 	    "scan commands: %d cmd(s) found", cmds_found);
8696 
8697 	return (head);
8698 }
8699 
8700 
8701 /*
8702  * Abort all the commands in the command queue
8703  */
8704 static void
8705 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8706 {
8707 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8708 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8709 
8710 	ASSERT(mutex_owned(&pptr->port_mutex));
8711 
8712 	/* scan through the pkts and invalid them */
8713 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8714 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8715 
8716 		ncmd = cmd->cmd_next;
8717 		ASSERT(pkt != NULL);
8718 
8719 		/*
8720 		 * The lun is going to be marked offline. Indicate
8721 		 * the target driver not to requeue or retry this command
8722 		 * as the device is going to be offlined pretty soon.
8723 		 */
8724 		pkt->pkt_reason = CMD_DEV_GONE;
8725 		pkt->pkt_statistics = 0;
8726 		pkt->pkt_state = 0;
8727 
8728 		/* reset cmd flags/state */
8729 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8730 		cmd->cmd_state = FCP_PKT_IDLE;
8731 
8732 		/*
8733 		 * ensure we have a packet completion routine,
8734 		 * then call it.
8735 		 */
8736 		ASSERT(pkt->pkt_comp != NULL);
8737 
8738 		mutex_exit(&pptr->port_mutex);
8739 		fcp_post_callback(cmd);
8740 		mutex_enter(&pptr->port_mutex);
8741 	}
8742 }
8743 
8744 
8745 /*
8746  * the pkt_comp callback for command packets
8747  */
8748 static void
8749 fcp_cmd_callback(fc_packet_t *fpkt)
8750 {
8751 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8752 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8753 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8754 
8755 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8756 
8757 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8758 		cmn_err(CE_PANIC, "Packet already completed %p",
8759 		    (void *)cmd);
8760 	}
8761 
8762 	/*
8763 	 * Watch thread should be freeing the packet, ignore the pkt.
8764 	 */
8765 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8766 		fcp_log(CE_CONT, pptr->port_dip,
8767 		    "!FCP: Pkt completed while aborting\n");
8768 		return;
8769 	}
8770 	cmd->cmd_state = FCP_PKT_IDLE;
8771 
8772 	fcp_complete_pkt(fpkt);
8773 
8774 #ifdef	DEBUG
8775 	mutex_enter(&pptr->port_pkt_mutex);
8776 	pptr->port_npkts--;
8777 	mutex_exit(&pptr->port_pkt_mutex);
8778 #endif /* DEBUG */
8779 
8780 	fcp_post_callback(cmd);
8781 }
8782 
8783 
8784 static void
8785 fcp_complete_pkt(fc_packet_t *fpkt)
8786 {
8787 	int			error = 0;
8788 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8789 	    fpkt->pkt_ulp_private;
8790 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8791 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8792 	struct fcp_lun	*plun;
8793 	struct fcp_tgt	*ptgt;
8794 	struct fcp_rsp		*rsp;
8795 	struct scsi_address	save;
8796 
8797 #ifdef	DEBUG
8798 	save = pkt->pkt_address;
8799 #endif /* DEBUG */
8800 
8801 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8802 
8803 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8804 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8805 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8806 			    sizeof (struct fcp_rsp));
8807 		}
8808 
8809 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8810 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8811 
8812 		pkt->pkt_resid = 0;
8813 
8814 		if (fpkt->pkt_datalen) {
8815 			pkt->pkt_state |= STATE_XFERRED_DATA;
8816 			if (fpkt->pkt_data_resid) {
8817 				error++;
8818 			}
8819 		}
8820 
8821 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8822 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8823 			/*
8824 			 * The next two checks make sure that if there
8825 			 * is no sense data or a valid response and
8826 			 * the command came back with check condition,
8827 			 * the command should be retried.
8828 			 */
8829 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8830 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8831 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8832 				pkt->pkt_resid = cmd->cmd_dmacount;
8833 			}
8834 		}
8835 
8836 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8837 			return;
8838 		}
8839 
8840 		plun = ADDR2LUN(&pkt->pkt_address);
8841 		ptgt = plun->lun_tgt;
8842 		ASSERT(ptgt != NULL);
8843 
8844 		/*
8845 		 * Update the transfer resid, if appropriate
8846 		 */
8847 		if (rsp->fcp_u.fcp_status.resid_over ||
8848 		    rsp->fcp_u.fcp_status.resid_under) {
8849 			pkt->pkt_resid = rsp->fcp_resid;
8850 		}
8851 
8852 		/*
8853 		 * First see if we got a FCP protocol error.
8854 		 */
8855 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8856 			struct fcp_rsp_info	*bep;
8857 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8858 			    sizeof (struct fcp_rsp));
8859 
8860 			if (fcp_validate_fcp_response(rsp, pptr) !=
8861 			    FC_SUCCESS) {
8862 				pkt->pkt_reason = CMD_CMPLT;
8863 				*(pkt->pkt_scbp) = STATUS_CHECK;
8864 
8865 				fcp_log(CE_WARN, pptr->port_dip,
8866 				    "!SCSI command to d_id=0x%x lun=0x%x"
8867 				    " failed, Bad FCP response values:"
8868 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8869 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8870 				    ptgt->tgt_d_id, plun->lun_num,
8871 				    rsp->reserved_0, rsp->reserved_1,
8872 				    rsp->fcp_u.fcp_status.reserved_0,
8873 				    rsp->fcp_u.fcp_status.reserved_1,
8874 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8875 
8876 				return;
8877 			}
8878 
8879 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8880 				FCP_CP_IN(fpkt->pkt_resp +
8881 				    sizeof (struct fcp_rsp), bep,
8882 				    fpkt->pkt_resp_acc,
8883 				    sizeof (struct fcp_rsp_info));
8884 			}
8885 
8886 			if (bep->rsp_code != FCP_NO_FAILURE) {
8887 				child_info_t	*cip;
8888 
8889 				pkt->pkt_reason = CMD_TRAN_ERR;
8890 
8891 				mutex_enter(&plun->lun_mutex);
8892 				cip = plun->lun_cip;
8893 				mutex_exit(&plun->lun_mutex);
8894 
8895 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8896 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8897 				    "FCP response error on cmd=%p"
8898 				    " target=0x%x, cip=%p", cmd,
8899 				    ptgt->tgt_d_id, cip);
8900 			}
8901 		}
8902 
8903 		/*
8904 		 * See if we got a SCSI error with sense data
8905 		 */
8906 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8907 			uchar_t				rqlen;
8908 			caddr_t				sense_from;
8909 			child_info_t			*cip;
8910 			timeout_id_t			tid;
8911 			struct scsi_arq_status		*arq;
8912 			struct scsi_extended_sense	*sense_to;
8913 
8914 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8915 			sense_to = &arq->sts_sensedata;
8916 
8917 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8918 			    sizeof (struct scsi_extended_sense));
8919 
8920 			sense_from = (caddr_t)fpkt->pkt_resp +
8921 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8922 
8923 			if (fcp_validate_fcp_response(rsp, pptr) !=
8924 			    FC_SUCCESS) {
8925 				pkt->pkt_reason = CMD_CMPLT;
8926 				*(pkt->pkt_scbp) = STATUS_CHECK;
8927 
8928 				fcp_log(CE_WARN, pptr->port_dip,
8929 				    "!SCSI command to d_id=0x%x lun=0x%x"
8930 				    " failed, Bad FCP response values:"
8931 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8932 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8933 				    ptgt->tgt_d_id, plun->lun_num,
8934 				    rsp->reserved_0, rsp->reserved_1,
8935 				    rsp->fcp_u.fcp_status.reserved_0,
8936 				    rsp->fcp_u.fcp_status.reserved_1,
8937 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8938 
8939 				return;
8940 			}
8941 
8942 			/*
8943 			 * copy in sense information
8944 			 */
8945 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8946 				FCP_CP_IN(sense_from, sense_to,
8947 				    fpkt->pkt_resp_acc, rqlen);
8948 			} else {
8949 				bcopy(sense_from, sense_to, rqlen);
8950 			}
8951 
8952 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8953 			    (FCP_SENSE_NO_LUN(sense_to))) {
8954 				mutex_enter(&ptgt->tgt_mutex);
8955 				if (ptgt->tgt_tid == NULL) {
8956 					/*
8957 					 * Kick off rediscovery
8958 					 */
8959 					tid = timeout(fcp_reconfigure_luns,
8960 					    (caddr_t)ptgt, drv_usectohz(1));
8961 
8962 					ptgt->tgt_tid = tid;
8963 					ptgt->tgt_state |= FCP_TGT_BUSY;
8964 				}
8965 				mutex_exit(&ptgt->tgt_mutex);
8966 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8967 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8968 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8969 					    "!FCP: Report Lun Has Changed"
8970 					    " target=%x", ptgt->tgt_d_id);
8971 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8972 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8973 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8974 					    "!FCP: LU Not Supported"
8975 					    " target=%x", ptgt->tgt_d_id);
8976 				}
8977 			}
8978 			ASSERT(pkt->pkt_scbp != NULL);
8979 
8980 			pkt->pkt_state |= STATE_ARQ_DONE;
8981 
8982 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8983 
8984 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8985 			arq->sts_rqpkt_reason = 0;
8986 			arq->sts_rqpkt_statistics = 0;
8987 
8988 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8989 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8990 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8991 			    STATE_XFERRED_DATA;
8992 
8993 			mutex_enter(&plun->lun_mutex);
8994 			cip = plun->lun_cip;
8995 			mutex_exit(&plun->lun_mutex);
8996 
8997 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8998 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8999 			    "SCSI Check condition on cmd=%p target=0x%x"
9000 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
9001 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
9002 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
9003 			    rsp->fcp_u.fcp_status.scsi_status,
9004 			    sense_to->es_key, sense_to->es_add_code,
9005 			    sense_to->es_qual_code);
9006 		}
9007 	} else {
9008 		plun = ADDR2LUN(&pkt->pkt_address);
9009 		ptgt = plun->lun_tgt;
9010 		ASSERT(ptgt != NULL);
9011 
9012 		/*
9013 		 * Work harder to translate errors into target driver
9014 		 * understandable ones. Note with despair that the target
9015 		 * drivers don't decode pkt_state and pkt_reason exhaustively
9016 		 * They resort to using the big hammer most often, which
9017 		 * may not get fixed in the life time of this driver.
9018 		 */
9019 		pkt->pkt_state = 0;
9020 		pkt->pkt_statistics = 0;
9021 
9022 		switch (fpkt->pkt_state) {
9023 		case FC_PKT_TRAN_ERROR:
9024 			switch (fpkt->pkt_reason) {
9025 			case FC_REASON_OVERRUN:
9026 				pkt->pkt_reason = CMD_CMD_OVR;
9027 				pkt->pkt_statistics |= STAT_ABORTED;
9028 				break;
9029 
9030 			case FC_REASON_XCHG_BSY: {
9031 				caddr_t ptr;
9032 
9033 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9034 
9035 				ptr = (caddr_t)pkt->pkt_scbp;
9036 				if (ptr) {
9037 					*ptr = STATUS_BUSY;
9038 				}
9039 				break;
9040 			}
9041 
9042 			case FC_REASON_ABORTED:
9043 				pkt->pkt_reason = CMD_TRAN_ERR;
9044 				pkt->pkt_statistics |= STAT_ABORTED;
9045 				break;
9046 
9047 			case FC_REASON_ABORT_FAILED:
9048 				pkt->pkt_reason = CMD_ABORT_FAIL;
9049 				break;
9050 
9051 			case FC_REASON_NO_SEQ_INIT:
9052 			case FC_REASON_CRC_ERROR:
9053 				pkt->pkt_reason = CMD_TRAN_ERR;
9054 				pkt->pkt_statistics |= STAT_ABORTED;
9055 				break;
9056 			default:
9057 				pkt->pkt_reason = CMD_TRAN_ERR;
9058 				break;
9059 			}
9060 			break;
9061 
9062 		case FC_PKT_PORT_OFFLINE: {
9063 			dev_info_t	*cdip = NULL;
9064 			caddr_t		ptr;
9065 
9066 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9067 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9068 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
9069 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9070 				    ptgt->tgt_d_id);
9071 			}
9072 
9073 			mutex_enter(&plun->lun_mutex);
9074 			if (plun->lun_mpxio == 0) {
9075 				cdip = DIP(plun->lun_cip);
9076 			} else if (plun->lun_cip) {
9077 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9078 			}
9079 
9080 			mutex_exit(&plun->lun_mutex);
9081 
9082 			if (cdip) {
9083 				(void) ndi_event_retrieve_cookie(
9084 				    pptr->port_ndi_event_hdl, cdip,
9085 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9086 				    NDI_EVENT_NOPASS);
9087 				(void) ndi_event_run_callbacks(
9088 				    pptr->port_ndi_event_hdl, cdip,
9089 				    fcp_remove_eid, NULL);
9090 			}
9091 
9092 			/*
9093 			 * If the link goes off-line for a lip,
9094 			 * this will cause a error to the ST SG
9095 			 * SGEN drivers. By setting BUSY we will
9096 			 * give the drivers the chance to retry
9097 			 * before it blows of the job. ST will
9098 			 * remember how many times it has retried.
9099 			 */
9100 
9101 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9102 			    (plun->lun_type == DTYPE_CHANGER)) {
9103 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9104 				ptr = (caddr_t)pkt->pkt_scbp;
9105 				if (ptr) {
9106 					*ptr = STATUS_BUSY;
9107 				}
9108 			} else {
9109 				pkt->pkt_reason = CMD_TRAN_ERR;
9110 				pkt->pkt_statistics |= STAT_BUS_RESET;
9111 			}
9112 			break;
9113 		}
9114 
9115 		case FC_PKT_TRAN_BSY:
9116 			/*
9117 			 * Use the ssd Qfull handling here.
9118 			 */
9119 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9120 			pkt->pkt_state = STATE_GOT_BUS;
9121 			break;
9122 
9123 		case FC_PKT_TIMEOUT:
9124 			pkt->pkt_reason = CMD_TIMEOUT;
9125 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9126 				pkt->pkt_statistics |= STAT_TIMEOUT;
9127 			} else {
9128 				pkt->pkt_statistics |= STAT_ABORTED;
9129 			}
9130 			break;
9131 
9132 		case FC_PKT_LOCAL_RJT:
9133 			switch (fpkt->pkt_reason) {
9134 			case FC_REASON_OFFLINE: {
9135 				dev_info_t	*cdip = NULL;
9136 
9137 				mutex_enter(&plun->lun_mutex);
9138 				if (plun->lun_mpxio == 0) {
9139 					cdip = DIP(plun->lun_cip);
9140 				} else if (plun->lun_cip) {
9141 					cdip = mdi_pi_get_client(
9142 					    PIP(plun->lun_cip));
9143 				}
9144 				mutex_exit(&plun->lun_mutex);
9145 
9146 				if (cdip) {
9147 					(void) ndi_event_retrieve_cookie(
9148 					    pptr->port_ndi_event_hdl, cdip,
9149 					    FCAL_REMOVE_EVENT,
9150 					    &fcp_remove_eid,
9151 					    NDI_EVENT_NOPASS);
9152 					(void) ndi_event_run_callbacks(
9153 					    pptr->port_ndi_event_hdl,
9154 					    cdip, fcp_remove_eid, NULL);
9155 				}
9156 
9157 				pkt->pkt_reason = CMD_TRAN_ERR;
9158 				pkt->pkt_statistics |= STAT_BUS_RESET;
9159 
9160 				break;
9161 			}
9162 
9163 			case FC_REASON_NOMEM:
9164 			case FC_REASON_QFULL: {
9165 				caddr_t ptr;
9166 
9167 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9168 				ptr = (caddr_t)pkt->pkt_scbp;
9169 				if (ptr) {
9170 					*ptr = STATUS_BUSY;
9171 				}
9172 				break;
9173 			}
9174 
9175 			case FC_REASON_DMA_ERROR:
9176 				pkt->pkt_reason = CMD_DMA_DERR;
9177 				pkt->pkt_statistics |= STAT_ABORTED;
9178 				break;
9179 
9180 			case FC_REASON_CRC_ERROR:
9181 			case FC_REASON_UNDERRUN: {
9182 				uchar_t		status;
9183 				/*
9184 				 * Work around for Bugid: 4240945.
9185 				 * IB on A5k doesn't set the Underrun bit
9186 				 * in the fcp status, when it is transferring
9187 				 * less than requested amount of data. Work
9188 				 * around the ses problem to keep luxadm
9189 				 * happy till ibfirmware is fixed.
9190 				 */
9191 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9192 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9193 					    fpkt->pkt_resp_acc,
9194 					    sizeof (struct fcp_rsp));
9195 				}
9196 				status = rsp->fcp_u.fcp_status.scsi_status;
9197 				if (((plun->lun_type & DTYPE_MASK) ==
9198 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9199 					pkt->pkt_reason = CMD_CMPLT;
9200 					*pkt->pkt_scbp = status;
9201 					pkt->pkt_resid = 0;
9202 				} else {
9203 					pkt->pkt_reason = CMD_TRAN_ERR;
9204 					pkt->pkt_statistics |= STAT_ABORTED;
9205 				}
9206 				break;
9207 			}
9208 
9209 			case FC_REASON_NO_CONNECTION:
9210 			case FC_REASON_UNSUPPORTED:
9211 			case FC_REASON_ILLEGAL_REQ:
9212 			case FC_REASON_BAD_SID:
9213 			case FC_REASON_DIAG_BUSY:
9214 			case FC_REASON_FCAL_OPN_FAIL:
9215 			case FC_REASON_BAD_XID:
9216 			default:
9217 				pkt->pkt_reason = CMD_TRAN_ERR;
9218 				pkt->pkt_statistics |= STAT_ABORTED;
9219 				break;
9220 
9221 			}
9222 			break;
9223 
9224 		case FC_PKT_NPORT_RJT:
9225 		case FC_PKT_FABRIC_RJT:
9226 		case FC_PKT_NPORT_BSY:
9227 		case FC_PKT_FABRIC_BSY:
9228 		default:
9229 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9230 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9231 			    "FC Status 0x%x, reason 0x%x",
9232 			    fpkt->pkt_state, fpkt->pkt_reason);
9233 			pkt->pkt_reason = CMD_TRAN_ERR;
9234 			pkt->pkt_statistics |= STAT_ABORTED;
9235 			break;
9236 		}
9237 
9238 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9239 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9240 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9241 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9242 		    fpkt->pkt_reason);
9243 	}
9244 
9245 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9246 }
9247 
9248 
9249 static int
9250 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9251 {
9252 	if (rsp->reserved_0 || rsp->reserved_1 ||
9253 	    rsp->fcp_u.fcp_status.reserved_0 ||
9254 	    rsp->fcp_u.fcp_status.reserved_1) {
9255 		/*
9256 		 * These reserved fields should ideally be zero. FCP-2 does say
9257 		 * that the recipient need not check for reserved fields to be
9258 		 * zero. If they are not zero, we will not make a fuss about it
9259 		 * - just log it (in debug to both trace buffer and messages
9260 		 * file and to trace buffer only in non-debug) and move on.
9261 		 *
9262 		 * Non-zero reserved fields were seen with minnows.
9263 		 *
9264 		 * qlc takes care of some of this but we cannot assume that all
9265 		 * FCAs will do so.
9266 		 */
9267 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9268 		    FCP_BUF_LEVEL_5, 0,
9269 		    "Got fcp response packet with non-zero reserved fields "
9270 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9271 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9272 		    rsp->reserved_0, rsp->reserved_1,
9273 		    rsp->fcp_u.fcp_status.reserved_0,
9274 		    rsp->fcp_u.fcp_status.reserved_1);
9275 	}
9276 
9277 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9278 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9279 		return (FC_FAILURE);
9280 	}
9281 
9282 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9283 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9284 	    sizeof (struct fcp_rsp))) {
9285 		return (FC_FAILURE);
9286 	}
9287 
9288 	return (FC_SUCCESS);
9289 }
9290 
9291 
9292 /*
9293  * This is called when there is a change the in device state. The case we're
9294  * handling here is, if the d_id s does not match, offline this tgt and online
9295  * a new tgt with the new d_id.	 called from fcp_handle_devices with
9296  * port_mutex held.
9297  */
9298 static int
9299 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9300     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9301 {
9302 	ASSERT(mutex_owned(&pptr->port_mutex));
9303 
9304 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9305 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9306 	    "Starting fcp_device_changed...");
9307 
9308 	/*
9309 	 * The two cases where the port_device_changed is called is
9310 	 * either it changes it's d_id or it's hard address.
9311 	 */
9312 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9313 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9314 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9315 
9316 		/* offline this target */
9317 		mutex_enter(&ptgt->tgt_mutex);
9318 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9319 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9320 			    0, 1, NDI_DEVI_REMOVE);
9321 		}
9322 		mutex_exit(&ptgt->tgt_mutex);
9323 
9324 		fcp_log(CE_NOTE, pptr->port_dip,
9325 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9326 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9327 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9328 		    map_entry->map_hard_addr.hard_addr);
9329 	}
9330 
9331 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9332 	    link_cnt, tgt_cnt, cause));
9333 }
9334 
9335 /*
9336  *     Function: fcp_alloc_lun
9337  *
9338  *  Description: Creates a new lun structure and adds it to the list
9339  *		 of luns of the target.
9340  *
9341  *     Argument: ptgt		Target the lun will belong to.
9342  *
9343  * Return Value: NULL		Failed
9344  *		 Not NULL	Succeeded
9345  *
9346  *	Context: Kernel context
9347  */
9348 static struct fcp_lun *
9349 fcp_alloc_lun(struct fcp_tgt *ptgt)
9350 {
9351 	struct fcp_lun *plun;
9352 
9353 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9354 	if (plun != NULL) {
9355 		/*
9356 		 * Initialize the mutex before putting in the target list
9357 		 * especially before releasing the target mutex.
9358 		 */
9359 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9360 		plun->lun_tgt = ptgt;
9361 
9362 		mutex_enter(&ptgt->tgt_mutex);
9363 		plun->lun_next = ptgt->tgt_lun;
9364 		ptgt->tgt_lun = plun;
9365 		plun->lun_old_guid = NULL;
9366 		plun->lun_old_guid_size = 0;
9367 		mutex_exit(&ptgt->tgt_mutex);
9368 	}
9369 
9370 	return (plun);
9371 }
9372 
9373 /*
9374  *     Function: fcp_dealloc_lun
9375  *
9376  *  Description: Frees the LUN structure passed by the caller.
9377  *
9378  *     Argument: plun		LUN structure to free.
9379  *
9380  * Return Value: None
9381  *
9382  *	Context: Kernel context.
9383  */
9384 static void
9385 fcp_dealloc_lun(struct fcp_lun *plun)
9386 {
9387 	mutex_enter(&plun->lun_mutex);
9388 	if (plun->lun_cip) {
9389 		fcp_remove_child(plun);
9390 	}
9391 	mutex_exit(&plun->lun_mutex);
9392 
9393 	mutex_destroy(&plun->lun_mutex);
9394 	if (plun->lun_guid) {
9395 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9396 	}
9397 	if (plun->lun_old_guid) {
9398 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9399 	}
9400 	kmem_free(plun, sizeof (*plun));
9401 }
9402 
9403 /*
9404  *     Function: fcp_alloc_tgt
9405  *
9406  *  Description: Creates a new target structure and adds it to the port
9407  *		 hash list.
9408  *
9409  *     Argument: pptr		fcp port structure
9410  *		 *map_entry	entry describing the target to create
9411  *		 link_cnt	Link state change counter
9412  *
9413  * Return Value: NULL		Failed
9414  *		 Not NULL	Succeeded
9415  *
9416  *	Context: Kernel context.
9417  */
9418 static struct fcp_tgt *
9419 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9420 {
9421 	int			hash;
9422 	uchar_t			*wwn;
9423 	struct fcp_tgt	*ptgt;
9424 
9425 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9426 	if (ptgt != NULL) {
9427 		mutex_enter(&pptr->port_mutex);
9428 		if (link_cnt != pptr->port_link_cnt) {
9429 			/*
9430 			 * oh oh -- another link reset
9431 			 * in progress -- give up
9432 			 */
9433 			mutex_exit(&pptr->port_mutex);
9434 			kmem_free(ptgt, sizeof (*ptgt));
9435 			ptgt = NULL;
9436 		} else {
9437 			/*
9438 			 * initialize the mutex before putting in the port
9439 			 * wwn list, especially before releasing the port
9440 			 * mutex.
9441 			 */
9442 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9443 
9444 			/* add new target entry to the port's hash list */
9445 			wwn = (uchar_t *)&map_entry->map_pwwn;
9446 			hash = FCP_HASH(wwn);
9447 
9448 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9449 			pptr->port_tgt_hash_table[hash] = ptgt;
9450 
9451 			/* save cross-ptr */
9452 			ptgt->tgt_port = pptr;
9453 
9454 			ptgt->tgt_change_cnt = 1;
9455 
9456 			/* initialize the target manual_config_only flag */
9457 			if (fcp_enable_auto_configuration) {
9458 				ptgt->tgt_manual_config_only = 0;
9459 			} else {
9460 				ptgt->tgt_manual_config_only = 1;
9461 			}
9462 
9463 			mutex_exit(&pptr->port_mutex);
9464 		}
9465 	}
9466 
9467 	return (ptgt);
9468 }
9469 
9470 /*
9471  *     Function: fcp_dealloc_tgt
9472  *
9473  *  Description: Frees the target structure passed by the caller.
9474  *
9475  *     Argument: ptgt		Target structure to free.
9476  *
9477  * Return Value: None
9478  *
9479  *	Context: Kernel context.
9480  */
9481 static void
9482 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9483 {
9484 	mutex_destroy(&ptgt->tgt_mutex);
9485 	kmem_free(ptgt, sizeof (*ptgt));
9486 }
9487 
9488 
9489 /*
9490  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9491  *
9492  *	Device discovery commands will not be retried for-ever as
9493  *	this will have repercussions on other devices that need to
9494  *	be submitted to the hotplug thread. After a quick glance
9495  *	at the SCSI-3 spec, it was found that the spec doesn't
9496  *	mandate a forever retry, rather recommends a delayed retry.
9497  *
9498  *	Since Photon IB is single threaded, STATUS_BUSY is common
9499  *	in a 4+initiator environment. Make sure the total time
9500  *	spent on retries (including command timeout) does not
9501  *	60 seconds
9502  */
9503 static void
9504 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9505 {
9506 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9507 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9508 
9509 	mutex_enter(&pptr->port_mutex);
9510 	mutex_enter(&ptgt->tgt_mutex);
9511 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9512 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9513 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9514 		    "fcp_queue_ipkt,1:state change occured"
9515 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9516 		mutex_exit(&ptgt->tgt_mutex);
9517 		mutex_exit(&pptr->port_mutex);
9518 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9519 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9520 		fcp_icmd_free(pptr, icmd);
9521 		return;
9522 	}
9523 	mutex_exit(&ptgt->tgt_mutex);
9524 
9525 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9526 
9527 	if (pptr->port_ipkt_list != NULL) {
9528 		/* add pkt to front of doubly-linked list */
9529 		pptr->port_ipkt_list->ipkt_prev = icmd;
9530 		icmd->ipkt_next = pptr->port_ipkt_list;
9531 		pptr->port_ipkt_list = icmd;
9532 		icmd->ipkt_prev = NULL;
9533 	} else {
9534 		/* this is the first/only pkt on the list */
9535 		pptr->port_ipkt_list = icmd;
9536 		icmd->ipkt_next = NULL;
9537 		icmd->ipkt_prev = NULL;
9538 	}
9539 	mutex_exit(&pptr->port_mutex);
9540 }
9541 
9542 /*
9543  *     Function: fcp_transport
9544  *
9545  *  Description: This function submits the Fibre Channel packet to the transort
9546  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9547  *		 fails the submission, the treatment depends on the value of
9548  *		 the variable internal.
9549  *
9550  *     Argument: port_handle	fp/fctl port handle.
9551  *		 *fpkt		Packet to submit to the transport layer.
9552  *		 internal	Not zero when it's an internal packet.
9553  *
9554  * Return Value: FC_TRAN_BUSY
9555  *		 FC_STATEC_BUSY
9556  *		 FC_OFFLINE
9557  *		 FC_LOGINREQ
9558  *		 FC_DEVICE_BUSY
9559  *		 FC_SUCCESS
9560  */
9561 static int
9562 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9563 {
9564 	int	rval;
9565 
9566 	rval = fc_ulp_transport(port_handle, fpkt);
9567 	if (rval == FC_SUCCESS) {
9568 		return (rval);
9569 	}
9570 
9571 	/*
9572 	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9573 	 * a command, if the underlying modules see that there is a state
9574 	 * change, or if a port is OFFLINE, that means, that state change
9575 	 * hasn't reached FCP yet, so re-queue the command for deferred
9576 	 * submission.
9577 	 */
9578 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9579 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9580 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9581 		/*
9582 		 * Defer packet re-submission. Life hang is possible on
9583 		 * internal commands if the port driver sends FC_STATEC_BUSY
9584 		 * for ever, but that shouldn't happen in a good environment.
9585 		 * Limiting re-transport for internal commands is probably a
9586 		 * good idea..
9587 		 * A race condition can happen when a port sees barrage of
9588 		 * link transitions offline to online. If the FCTL has
9589 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9590 		 * internal commands should be queued to do the discovery.
9591 		 * The race condition is when an online comes and FCP starts
9592 		 * its internal discovery and the link goes offline. It is
9593 		 * possible that the statec_callback has not reached FCP
9594 		 * and FCP is carrying on with its internal discovery.
9595 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9596 		 * that the link has gone offline. At this point FCP should
9597 		 * drop all the internal commands and wait for the
9598 		 * statec_callback. It will be facilitated by incrementing
9599 		 * port_link_cnt.
9600 		 *
9601 		 * For external commands, the (FC)pkt_timeout is decremented
9602 		 * by the QUEUE Delay added by our driver, Care is taken to
9603 		 * ensure that it doesn't become zero (zero means no timeout)
9604 		 * If the time expires right inside driver queue itself,
9605 		 * the watch thread will return it to the original caller
9606 		 * indicating that the command has timed-out.
9607 		 */
9608 		if (internal) {
9609 			char			*op;
9610 			struct fcp_ipkt	*icmd;
9611 
9612 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9613 			switch (icmd->ipkt_opcode) {
9614 			case SCMD_REPORT_LUN:
9615 				op = "REPORT LUN";
9616 				break;
9617 
9618 			case SCMD_INQUIRY:
9619 				op = "INQUIRY";
9620 				break;
9621 
9622 			case SCMD_INQUIRY_PAGE83:
9623 				op = "INQUIRY-83";
9624 				break;
9625 
9626 			default:
9627 				op = "Internal SCSI COMMAND";
9628 				break;
9629 			}
9630 
9631 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9632 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9633 				rval = FC_SUCCESS;
9634 			}
9635 		} else {
9636 			struct fcp_pkt *cmd;
9637 			struct fcp_port *pptr;
9638 
9639 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9640 			cmd->cmd_state = FCP_PKT_IDLE;
9641 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9642 
9643 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9644 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9645 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9646 				    "fcp_transport: xport busy for pkt %p",
9647 				    cmd->cmd_pkt);
9648 				rval = FC_TRAN_BUSY;
9649 			} else {
9650 				fcp_queue_pkt(pptr, cmd);
9651 				rval = FC_SUCCESS;
9652 			}
9653 		}
9654 	}
9655 
9656 	return (rval);
9657 }
9658 
9659 /*VARARGS3*/
9660 static void
9661 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9662 {
9663 	char		buf[256];
9664 	va_list		ap;
9665 
9666 	if (dip == NULL) {
9667 		dip = fcp_global_dip;
9668 	}
9669 
9670 	va_start(ap, fmt);
9671 	(void) vsprintf(buf, fmt, ap);
9672 	va_end(ap);
9673 
9674 	scsi_log(dip, "fcp", level, buf);
9675 }
9676 
9677 /*
9678  * This function retries NS registry of FC4 type.
9679  * It assumes that fcp_mutex is held.
9680  * The function does nothing if topology is not fabric
9681  * So, the topology has to be set before this function can be called
9682  */
9683 static void
9684 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9685 {
9686 	int	rval;
9687 
9688 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9689 
9690 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9691 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9692 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9693 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9694 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9695 		}
9696 		return;
9697 	}
9698 	mutex_exit(&pptr->port_mutex);
9699 	rval = fcp_do_ns_registry(pptr, s_id);
9700 	mutex_enter(&pptr->port_mutex);
9701 
9702 	if (rval == 0) {
9703 		/* Registry successful. Reset flag */
9704 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9705 	}
9706 }
9707 
9708 /*
9709  * This function registers the ULP with the switch by calling transport i/f
9710  */
9711 static int
9712 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9713 {
9714 	fc_ns_cmd_t		ns_cmd;
9715 	ns_rfc_type_t		rfc;
9716 	uint32_t		types[8];
9717 
9718 	/*
9719 	 * Prepare the Name server structure to
9720 	 * register with the transport in case of
9721 	 * Fabric configuration.
9722 	 */
9723 	bzero(&rfc, sizeof (rfc));
9724 	bzero(types, sizeof (types));
9725 
9726 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9727 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9728 
9729 	rfc.rfc_port_id.port_id = s_id;
9730 	bcopy(types, rfc.rfc_types, sizeof (types));
9731 
9732 	ns_cmd.ns_flags = 0;
9733 	ns_cmd.ns_cmd = NS_RFT_ID;
9734 	ns_cmd.ns_req_len = sizeof (rfc);
9735 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9736 	ns_cmd.ns_resp_len = 0;
9737 	ns_cmd.ns_resp_payload = NULL;
9738 
9739 	/*
9740 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9741 	 */
9742 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9743 		fcp_log(CE_WARN, pptr->port_dip,
9744 		    "!ns_registry: failed name server registration");
9745 		return (1);
9746 	}
9747 
9748 	return (0);
9749 }
9750 
9751 /*
9752  *     Function: fcp_handle_port_attach
9753  *
9754  *  Description: This function is called from fcp_port_attach() to attach a
9755  *		 new port. This routine does the following:
9756  *
9757  *		1) Allocates an fcp_port structure and initializes it.
9758  *		2) Tries to register the new FC-4 (FCP) capablity with the name
9759  *		   server.
9760  *		3) Kicks off the enumeration of the targets/luns visible
9761  *		   through this new port.  That is done by calling
9762  *		   fcp_statec_callback() if the port is online.
9763  *
9764  *     Argument: ulph		fp/fctl port handle.
9765  *		 *pinfo		Port information.
9766  *		 s_id		Port ID.
9767  *		 instance	Device instance number for the local port
9768  *				(returned by ddi_get_instance()).
9769  *
9770  * Return Value: DDI_SUCCESS
9771  *		 DDI_FAILURE
9772  *
9773  *	Context: User and Kernel context.
9774  */
9775 /*ARGSUSED*/
9776 int
9777 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9778     uint32_t s_id, int instance)
9779 {
9780 	int			res = DDI_FAILURE;
9781 	scsi_hba_tran_t		*tran;
9782 	int			mutex_initted = FALSE;
9783 	int			hba_attached = FALSE;
9784 	int			soft_state_linked = FALSE;
9785 	int			event_bind = FALSE;
9786 	struct fcp_port		*pptr;
9787 	fc_portmap_t		*tmp_list = NULL;
9788 	uint32_t		max_cnt, alloc_cnt;
9789 	uchar_t			*boot_wwn = NULL;
9790 	uint_t			nbytes;
9791 	int			manual_cfg;
9792 
9793 	/*
9794 	 * this port instance attaching for the first time (or after
9795 	 * being detached before)
9796 	 */
9797 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9798 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9799 
9800 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9801 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9802 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9803 		    instance);
9804 		return (res);
9805 	}
9806 
9807 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9808 		/* this shouldn't happen */
9809 		ddi_soft_state_free(fcp_softstate, instance);
9810 		cmn_err(CE_WARN, "fcp: bad soft state");
9811 		return (res);
9812 	}
9813 
9814 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9815 
9816 	/*
9817 	 * Make a copy of ulp_port_info as fctl allocates
9818 	 * a temp struct.
9819 	 */
9820 	(void) fcp_cp_pinfo(pptr, pinfo);
9821 
9822 	/*
9823 	 * Check for manual_configuration_only property.
9824 	 * Enable manual configurtion if the property is
9825 	 * set to 1, otherwise disable manual configuration.
9826 	 */
9827 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9828 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9829 	    MANUAL_CFG_ONLY,
9830 	    -1)) != -1) {
9831 		if (manual_cfg == 1) {
9832 			char	*pathname;
9833 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9834 			(void) ddi_pathname(pptr->port_dip, pathname);
9835 			cmn_err(CE_NOTE,
9836 			    "%s (%s%d) %s is enabled via %s.conf.",
9837 			    pathname,
9838 			    ddi_driver_name(pptr->port_dip),
9839 			    ddi_get_instance(pptr->port_dip),
9840 			    MANUAL_CFG_ONLY,
9841 			    ddi_driver_name(pptr->port_dip));
9842 			fcp_enable_auto_configuration = 0;
9843 			kmem_free(pathname, MAXPATHLEN);
9844 		}
9845 	}
9846 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9847 	pptr->port_link_cnt = 1;
9848 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9849 	pptr->port_id = s_id;
9850 	pptr->port_instance = instance;
9851 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9852 	pptr->port_state = FCP_STATE_INIT;
9853 	if (pinfo->port_acc_attr == NULL) {
9854 		/*
9855 		 * The corresponding FCA doesn't support DMA at all
9856 		 */
9857 		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9858 	}
9859 
9860 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9861 
9862 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9863 		/*
9864 		 * If FCA supports DMA in SCSI data phase, we need preallocate
9865 		 * dma cookie, so stash the cookie size
9866 		 */
9867 		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9868 		    pptr->port_data_dma_attr.dma_attr_sgllen;
9869 	}
9870 
9871 	/*
9872 	 * The two mutexes of fcp_port are initialized.	 The variable
9873 	 * mutex_initted is incremented to remember that fact.	That variable
9874 	 * is checked when the routine fails and the mutexes have to be
9875 	 * destroyed.
9876 	 */
9877 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9878 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9879 	mutex_initted++;
9880 
9881 	/*
9882 	 * The SCSI tran structure is allocate and initialized now.
9883 	 */
9884 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9885 		fcp_log(CE_WARN, pptr->port_dip,
9886 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9887 		goto fail;
9888 	}
9889 
9890 	/* link in the transport structure then fill it in */
9891 	pptr->port_tran = tran;
9892 	tran->tran_hba_private		= pptr;
9893 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9894 	tran->tran_tgt_probe		= NULL;
9895 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9896 	tran->tran_start		= fcp_scsi_start;
9897 	tran->tran_reset		= fcp_scsi_reset;
9898 	tran->tran_abort		= fcp_scsi_abort;
9899 	tran->tran_getcap		= fcp_scsi_getcap;
9900 	tran->tran_setcap		= fcp_scsi_setcap;
9901 	tran->tran_init_pkt		= NULL;
9902 	tran->tran_destroy_pkt		= NULL;
9903 	tran->tran_dmafree		= NULL;
9904 	tran->tran_sync_pkt		= NULL;
9905 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9906 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9907 	tran->tran_get_name		= fcp_scsi_get_name;
9908 	tran->tran_clear_aca		= NULL;
9909 	tran->tran_clear_task_set	= NULL;
9910 	tran->tran_terminate_task	= NULL;
9911 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9912 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9913 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9914 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9915 	tran->tran_quiesce		= NULL;
9916 	tran->tran_unquiesce		= NULL;
9917 	tran->tran_bus_reset		= NULL;
9918 	tran->tran_bus_config		= fcp_scsi_bus_config;
9919 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9920 	tran->tran_bus_power		= NULL;
9921 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9922 
9923 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9924 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9925 	tran->tran_setup_pkt		= fcp_pkt_setup;
9926 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9927 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9928 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9929 	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9930 		/*
9931 		 * If FCA don't support DMA, then we use different vectors to
9932 		 * minimize the effects on DMA code flow path
9933 		 */
9934 		tran->tran_start	   = fcp_pseudo_start;
9935 		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
9936 		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
9937 		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
9938 		tran->tran_dmafree	   = fcp_pseudo_dmafree;
9939 		tran->tran_setup_pkt	   = NULL;
9940 		tran->tran_teardown_pkt	   = NULL;
9941 		tran->tran_pkt_constructor = NULL;
9942 		tran->tran_pkt_destructor  = NULL;
9943 		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
9944 	}
9945 
9946 	/*
9947 	 * Allocate an ndi event handle
9948 	 */
9949 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9950 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9951 
9952 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9953 	    sizeof (fcp_ndi_event_defs));
9954 
9955 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9956 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9957 
9958 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9959 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9960 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9961 
9962 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9963 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9964 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9965 		goto fail;
9966 	}
9967 	event_bind++;	/* Checked in fail case */
9968 
9969 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9970 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9971 	    != DDI_SUCCESS) {
9972 		fcp_log(CE_WARN, pptr->port_dip,
9973 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9974 		goto fail;
9975 	}
9976 	hba_attached++;	/* Checked in fail case */
9977 
9978 	pptr->port_mpxio = 0;
9979 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9980 	    MDI_SUCCESS) {
9981 		pptr->port_mpxio++;
9982 	}
9983 
9984 	/*
9985 	 * The following code is putting the new port structure in the global
9986 	 * list of ports and, if it is the first port to attach, it start the
9987 	 * fcp_watchdog_tick.
9988 	 *
9989 	 * Why put this new port in the global before we are done attaching it?
9990 	 * We are actually making the structure globally known before we are
9991 	 * done attaching it.  The reason for that is: because of the code that
9992 	 * follows.  At this point the resources to handle the port are
9993 	 * allocated.  This function is now going to do the following:
9994 	 *
9995 	 *   1) It is going to try to register with the name server advertizing
9996 	 *	the new FCP capability of the port.
9997 	 *   2) It is going to play the role of the fp/fctl layer by building
9998 	 *	a list of worlwide names reachable through this port and call
9999 	 *	itself on fcp_statec_callback().  That requires the port to
10000 	 *	be part of the global list.
10001 	 */
10002 	mutex_enter(&fcp_global_mutex);
10003 	if (fcp_port_head == NULL) {
10004 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
10005 	}
10006 	pptr->port_next = fcp_port_head;
10007 	fcp_port_head = pptr;
10008 	soft_state_linked++;
10009 
10010 	if (fcp_watchdog_init++ == 0) {
10011 		fcp_watchdog_tick = fcp_watchdog_timeout *
10012 		    drv_usectohz(1000000);
10013 		fcp_watchdog_id = timeout(fcp_watch, NULL,
10014 		    fcp_watchdog_tick);
10015 	}
10016 	mutex_exit(&fcp_global_mutex);
10017 
10018 	/*
10019 	 * Here an attempt is made to register with the name server, the new
10020 	 * FCP capability.  That is done using an RTF_ID to the name server.
10021 	 * It is done synchronously.  The function fcp_do_ns_registry()
10022 	 * doesn't return till the name server responded.
10023 	 * On failures, just ignore it for now and it will get retried during
10024 	 * state change callbacks. We'll set a flag to show this failure
10025 	 */
10026 	if (fcp_do_ns_registry(pptr, s_id)) {
10027 		mutex_enter(&pptr->port_mutex);
10028 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10029 		mutex_exit(&pptr->port_mutex);
10030 	} else {
10031 		mutex_enter(&pptr->port_mutex);
10032 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10033 		mutex_exit(&pptr->port_mutex);
10034 	}
10035 
10036 	/*
10037 	 * Lookup for boot WWN property
10038 	 */
10039 	if (modrootloaded != 1) {
10040 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10041 		    ddi_get_parent(pinfo->port_dip),
10042 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10043 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10044 		    (nbytes == FC_WWN_SIZE)) {
10045 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10046 		}
10047 		if (boot_wwn) {
10048 			ddi_prop_free(boot_wwn);
10049 		}
10050 	}
10051 
10052 	/*
10053 	 * Handle various topologies and link states.
10054 	 */
10055 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10056 	case FC_STATE_OFFLINE:
10057 
10058 		/*
10059 		 * we're attaching a port where the link is offline
10060 		 *
10061 		 * Wait for ONLINE, at which time a state
10062 		 * change will cause a statec_callback
10063 		 *
10064 		 * in the mean time, do not do anything
10065 		 */
10066 		res = DDI_SUCCESS;
10067 		pptr->port_state |= FCP_STATE_OFFLINE;
10068 		break;
10069 
10070 	case FC_STATE_ONLINE: {
10071 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
10072 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10073 			res = DDI_SUCCESS;
10074 			break;
10075 		}
10076 		/*
10077 		 * discover devices and create nodes (a private
10078 		 * loop or point-to-point)
10079 		 */
10080 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10081 
10082 		/*
10083 		 * At this point we are going to build a list of all the ports
10084 		 * that	can be reached through this local port.	 It looks like
10085 		 * we cannot handle more than FCP_MAX_DEVICES per local port
10086 		 * (128).
10087 		 */
10088 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10089 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10090 		    KM_NOSLEEP)) == NULL) {
10091 			fcp_log(CE_WARN, pptr->port_dip,
10092 			    "!fcp%d: failed to allocate portmap",
10093 			    instance);
10094 			goto fail;
10095 		}
10096 
10097 		/*
10098 		 * fc_ulp_getportmap() is going to provide us with the list of
10099 		 * remote ports in the buffer we just allocated.  The way the
10100 		 * list is going to be retrieved depends on the topology.
10101 		 * However, if we are connected to a Fabric, a name server
10102 		 * request may be sent to get the list of FCP capable ports.
10103 		 * It should be noted that is the case the request is
10104 		 * synchronous.	 This means we are stuck here till the name
10105 		 * server replies.  A lot of things can change during that time
10106 		 * and including, may be, being called on
10107 		 * fcp_statec_callback() for different reasons. I'm not sure
10108 		 * the code can handle that.
10109 		 */
10110 		max_cnt = FCP_MAX_DEVICES;
10111 		alloc_cnt = FCP_MAX_DEVICES;
10112 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10113 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10114 		    FC_SUCCESS) {
10115 			caddr_t msg;
10116 
10117 			(void) fc_ulp_error(res, &msg);
10118 
10119 			/*
10120 			 * this	 just means the transport is
10121 			 * busy perhaps building a portmap so,
10122 			 * for now, succeed this port attach
10123 			 * when the transport has a new map,
10124 			 * it'll send us a state change then
10125 			 */
10126 			fcp_log(CE_WARN, pptr->port_dip,
10127 			    "!failed to get port map : %s", msg);
10128 
10129 			res = DDI_SUCCESS;
10130 			break;	/* go return result */
10131 		}
10132 		if (max_cnt > alloc_cnt) {
10133 			alloc_cnt = max_cnt;
10134 		}
10135 
10136 		/*
10137 		 * We are now going to call fcp_statec_callback() ourselves.
10138 		 * By issuing this call we are trying to kick off the enumera-
10139 		 * tion process.
10140 		 */
10141 		/*
10142 		 * let the state change callback do the SCSI device
10143 		 * discovery and create the devinfos
10144 		 */
10145 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10146 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10147 		    max_cnt, pptr->port_id);
10148 
10149 		res = DDI_SUCCESS;
10150 		break;
10151 	}
10152 
10153 	default:
10154 		/* unknown port state */
10155 		fcp_log(CE_WARN, pptr->port_dip,
10156 		    "!fcp%d: invalid port state at attach=0x%x",
10157 		    instance, pptr->port_phys_state);
10158 
10159 		mutex_enter(&pptr->port_mutex);
10160 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10161 		mutex_exit(&pptr->port_mutex);
10162 
10163 		res = DDI_SUCCESS;
10164 		break;
10165 	}
10166 
10167 	/* free temp list if used */
10168 	if (tmp_list != NULL) {
10169 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10170 	}
10171 
10172 	/* note the attach time */
10173 	pptr->port_attach_time = ddi_get_lbolt64();
10174 
10175 	/* all done */
10176 	return (res);
10177 
10178 	/* a failure we have to clean up after */
10179 fail:
10180 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10181 
10182 	if (soft_state_linked) {
10183 		/* remove this fcp_port from the linked list */
10184 		(void) fcp_soft_state_unlink(pptr);
10185 	}
10186 
10187 	/* unbind and free event set */
10188 	if (pptr->port_ndi_event_hdl) {
10189 		if (event_bind) {
10190 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10191 			    &pptr->port_ndi_events, NDI_SLEEP);
10192 		}
10193 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10194 	}
10195 
10196 	if (pptr->port_ndi_event_defs) {
10197 		(void) kmem_free(pptr->port_ndi_event_defs,
10198 		    sizeof (fcp_ndi_event_defs));
10199 	}
10200 
10201 	/*
10202 	 * Clean up mpxio stuff
10203 	 */
10204 	if (pptr->port_mpxio) {
10205 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10206 		pptr->port_mpxio--;
10207 	}
10208 
10209 	/* undo SCSI HBA setup */
10210 	if (hba_attached) {
10211 		(void) scsi_hba_detach(pptr->port_dip);
10212 	}
10213 	if (pptr->port_tran != NULL) {
10214 		scsi_hba_tran_free(pptr->port_tran);
10215 	}
10216 
10217 	mutex_enter(&fcp_global_mutex);
10218 
10219 	/*
10220 	 * We check soft_state_linked, because it is incremented right before
10221 	 * we call increment fcp_watchdog_init.	 Therefore, we know if
10222 	 * soft_state_linked is still FALSE, we do not want to decrement
10223 	 * fcp_watchdog_init or possibly call untimeout.
10224 	 */
10225 
10226 	if (soft_state_linked) {
10227 		if (--fcp_watchdog_init == 0) {
10228 			timeout_id_t	tid = fcp_watchdog_id;
10229 
10230 			mutex_exit(&fcp_global_mutex);
10231 			(void) untimeout(tid);
10232 		} else {
10233 			mutex_exit(&fcp_global_mutex);
10234 		}
10235 	} else {
10236 		mutex_exit(&fcp_global_mutex);
10237 	}
10238 
10239 	if (mutex_initted) {
10240 		mutex_destroy(&pptr->port_mutex);
10241 		mutex_destroy(&pptr->port_pkt_mutex);
10242 	}
10243 
10244 	if (tmp_list != NULL) {
10245 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10246 	}
10247 
10248 	/* this makes pptr invalid */
10249 	ddi_soft_state_free(fcp_softstate, instance);
10250 
10251 	return (DDI_FAILURE);
10252 }
10253 
10254 
10255 static int
10256 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10257 {
10258 	int count = 0;
10259 
10260 	mutex_enter(&pptr->port_mutex);
10261 
10262 	/*
10263 	 * if the port is powered down or suspended, nothing else
10264 	 * to do; just return.
10265 	 */
10266 	if (flag != FCP_STATE_DETACHING) {
10267 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10268 		    FCP_STATE_SUSPENDED)) {
10269 			pptr->port_state |= flag;
10270 			mutex_exit(&pptr->port_mutex);
10271 			return (FC_SUCCESS);
10272 		}
10273 	}
10274 
10275 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10276 		mutex_exit(&pptr->port_mutex);
10277 		return (FC_FAILURE);
10278 	}
10279 
10280 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10281 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10282 	    "fcp_handle_port_detach: port is detaching");
10283 
10284 	pptr->port_state |= flag;
10285 
10286 	/*
10287 	 * Wait for any ongoing reconfig/ipkt to complete, that
10288 	 * ensures the freeing to targets/luns is safe.
10289 	 * No more ref to this port should happen from statec/ioctl
10290 	 * after that as it was removed from the global port list.
10291 	 */
10292 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10293 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10294 		/*
10295 		 * Let's give sufficient time for reconfig/ipkt
10296 		 * to complete.
10297 		 */
10298 		if (count++ >= FCP_ICMD_DEADLINE) {
10299 			break;
10300 		}
10301 		mutex_exit(&pptr->port_mutex);
10302 		delay(drv_usectohz(1000000));
10303 		mutex_enter(&pptr->port_mutex);
10304 	}
10305 
10306 	/*
10307 	 * if the driver is still busy then fail to
10308 	 * suspend/power down.
10309 	 */
10310 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10311 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10312 		pptr->port_state &= ~flag;
10313 		mutex_exit(&pptr->port_mutex);
10314 		return (FC_FAILURE);
10315 	}
10316 
10317 	if (flag == FCP_STATE_DETACHING) {
10318 		pptr = fcp_soft_state_unlink(pptr);
10319 		ASSERT(pptr != NULL);
10320 	}
10321 
10322 	pptr->port_link_cnt++;
10323 	pptr->port_state |= FCP_STATE_OFFLINE;
10324 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10325 
10326 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10327 	    FCP_CAUSE_LINK_DOWN);
10328 	mutex_exit(&pptr->port_mutex);
10329 
10330 	/* kill watch dog timer if we're the last */
10331 	mutex_enter(&fcp_global_mutex);
10332 	if (--fcp_watchdog_init == 0) {
10333 		timeout_id_t	tid = fcp_watchdog_id;
10334 		mutex_exit(&fcp_global_mutex);
10335 		(void) untimeout(tid);
10336 	} else {
10337 		mutex_exit(&fcp_global_mutex);
10338 	}
10339 
10340 	/* clean up the port structures */
10341 	if (flag == FCP_STATE_DETACHING) {
10342 		fcp_cleanup_port(pptr, instance);
10343 	}
10344 
10345 	return (FC_SUCCESS);
10346 }
10347 
10348 
10349 static void
10350 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10351 {
10352 	ASSERT(pptr != NULL);
10353 
10354 	/* unbind and free event set */
10355 	if (pptr->port_ndi_event_hdl) {
10356 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10357 		    &pptr->port_ndi_events, NDI_SLEEP);
10358 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10359 	}
10360 
10361 	if (pptr->port_ndi_event_defs) {
10362 		(void) kmem_free(pptr->port_ndi_event_defs,
10363 		    sizeof (fcp_ndi_event_defs));
10364 	}
10365 
10366 	/* free the lun/target structures and devinfos */
10367 	fcp_free_targets(pptr);
10368 
10369 	/*
10370 	 * Clean up mpxio stuff
10371 	 */
10372 	if (pptr->port_mpxio) {
10373 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10374 		pptr->port_mpxio--;
10375 	}
10376 
10377 	/* clean up SCSA stuff */
10378 	(void) scsi_hba_detach(pptr->port_dip);
10379 	if (pptr->port_tran != NULL) {
10380 		scsi_hba_tran_free(pptr->port_tran);
10381 	}
10382 
10383 #ifdef	KSTATS_CODE
10384 	/* clean up kstats */
10385 	if (pptr->fcp_ksp != NULL) {
10386 		kstat_delete(pptr->fcp_ksp);
10387 	}
10388 #endif
10389 
10390 	/* clean up soft state mutexes/condition variables */
10391 	mutex_destroy(&pptr->port_mutex);
10392 	mutex_destroy(&pptr->port_pkt_mutex);
10393 
10394 	/* all done with soft state */
10395 	ddi_soft_state_free(fcp_softstate, instance);
10396 }
10397 
10398 /*
10399  *     Function: fcp_kmem_cache_constructor
10400  *
10401  *  Description: This function allocates and initializes the resources required
10402  *		 to build a scsi_pkt structure the target driver.  The result
10403  *		 of the allocation and initialization will be cached in the
10404  *		 memory cache.	As DMA resources may be allocated here, that
10405  *		 means DMA resources will be tied up in the cache manager.
10406  *		 This is a tradeoff that has been made for performance reasons.
10407  *
10408  *     Argument: *buf		Memory to preinitialize.
10409  *		 *arg		FCP port structure (fcp_port).
10410  *		 kmflags	Value passed to kmem_cache_alloc() and
10411  *				propagated to the constructor.
10412  *
10413  * Return Value: 0	Allocation/Initialization was successful.
10414  *		 -1	Allocation or Initialization failed.
10415  *
10416  *
10417  * If the returned value is 0, the buffer is initialized like this:
10418  *
10419  *		    +================================+
10420  *	     +----> |	      struct scsi_pkt	     |
10421  *	     |	    |				     |
10422  *	     | +--- | pkt_ha_private		     |
10423  *	     | |    |				     |
10424  *	     | |    +================================+
10425  *	     | |
10426  *	     | |    +================================+
10427  *	     | +--> |	    struct fcp_pkt	     | <---------+
10428  *	     |	    |				     |		 |
10429  *	     +----- | cmd_pkt			     |		 |
10430  *		    |			  cmd_fp_pkt | ---+	 |
10431  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10432  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10433  *	  |    |    |--------------------------------|	  |	 |
10434  *	  |    |    |	      struct fc_packet	     | <--+	 |
10435  *	  |    |    |				     |		 |
10436  *	  |    |    |		     pkt_ulp_private | ----------+
10437  *	  |    |    |		     pkt_fca_private | -----+
10438  *	  |    |    |		     pkt_data_cookie | ---+ |
10439  *	  |    |    | pkt_cmdlen		     |	  | |
10440  *	  |    |(a) | pkt_rsplen		     |	  | |
10441  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10442  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10443  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10444  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10445  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10446  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10447  *		    +================================+	  | |  |   |   |    |
10448  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10449  *		    |				     |	    |  |   |   |    |
10450  *		    +================================+	    |  |   |   |    |
10451  *		    |	      fca_private	     | <----+  |   |   |    |
10452  *		    |				     |	       |   |   |    |
10453  *		    +================================+	       |   |   |    |
10454  *							       |   |   |    |
10455  *							       |   |   |    |
10456  *		    +================================+	 (d)   |   |   |    |
10457  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10458  *		    |				     |		   |   |    |
10459  *		    +================================+		   |   |    |
10460  *								   |   |    |
10461  *		    +================================+	 (d)	   |   |    |
10462  *		    |		fcp_resp	     | <-----------+   |    |
10463  *		    |	(DMA resources associated)   |		       |    |
10464  *		    +================================+		       |    |
10465  *								       |    |
10466  *								       |    |
10467  *								       |    |
10468  *		    +================================+	 (c)	       |    |
10469  *		    |	     fcp_cmd cookies	     | <---------------+    |
10470  *		    |				     |			    |
10471  *		    +================================+			    |
10472  *									    |
10473  *		    +================================+	 (c)		    |
10474  *		    |		 fcp_cmd	     | <--------------------+
10475  *		    |	(DMA resources associated)   |
10476  *		    +================================+
10477  *
10478  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10479  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10480  * (c) Only if DMA is used for the FCP_CMD buffer.
10481  * (d) Only if DMA is used for the FCP_RESP buffer
10482  */
10483 static int
10484 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10485     int kmflags)
10486 {
10487 	struct fcp_pkt	*cmd;
10488 	struct fcp_port	*pptr;
10489 	fc_packet_t	*fpkt;
10490 
10491 	pptr = (struct fcp_port *)tran->tran_hba_private;
10492 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10493 	bzero(cmd, tran->tran_hba_len);
10494 
10495 	cmd->cmd_pkt = pkt;
10496 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10497 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10498 	cmd->cmd_fp_pkt = fpkt;
10499 
10500 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10501 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10502 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10503 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10504 
10505 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10506 	    sizeof (struct fcp_pkt));
10507 
10508 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10509 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10510 
10511 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10512 		/*
10513 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10514 		 * fcp_resp.  The transfer of information will be done by
10515 		 * bcopy.
10516 		 * The naming of the flags (that is actually a value) is
10517 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10518 		 * DMA" but instead "NO DMA".
10519 		 */
10520 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10521 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10522 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10523 	} else {
10524 		/*
10525 		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
10526 		 * buffer.  A buffer is allocated for each one the ddi_dma_*
10527 		 * interfaces.
10528 		 */
10529 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10530 			return (-1);
10531 		}
10532 	}
10533 
10534 	return (0);
10535 }
10536 
10537 /*
10538  *     Function: fcp_kmem_cache_destructor
10539  *
10540  *  Description: Called by the destructor of the cache managed by SCSA.
10541  *		 All the resources pre-allocated in fcp_pkt_constructor
10542  *		 and the data also pre-initialized in fcp_pkt_constructor
10543  *		 are freed and uninitialized here.
10544  *
10545  *     Argument: *buf		Memory to uninitialize.
10546  *		 *arg		FCP port structure (fcp_port).
10547  *
10548  * Return Value: None
10549  *
10550  *	Context: kernel
10551  */
10552 static void
10553 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10554 {
10555 	struct fcp_pkt	*cmd;
10556 	struct fcp_port	*pptr;
10557 
10558 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10559 	cmd = pkt->pkt_ha_private;
10560 
10561 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10562 		/*
10563 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10564 		 * buffer and DMA resources allocated to do so are released.
10565 		 */
10566 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10567 	}
10568 }
10569 
10570 /*
10571  *     Function: fcp_alloc_cmd_resp
10572  *
10573  *  Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10574  *		 will be DMAed by the HBA.  The buffer is allocated applying
10575  *		 the DMA requirements for the HBA.  The buffers allocated will
10576  *		 also be bound.	 DMA resources are allocated in the process.
10577  *		 They will be released by fcp_free_cmd_resp().
10578  *
10579  *     Argument: *pptr	FCP port.
10580  *		 *fpkt	fc packet for which the cmd and resp packet should be
10581  *			allocated.
10582  *		 flags	Allocation flags.
10583  *
10584  * Return Value: FC_FAILURE
10585  *		 FC_SUCCESS
10586  *
10587  *	Context: User or Kernel context only if flags == KM_SLEEP.
10588  *		 Interrupt context if the KM_SLEEP is not specified.
10589  */
10590 static int
10591 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10592 {
10593 	int			rval;
10594 	int			cmd_len;
10595 	int			resp_len;
10596 	ulong_t			real_len;
10597 	int			(*cb) (caddr_t);
10598 	ddi_dma_cookie_t	pkt_cookie;
10599 	ddi_dma_cookie_t	*cp;
10600 	uint32_t		cnt;
10601 
10602 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10603 
10604 	cmd_len = fpkt->pkt_cmdlen;
10605 	resp_len = fpkt->pkt_rsplen;
10606 
10607 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10608 
10609 	/* Allocation of a DMA handle used in subsequent calls. */
10610 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10611 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10612 		return (FC_FAILURE);
10613 	}
10614 
10615 	/* A buffer is allocated that satisfies the DMA requirements. */
10616 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10617 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10618 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10619 
10620 	if (rval != DDI_SUCCESS) {
10621 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10622 		return (FC_FAILURE);
10623 	}
10624 
10625 	if (real_len < cmd_len) {
10626 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10627 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10628 		return (FC_FAILURE);
10629 	}
10630 
10631 	/* The buffer allocated is DMA bound. */
10632 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10633 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10634 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10635 
10636 	if (rval != DDI_DMA_MAPPED) {
10637 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10638 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10639 		return (FC_FAILURE);
10640 	}
10641 
10642 	if (fpkt->pkt_cmd_cookie_cnt >
10643 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10644 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10645 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10646 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10647 		return (FC_FAILURE);
10648 	}
10649 
10650 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10651 
10652 	/*
10653 	 * The buffer where the scatter/gather list is going to be built is
10654 	 * allocated.
10655 	 */
10656 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10657 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10658 	    KM_NOSLEEP);
10659 
10660 	if (cp == NULL) {
10661 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10662 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10663 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10664 		return (FC_FAILURE);
10665 	}
10666 
10667 	/*
10668 	 * The scatter/gather list for the buffer we just allocated is built
10669 	 * here.
10670 	 */
10671 	*cp = pkt_cookie;
10672 	cp++;
10673 
10674 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10675 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10676 		    &pkt_cookie);
10677 		*cp = pkt_cookie;
10678 	}
10679 
10680 	ASSERT(fpkt->pkt_resp_dma == NULL);
10681 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10682 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10683 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10684 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10685 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10686 		return (FC_FAILURE);
10687 	}
10688 
10689 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10690 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10691 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10692 	    &fpkt->pkt_resp_acc);
10693 
10694 	if (rval != DDI_SUCCESS) {
10695 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10696 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10697 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10698 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10699 		kmem_free(fpkt->pkt_cmd_cookie,
10700 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10701 		return (FC_FAILURE);
10702 	}
10703 
10704 	if (real_len < resp_len) {
10705 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10706 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10707 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10708 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10709 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10710 		kmem_free(fpkt->pkt_cmd_cookie,
10711 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10712 		return (FC_FAILURE);
10713 	}
10714 
10715 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10716 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10717 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10718 
10719 	if (rval != DDI_DMA_MAPPED) {
10720 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10721 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10722 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10723 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10724 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10725 		kmem_free(fpkt->pkt_cmd_cookie,
10726 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10727 		return (FC_FAILURE);
10728 	}
10729 
10730 	if (fpkt->pkt_resp_cookie_cnt >
10731 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10732 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10733 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10734 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10735 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10736 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10737 		kmem_free(fpkt->pkt_cmd_cookie,
10738 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10739 		return (FC_FAILURE);
10740 	}
10741 
10742 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10743 
10744 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10745 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10746 	    KM_NOSLEEP);
10747 
10748 	if (cp == NULL) {
10749 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10750 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10751 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10752 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10753 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10754 		kmem_free(fpkt->pkt_cmd_cookie,
10755 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10756 		return (FC_FAILURE);
10757 	}
10758 
10759 	*cp = pkt_cookie;
10760 	cp++;
10761 
10762 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10763 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10764 		    &pkt_cookie);
10765 		*cp = pkt_cookie;
10766 	}
10767 
10768 	return (FC_SUCCESS);
10769 }
10770 
10771 /*
10772  *     Function: fcp_free_cmd_resp
10773  *
10774  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10775  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10776  *		 associated with them.	That includes the DMA resources and the
10777  *		 buffer allocated for the cookies of each one of them.
10778  *
10779  *     Argument: *pptr		FCP port context.
10780  *		 *fpkt		fc packet containing the cmd and resp packet
10781  *				to be released.
10782  *
10783  * Return Value: None
10784  *
10785  *	Context: Interrupt, User and Kernel context.
10786  */
10787 /* ARGSUSED */
10788 static void
10789 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10790 {
10791 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10792 
10793 	if (fpkt->pkt_resp_dma) {
10794 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10795 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10796 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10797 	}
10798 
10799 	if (fpkt->pkt_resp_cookie) {
10800 		kmem_free(fpkt->pkt_resp_cookie,
10801 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10802 		fpkt->pkt_resp_cookie = NULL;
10803 	}
10804 
10805 	if (fpkt->pkt_cmd_dma) {
10806 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10807 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10808 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10809 	}
10810 
10811 	if (fpkt->pkt_cmd_cookie) {
10812 		kmem_free(fpkt->pkt_cmd_cookie,
10813 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10814 		fpkt->pkt_cmd_cookie = NULL;
10815 	}
10816 }
10817 
10818 
10819 /*
10820  * called by the transport to do our own target initialization
10821  *
10822  * can acquire and release the global mutex
10823  */
10824 /* ARGSUSED */
10825 static int
10826 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10827     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10828 {
10829 	uchar_t			*bytes;
10830 	uint_t			nbytes;
10831 	uint16_t		lun_num;
10832 	struct fcp_tgt	*ptgt;
10833 	struct fcp_lun	*plun;
10834 	struct fcp_port	*pptr = (struct fcp_port *)
10835 	    hba_tran->tran_hba_private;
10836 
10837 	ASSERT(pptr != NULL);
10838 
10839 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10840 	    FCP_BUF_LEVEL_8, 0,
10841 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10842 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10843 
10844 	/* get our port WWN property */
10845 	bytes = NULL;
10846 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10847 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10848 	    (nbytes != FC_WWN_SIZE)) {
10849 		/* no port WWN property */
10850 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10851 		    FCP_BUF_LEVEL_8, 0,
10852 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10853 		    " for %s (instance %d): bytes=%p nbytes=%x",
10854 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10855 		    nbytes);
10856 
10857 		if (bytes != NULL) {
10858 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10859 		}
10860 
10861 		return (DDI_NOT_WELL_FORMED);
10862 	}
10863 	ASSERT(bytes != NULL);
10864 
10865 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10866 	    LUN_PROP, 0xFFFF);
10867 	if (lun_num == 0xFFFF) {
10868 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10869 		    FCP_BUF_LEVEL_8, 0,
10870 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10871 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10872 		    ddi_get_instance(tgt_dip));
10873 
10874 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10875 		return (DDI_NOT_WELL_FORMED);
10876 	}
10877 
10878 	mutex_enter(&pptr->port_mutex);
10879 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10880 		mutex_exit(&pptr->port_mutex);
10881 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10882 		    FCP_BUF_LEVEL_8, 0,
10883 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10884 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10885 		    ddi_get_instance(tgt_dip));
10886 
10887 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10888 		return (DDI_FAILURE);
10889 	}
10890 
10891 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10892 	    FC_WWN_SIZE) == 0);
10893 	ASSERT(plun->lun_num == lun_num);
10894 
10895 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10896 
10897 	ptgt = plun->lun_tgt;
10898 
10899 	mutex_enter(&ptgt->tgt_mutex);
10900 	plun->lun_tgt_count++;
10901 	scsi_device_hba_private_set(sd, plun);
10902 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10903 	plun->lun_sd = sd;
10904 	mutex_exit(&ptgt->tgt_mutex);
10905 	mutex_exit(&pptr->port_mutex);
10906 
10907 	return (DDI_SUCCESS);
10908 }
10909 
10910 /*ARGSUSED*/
10911 static int
10912 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10913     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10914 {
10915 	uchar_t			*bytes;
10916 	uint_t			nbytes;
10917 	uint16_t		lun_num;
10918 	struct fcp_tgt	*ptgt;
10919 	struct fcp_lun	*plun;
10920 	struct fcp_port	*pptr = (struct fcp_port *)
10921 	    hba_tran->tran_hba_private;
10922 	child_info_t		*cip;
10923 
10924 	ASSERT(pptr != NULL);
10925 
10926 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10927 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10928 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10929 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10930 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10931 
10932 	cip = (child_info_t *)sd->sd_pathinfo;
10933 	if (cip == NULL) {
10934 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10935 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10936 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10937 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10938 		    ddi_get_instance(tgt_dip));
10939 
10940 		return (DDI_NOT_WELL_FORMED);
10941 	}
10942 
10943 	/* get our port WWN property */
10944 	bytes = NULL;
10945 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10946 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10947 	    (nbytes != FC_WWN_SIZE)) {
10948 		if (bytes) {
10949 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10950 		}
10951 		return (DDI_NOT_WELL_FORMED);
10952 	}
10953 
10954 	ASSERT(bytes != NULL);
10955 
10956 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10957 	    LUN_PROP, 0xFFFF);
10958 	if (lun_num == 0xFFFF) {
10959 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10960 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10961 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10962 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10963 		    ddi_get_instance(tgt_dip));
10964 
10965 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10966 		return (DDI_NOT_WELL_FORMED);
10967 	}
10968 
10969 	mutex_enter(&pptr->port_mutex);
10970 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10971 		mutex_exit(&pptr->port_mutex);
10972 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10973 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10974 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10975 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10976 		    ddi_get_instance(tgt_dip));
10977 
10978 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10979 		return (DDI_FAILURE);
10980 	}
10981 
10982 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10983 	    FC_WWN_SIZE) == 0);
10984 	ASSERT(plun->lun_num == lun_num);
10985 
10986 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10987 
10988 	ptgt = plun->lun_tgt;
10989 
10990 	mutex_enter(&ptgt->tgt_mutex);
10991 	plun->lun_tgt_count++;
10992 	scsi_device_hba_private_set(sd, plun);
10993 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10994 	plun->lun_sd = sd;
10995 	mutex_exit(&ptgt->tgt_mutex);
10996 	mutex_exit(&pptr->port_mutex);
10997 
10998 	return (DDI_SUCCESS);
10999 }
11000 
11001 
11002 /*
11003  * called by the transport to do our own target initialization
11004  *
11005  * can acquire and release the global mutex
11006  */
11007 /* ARGSUSED */
11008 static int
11009 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11010     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11011 {
11012 	struct fcp_port	*pptr = (struct fcp_port *)
11013 	    hba_tran->tran_hba_private;
11014 	int			rval;
11015 
11016 	ASSERT(pptr != NULL);
11017 
11018 	/*
11019 	 * Child node is getting initialized.  Look at the mpxio component
11020 	 * type on the child device to see if this device is mpxio managed
11021 	 * or not.
11022 	 */
11023 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11024 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11025 	} else {
11026 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11027 	}
11028 
11029 	return (rval);
11030 }
11031 
11032 
11033 /* ARGSUSED */
11034 static void
11035 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11036     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11037 {
11038 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
11039 	struct fcp_tgt	*ptgt;
11040 
11041 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11042 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
11043 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11044 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11045 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11046 
11047 	if (plun == NULL) {
11048 		return;
11049 	}
11050 	ptgt = plun->lun_tgt;
11051 
11052 	ASSERT(ptgt != NULL);
11053 
11054 	mutex_enter(&ptgt->tgt_mutex);
11055 	ASSERT(plun->lun_tgt_count > 0);
11056 
11057 	if (--plun->lun_tgt_count == 0) {
11058 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11059 	}
11060 	plun->lun_sd = NULL;
11061 	mutex_exit(&ptgt->tgt_mutex);
11062 }
11063 
11064 /*
11065  *     Function: fcp_scsi_start
11066  *
11067  *  Description: This function is called by the target driver to request a
11068  *		 command to be sent.
11069  *
11070  *     Argument: *ap		SCSI address of the device.
11071  *		 *pkt		SCSI packet containing the cmd to send.
11072  *
11073  * Return Value: TRAN_ACCEPT
11074  *		 TRAN_BUSY
11075  *		 TRAN_BADPKT
11076  *		 TRAN_FATAL_ERROR
11077  */
11078 static int
11079 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11080 {
11081 	struct fcp_port	*pptr = ADDR2FCP(ap);
11082 	struct fcp_lun	*plun = ADDR2LUN(ap);
11083 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
11084 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11085 	int			rval;
11086 
11087 	/* ensure command isn't already issued */
11088 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11089 
11090 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11091 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
11092 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11093 
11094 	/*
11095 	 * It is strange that we enter the fcp_port mutex and the target
11096 	 * mutex to check the lun state (which has a mutex of its own).
11097 	 */
11098 	mutex_enter(&pptr->port_mutex);
11099 	mutex_enter(&ptgt->tgt_mutex);
11100 
11101 	/*
11102 	 * If the device is offline and is not in the process of coming
11103 	 * online, fail the request.
11104 	 */
11105 
11106 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11107 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
11108 		mutex_exit(&ptgt->tgt_mutex);
11109 		mutex_exit(&pptr->port_mutex);
11110 
11111 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11112 			pkt->pkt_reason = CMD_DEV_GONE;
11113 		}
11114 
11115 		return (TRAN_FATAL_ERROR);
11116 	}
11117 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11118 
11119 	/*
11120 	 * If we are suspended, kernel is trying to dump, so don't
11121 	 * block, fail or defer requests - send them down right away.
11122 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11123 	 * assume we have been suspended.  There is hardware such as
11124 	 * the v880 that doesn't do PM.	 Thus, the check for
11125 	 * ddi_in_panic.
11126 	 *
11127 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11128 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11129 	 * either the device will have gone away or changed and we can fail
11130 	 * the request, or we can proceed if the device didn't change.
11131 	 *
11132 	 * If the pd in the target or the packet is NULL it's probably
11133 	 * because the device has gone away, we allow the request to be
11134 	 * put on the internal queue here in case the device comes back within
11135 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11136 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11137 	 * could be NULL because the device was disappearing during or since
11138 	 * packet initialization.
11139 	 */
11140 
11141 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11142 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11143 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11144 	    (ptgt->tgt_pd_handle == NULL) ||
11145 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11146 		/*
11147 		 * If ((LUN is busy AND
11148 		 *	LUN not suspended AND
11149 		 *	The system is not in panic state) OR
11150 		 *	(The port is coming up))
11151 		 *
11152 		 * We check to see if the any of the flags FLAG_NOINTR or
11153 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11154 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11155 		 */
11156 		mutex_exit(&ptgt->tgt_mutex);
11157 		mutex_exit(&pptr->port_mutex);
11158 
11159 		/* see if using interrupts is allowed (so queueing'll work) */
11160 		if (pkt->pkt_flags & FLAG_NOINTR) {
11161 			pkt->pkt_resid = 0;
11162 			return (TRAN_BUSY);
11163 		}
11164 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11165 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11166 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11167 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11168 			return (TRAN_BUSY);
11169 		}
11170 #ifdef	DEBUG
11171 		mutex_enter(&pptr->port_pkt_mutex);
11172 		pptr->port_npkts++;
11173 		mutex_exit(&pptr->port_pkt_mutex);
11174 #endif /* DEBUG */
11175 
11176 		/* got queue up the pkt for later */
11177 		fcp_queue_pkt(pptr, cmd);
11178 		return (TRAN_ACCEPT);
11179 	}
11180 	cmd->cmd_state = FCP_PKT_ISSUED;
11181 
11182 	mutex_exit(&ptgt->tgt_mutex);
11183 	mutex_exit(&pptr->port_mutex);
11184 
11185 	/*
11186 	 * Now that we released the mutexes, what was protected by them can
11187 	 * change.
11188 	 */
11189 
11190 	/*
11191 	 * If there is a reconfiguration in progress, wait for it to complete.
11192 	 */
11193 	fcp_reconfig_wait(pptr);
11194 
11195 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11196 	    pkt->pkt_time : 0;
11197 
11198 	/* prepare the packet */
11199 
11200 	fcp_prepare_pkt(pptr, cmd, plun);
11201 
11202 	if (cmd->cmd_pkt->pkt_time) {
11203 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11204 	} else {
11205 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11206 	}
11207 
11208 	/*
11209 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11210 	 * have to do polled I/O
11211 	 */
11212 	if (pkt->pkt_flags & FLAG_NOINTR) {
11213 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11214 		return (fcp_dopoll(pptr, cmd));
11215 	}
11216 
11217 #ifdef	DEBUG
11218 	mutex_enter(&pptr->port_pkt_mutex);
11219 	pptr->port_npkts++;
11220 	mutex_exit(&pptr->port_pkt_mutex);
11221 #endif /* DEBUG */
11222 
11223 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11224 	if (rval == FC_SUCCESS) {
11225 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11226 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11227 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11228 		return (TRAN_ACCEPT);
11229 	}
11230 
11231 	cmd->cmd_state = FCP_PKT_IDLE;
11232 
11233 #ifdef	DEBUG
11234 	mutex_enter(&pptr->port_pkt_mutex);
11235 	pptr->port_npkts--;
11236 	mutex_exit(&pptr->port_pkt_mutex);
11237 #endif /* DEBUG */
11238 
11239 	/*
11240 	 * For lack of clearer definitions, choose
11241 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11242 	 */
11243 
11244 	if (rval == FC_TRAN_BUSY) {
11245 		pkt->pkt_resid = 0;
11246 		rval = TRAN_BUSY;
11247 	} else {
11248 		mutex_enter(&ptgt->tgt_mutex);
11249 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11250 			child_info_t	*cip;
11251 
11252 			mutex_enter(&plun->lun_mutex);
11253 			cip = plun->lun_cip;
11254 			mutex_exit(&plun->lun_mutex);
11255 
11256 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11257 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11258 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11259 			    plun->lun_tgt->tgt_d_id, rval, cip);
11260 
11261 			rval = TRAN_FATAL_ERROR;
11262 		} else {
11263 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11264 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11265 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11266 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11267 				    pkt);
11268 				rval = TRAN_BUSY;
11269 			} else {
11270 				rval = TRAN_ACCEPT;
11271 				fcp_queue_pkt(pptr, cmd);
11272 			}
11273 		}
11274 		mutex_exit(&ptgt->tgt_mutex);
11275 	}
11276 
11277 	return (rval);
11278 }
11279 
11280 /*
11281  * called by the transport to abort a packet
11282  */
11283 /*ARGSUSED*/
11284 static int
11285 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11286 {
11287 	int tgt_cnt;
11288 	struct fcp_port		*pptr = ADDR2FCP(ap);
11289 	struct fcp_lun	*plun = ADDR2LUN(ap);
11290 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11291 
11292 	if (pkt == NULL) {
11293 		if (ptgt) {
11294 			mutex_enter(&ptgt->tgt_mutex);
11295 			tgt_cnt = ptgt->tgt_change_cnt;
11296 			mutex_exit(&ptgt->tgt_mutex);
11297 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11298 			return (TRUE);
11299 		}
11300 	}
11301 	return (FALSE);
11302 }
11303 
11304 
11305 /*
11306  * Perform reset
11307  */
11308 int
11309 fcp_scsi_reset(struct scsi_address *ap, int level)
11310 {
11311 	int			rval = 0;
11312 	struct fcp_port		*pptr = ADDR2FCP(ap);
11313 	struct fcp_lun	*plun = ADDR2LUN(ap);
11314 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11315 
11316 	if (level == RESET_ALL) {
11317 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11318 			rval = 1;
11319 		}
11320 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11321 		/*
11322 		 * If we are in the middle of discovery, return
11323 		 * SUCCESS as this target will be rediscovered
11324 		 * anyway
11325 		 */
11326 		mutex_enter(&ptgt->tgt_mutex);
11327 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11328 			mutex_exit(&ptgt->tgt_mutex);
11329 			return (1);
11330 		}
11331 		mutex_exit(&ptgt->tgt_mutex);
11332 
11333 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11334 			rval = 1;
11335 		}
11336 	}
11337 	return (rval);
11338 }
11339 
11340 
11341 /*
11342  * called by the framework to get a SCSI capability
11343  */
11344 static int
11345 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11346 {
11347 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11348 }
11349 
11350 
11351 /*
11352  * called by the framework to set a SCSI capability
11353  */
11354 static int
11355 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11356 {
11357 	return (fcp_commoncap(ap, cap, value, whom, 1));
11358 }
11359 
11360 /*
11361  *     Function: fcp_pkt_setup
11362  *
11363  *  Description: This function sets up the scsi_pkt structure passed by the
11364  *		 caller. This function assumes fcp_pkt_constructor has been
11365  *		 called previously for the packet passed by the caller.	 If
11366  *		 successful this call will have the following results:
11367  *
11368  *		   - The resources needed that will be constant through out
11369  *		     the whole transaction are allocated.
11370  *		   - The fields that will be constant through out the whole
11371  *		     transaction are initialized.
11372  *		   - The scsi packet will be linked to the LUN structure
11373  *		     addressed by the transaction.
11374  *
11375  *     Argument:
11376  *		 *pkt		Pointer to a scsi_pkt structure.
11377  *		 callback
11378  *		 arg
11379  *
11380  * Return Value: 0	Success
11381  *		 !0	Failure
11382  *
11383  *	Context: Kernel context or interrupt context
11384  */
11385 /* ARGSUSED */
11386 static int
11387 fcp_pkt_setup(struct scsi_pkt *pkt,
11388     int (*callback)(caddr_t arg),
11389     caddr_t arg)
11390 {
11391 	struct fcp_pkt	*cmd;
11392 	struct fcp_port	*pptr;
11393 	struct fcp_lun	*plun;
11394 	struct fcp_tgt	*ptgt;
11395 	int		kf;
11396 	fc_packet_t	*fpkt;
11397 	fc_frame_hdr_t	*hp;
11398 
11399 	pptr = ADDR2FCP(&pkt->pkt_address);
11400 	plun = ADDR2LUN(&pkt->pkt_address);
11401 	ptgt = plun->lun_tgt;
11402 
11403 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11404 	fpkt = cmd->cmd_fp_pkt;
11405 
11406 	/*
11407 	 * this request is for dma allocation only
11408 	 */
11409 	/*
11410 	 * First step of fcp_scsi_init_pkt: pkt allocation
11411 	 * We determine if the caller is willing to wait for the
11412 	 * resources.
11413 	 */
11414 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11415 
11416 	/*
11417 	 * Selective zeroing of the pkt.
11418 	 */
11419 	cmd->cmd_back = NULL;
11420 	cmd->cmd_next = NULL;
11421 
11422 	/*
11423 	 * Zero out fcp command
11424 	 */
11425 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11426 
11427 	cmd->cmd_state = FCP_PKT_IDLE;
11428 
11429 	fpkt = cmd->cmd_fp_pkt;
11430 	fpkt->pkt_data_acc = NULL;
11431 
11432 	/*
11433 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11434 	 * could be destroyed.	We need fail pkt_setup.
11435 	 */
11436 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11437 		return (-1);
11438 	}
11439 
11440 	mutex_enter(&ptgt->tgt_mutex);
11441 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11442 
11443 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11444 	    != FC_SUCCESS) {
11445 		mutex_exit(&ptgt->tgt_mutex);
11446 		return (-1);
11447 	}
11448 
11449 	mutex_exit(&ptgt->tgt_mutex);
11450 
11451 	/* Fill in the Fabric Channel Header */
11452 	hp = &fpkt->pkt_cmd_fhdr;
11453 	hp->r_ctl = R_CTL_COMMAND;
11454 	hp->rsvd = 0;
11455 	hp->type = FC_TYPE_SCSI_FCP;
11456 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11457 	hp->seq_id = 0;
11458 	hp->df_ctl  = 0;
11459 	hp->seq_cnt = 0;
11460 	hp->ox_id = 0xffff;
11461 	hp->rx_id = 0xffff;
11462 	hp->ro = 0;
11463 
11464 	/*
11465 	 * A doubly linked list (cmd_forw, cmd_back) is built
11466 	 * out of every allocated packet on a per-lun basis
11467 	 *
11468 	 * The packets are maintained in the list so as to satisfy
11469 	 * scsi_abort() requests. At present (which is unlikely to
11470 	 * change in the future) nobody performs a real scsi_abort
11471 	 * in the SCSI target drivers (as they don't keep the packets
11472 	 * after doing scsi_transport - so they don't know how to
11473 	 * abort a packet other than sending a NULL to abort all
11474 	 * outstanding packets)
11475 	 */
11476 	mutex_enter(&plun->lun_mutex);
11477 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11478 		plun->lun_pkt_head->cmd_back = cmd;
11479 	} else {
11480 		plun->lun_pkt_tail = cmd;
11481 	}
11482 	plun->lun_pkt_head = cmd;
11483 	mutex_exit(&plun->lun_mutex);
11484 	return (0);
11485 }
11486 
11487 /*
11488  *     Function: fcp_pkt_teardown
11489  *
11490  *  Description: This function releases a scsi_pkt structure and all the
11491  *		 resources attached to it.
11492  *
11493  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11494  *
11495  * Return Value: None
11496  *
11497  *	Context: User, Kernel or Interrupt context.
11498  */
11499 static void
11500 fcp_pkt_teardown(struct scsi_pkt *pkt)
11501 {
11502 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11503 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11504 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11505 
11506 	/*
11507 	 * Remove the packet from the per-lun list
11508 	 */
11509 	mutex_enter(&plun->lun_mutex);
11510 	if (cmd->cmd_back) {
11511 		ASSERT(cmd != plun->lun_pkt_head);
11512 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11513 	} else {
11514 		ASSERT(cmd == plun->lun_pkt_head);
11515 		plun->lun_pkt_head = cmd->cmd_forw;
11516 	}
11517 
11518 	if (cmd->cmd_forw) {
11519 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11520 	} else {
11521 		ASSERT(cmd == plun->lun_pkt_tail);
11522 		plun->lun_pkt_tail = cmd->cmd_back;
11523 	}
11524 
11525 	mutex_exit(&plun->lun_mutex);
11526 
11527 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11528 }
11529 
11530 /*
11531  * Routine for reset notification setup, to register or cancel.
11532  * This function is called by SCSA
11533  */
11534 /*ARGSUSED*/
11535 static int
11536 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11537     void (*callback)(caddr_t), caddr_t arg)
11538 {
11539 	struct fcp_port *pptr = ADDR2FCP(ap);
11540 
11541 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11542 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11543 }
11544 
11545 
11546 static int
11547 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11548     ddi_eventcookie_t *event_cookiep)
11549 {
11550 	struct fcp_port *pptr = fcp_dip2port(dip);
11551 
11552 	if (pptr == NULL) {
11553 		return (DDI_FAILURE);
11554 	}
11555 
11556 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11557 	    event_cookiep, NDI_EVENT_NOPASS));
11558 }
11559 
11560 
11561 static int
11562 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11563     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11564     ddi_callback_id_t *cb_id)
11565 {
11566 	struct fcp_port *pptr = fcp_dip2port(dip);
11567 
11568 	if (pptr == NULL) {
11569 		return (DDI_FAILURE);
11570 	}
11571 
11572 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11573 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11574 }
11575 
11576 
11577 static int
11578 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11579 {
11580 
11581 	struct fcp_port *pptr = fcp_dip2port(dip);
11582 
11583 	if (pptr == NULL) {
11584 		return (DDI_FAILURE);
11585 	}
11586 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11587 }
11588 
11589 
11590 /*
11591  * called by the transport to post an event
11592  */
11593 static int
11594 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11595     ddi_eventcookie_t eventid, void *impldata)
11596 {
11597 	struct fcp_port *pptr = fcp_dip2port(dip);
11598 
11599 	if (pptr == NULL) {
11600 		return (DDI_FAILURE);
11601 	}
11602 
11603 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11604 	    eventid, impldata));
11605 }
11606 
11607 
11608 /*
11609  * A target in in many cases in Fibre Channel has a one to one relation
11610  * with a port identifier (which is also known as D_ID and also as AL_PA
11611  * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11612  * will most likely result in resetting all LUNs (which means a reset will
11613  * occur on all the SCSI devices connected at the other end of the bridge)
11614  * That is the latest favorite topic for discussion, for, one can debate as
11615  * hot as one likes and come up with arguably a best solution to one's
11616  * satisfaction
11617  *
11618  * To stay on track and not digress much, here are the problems stated
11619  * briefly:
11620  *
11621  *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11622  *	target drivers use RESET_TARGET even if their instance is on a
11623  *	LUN. Doesn't that sound a bit broken ?
11624  *
11625  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11626  *	control fields of an FCP_CMND structure. It should have been
11627  *	fixed right there, giving flexibility to the initiators to
11628  *	minimize havoc that could be caused by resetting a target.
11629  */
11630 static int
11631 fcp_reset_target(struct scsi_address *ap, int level)
11632 {
11633 	int			rval = FC_FAILURE;
11634 	char			lun_id[25];
11635 	struct fcp_port		*pptr = ADDR2FCP(ap);
11636 	struct fcp_lun	*plun = ADDR2LUN(ap);
11637 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11638 	struct scsi_pkt		*pkt;
11639 	struct fcp_pkt	*cmd;
11640 	struct fcp_rsp		*rsp;
11641 	uint32_t		tgt_cnt;
11642 	struct fcp_rsp_info	*rsp_info;
11643 	struct fcp_reset_elem	*p;
11644 	int			bval;
11645 
11646 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11647 	    KM_NOSLEEP)) == NULL) {
11648 		return (rval);
11649 	}
11650 
11651 	mutex_enter(&ptgt->tgt_mutex);
11652 	if (level == RESET_TARGET) {
11653 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11654 			mutex_exit(&ptgt->tgt_mutex);
11655 			kmem_free(p, sizeof (struct fcp_reset_elem));
11656 			return (rval);
11657 		}
11658 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11659 		(void) strcpy(lun_id, " ");
11660 	} else {
11661 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11662 			mutex_exit(&ptgt->tgt_mutex);
11663 			kmem_free(p, sizeof (struct fcp_reset_elem));
11664 			return (rval);
11665 		}
11666 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11667 
11668 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11669 	}
11670 	tgt_cnt = ptgt->tgt_change_cnt;
11671 
11672 	mutex_exit(&ptgt->tgt_mutex);
11673 
11674 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11675 	    0, 0, NULL, 0)) == NULL) {
11676 		kmem_free(p, sizeof (struct fcp_reset_elem));
11677 		mutex_enter(&ptgt->tgt_mutex);
11678 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11679 		mutex_exit(&ptgt->tgt_mutex);
11680 		return (rval);
11681 	}
11682 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11683 
11684 	/* fill in cmd part of packet */
11685 	cmd = PKT2CMD(pkt);
11686 	if (level == RESET_TARGET) {
11687 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11688 	} else {
11689 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11690 	}
11691 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11692 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11693 
11694 	/* prepare a packet for transport */
11695 	fcp_prepare_pkt(pptr, cmd, plun);
11696 
11697 	if (cmd->cmd_pkt->pkt_time) {
11698 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11699 	} else {
11700 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11701 	}
11702 
11703 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11704 	bval = fcp_dopoll(pptr, cmd);
11705 	fc_ulp_idle_port(pptr->port_fp_handle);
11706 
11707 	/* submit the packet */
11708 	if (bval == TRAN_ACCEPT) {
11709 		int error = 3;
11710 
11711 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11712 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11713 		    sizeof (struct fcp_rsp));
11714 
11715 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11716 			if (fcp_validate_fcp_response(rsp, pptr) ==
11717 			    FC_SUCCESS) {
11718 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11719 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11720 					    sizeof (struct fcp_rsp), rsp_info,
11721 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11722 					    sizeof (struct fcp_rsp_info));
11723 				}
11724 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11725 					rval = FC_SUCCESS;
11726 					error = 0;
11727 				} else {
11728 					error = 1;
11729 				}
11730 			} else {
11731 				error = 2;
11732 			}
11733 		}
11734 
11735 		switch (error) {
11736 		case 0:
11737 			fcp_log(CE_WARN, pptr->port_dip,
11738 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11739 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11740 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11741 			break;
11742 
11743 		case 1:
11744 			fcp_log(CE_WARN, pptr->port_dip,
11745 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11746 			    " response code=%x",
11747 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11748 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11749 			    rsp_info->rsp_code);
11750 			break;
11751 
11752 		case 2:
11753 			fcp_log(CE_WARN, pptr->port_dip,
11754 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11755 			    " Bad FCP response values: rsvd1=%x,"
11756 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11757 			    " rsplen=%x, senselen=%x",
11758 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11759 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11760 			    rsp->reserved_0, rsp->reserved_1,
11761 			    rsp->fcp_u.fcp_status.reserved_0,
11762 			    rsp->fcp_u.fcp_status.reserved_1,
11763 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11764 			break;
11765 
11766 		default:
11767 			fcp_log(CE_WARN, pptr->port_dip,
11768 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11769 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11770 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11771 			break;
11772 		}
11773 	}
11774 	scsi_destroy_pkt(pkt);
11775 
11776 	if (rval == FC_FAILURE) {
11777 		mutex_enter(&ptgt->tgt_mutex);
11778 		if (level == RESET_TARGET) {
11779 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11780 		} else {
11781 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11782 		}
11783 		mutex_exit(&ptgt->tgt_mutex);
11784 		kmem_free(p, sizeof (struct fcp_reset_elem));
11785 		return (rval);
11786 	}
11787 
11788 	mutex_enter(&pptr->port_mutex);
11789 	if (level == RESET_TARGET) {
11790 		p->tgt = ptgt;
11791 		p->lun = NULL;
11792 	} else {
11793 		p->tgt = NULL;
11794 		p->lun = plun;
11795 	}
11796 	p->tgt = ptgt;
11797 	p->tgt_cnt = tgt_cnt;
11798 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11799 	p->next = pptr->port_reset_list;
11800 	pptr->port_reset_list = p;
11801 
11802 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11803 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11804 	    "Notify ssd of the reset to reinstate the reservations");
11805 
11806 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11807 	    &pptr->port_reset_notify_listf);
11808 
11809 	mutex_exit(&pptr->port_mutex);
11810 
11811 	return (rval);
11812 }
11813 
11814 
11815 /*
11816  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11817  * SCSI capabilities
11818  */
11819 /* ARGSUSED */
11820 static int
11821 fcp_commoncap(struct scsi_address *ap, char *cap,
11822     int val, int tgtonly, int doset)
11823 {
11824 	struct fcp_port		*pptr = ADDR2FCP(ap);
11825 	struct fcp_lun	*plun = ADDR2LUN(ap);
11826 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11827 	int			cidx;
11828 	int			rval = FALSE;
11829 
11830 	if (cap == (char *)0) {
11831 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11832 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11833 		    "fcp_commoncap: invalid arg");
11834 		return (rval);
11835 	}
11836 
11837 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11838 		return (UNDEFINED);
11839 	}
11840 
11841 	/*
11842 	 * Process setcap request.
11843 	 */
11844 	if (doset) {
11845 		/*
11846 		 * At present, we can only set binary (0/1) values
11847 		 */
11848 		switch (cidx) {
11849 		case SCSI_CAP_ARQ:
11850 			if (val == 0) {
11851 				rval = FALSE;
11852 			} else {
11853 				rval = TRUE;
11854 			}
11855 			break;
11856 
11857 		case SCSI_CAP_LUN_RESET:
11858 			if (val) {
11859 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11860 			} else {
11861 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11862 			}
11863 			rval = TRUE;
11864 			break;
11865 
11866 		case SCSI_CAP_SECTOR_SIZE:
11867 			rval = TRUE;
11868 			break;
11869 		default:
11870 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11871 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11872 			    "fcp_setcap: unsupported %d", cidx);
11873 			rval = UNDEFINED;
11874 			break;
11875 		}
11876 
11877 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11878 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11879 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11880 		    "0x%x/0x%x/0x%x/%d",
11881 		    cap, val, tgtonly, doset, rval);
11882 
11883 	} else {
11884 		/*
11885 		 * Process getcap request.
11886 		 */
11887 		switch (cidx) {
11888 		case SCSI_CAP_DMA_MAX:
11889 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11890 
11891 			/*
11892 			 * Need to make an adjustment qlc is uint_t 64
11893 			 * st is int, so we will make the adjustment here
11894 			 * being as nobody wants to touch this.
11895 			 * It still leaves the max single block length
11896 			 * of 2 gig. This should last .
11897 			 */
11898 
11899 			if (rval == -1) {
11900 				rval = MAX_INT_DMA;
11901 			}
11902 
11903 			break;
11904 
11905 		case SCSI_CAP_INITIATOR_ID:
11906 			rval = pptr->port_id;
11907 			break;
11908 
11909 		case SCSI_CAP_ARQ:
11910 		case SCSI_CAP_RESET_NOTIFICATION:
11911 		case SCSI_CAP_TAGGED_QING:
11912 			rval = TRUE;
11913 			break;
11914 
11915 		case SCSI_CAP_SCSI_VERSION:
11916 			rval = 3;
11917 			break;
11918 
11919 		case SCSI_CAP_INTERCONNECT_TYPE:
11920 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11921 			    (ptgt->tgt_hard_addr == 0)) {
11922 				rval = INTERCONNECT_FABRIC;
11923 			} else {
11924 				rval = INTERCONNECT_FIBRE;
11925 			}
11926 			break;
11927 
11928 		case SCSI_CAP_LUN_RESET:
11929 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11930 			    TRUE : FALSE;
11931 			break;
11932 
11933 		default:
11934 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11935 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11936 			    "fcp_getcap: unsupported %d", cidx);
11937 			rval = UNDEFINED;
11938 			break;
11939 		}
11940 
11941 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11942 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11943 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11944 		    "0x%x/0x%x/0x%x/%d",
11945 		    cap, val, tgtonly, doset, rval);
11946 	}
11947 
11948 	return (rval);
11949 }
11950 
11951 /*
11952  * called by the transport to get the port-wwn and lun
11953  * properties of this device, and to create a "name" based on them
11954  *
11955  * these properties don't exist on sun4m
11956  *
11957  * return 1 for success else return 0
11958  */
11959 /* ARGSUSED */
11960 static int
11961 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11962 {
11963 	int			i;
11964 	int			*lun;
11965 	int			numChars;
11966 	uint_t			nlun;
11967 	uint_t			count;
11968 	uint_t			nbytes;
11969 	uchar_t			*bytes;
11970 	uint16_t		lun_num;
11971 	uint32_t		tgt_id;
11972 	char			**conf_wwn;
11973 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11974 	uchar_t			barray[FC_WWN_SIZE];
11975 	dev_info_t		*tgt_dip;
11976 	struct fcp_tgt	*ptgt;
11977 	struct fcp_port	*pptr;
11978 	struct fcp_lun	*plun;
11979 
11980 	ASSERT(sd != NULL);
11981 	ASSERT(name != NULL);
11982 
11983 	tgt_dip = sd->sd_dev;
11984 	pptr = ddi_get_soft_state(fcp_softstate,
11985 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11986 	if (pptr == NULL) {
11987 		return (0);
11988 	}
11989 
11990 	ASSERT(tgt_dip != NULL);
11991 
11992 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11993 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11994 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11995 		name[0] = '\0';
11996 		return (0);
11997 	}
11998 
11999 	if (nlun == 0) {
12000 		ddi_prop_free(lun);
12001 		return (0);
12002 	}
12003 
12004 	lun_num = lun[0];
12005 	ddi_prop_free(lun);
12006 
12007 	/*
12008 	 * Lookup for .conf WWN property
12009 	 */
12010 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12011 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12012 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12013 		ASSERT(count >= 1);
12014 
12015 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12016 		ddi_prop_free(conf_wwn);
12017 		mutex_enter(&pptr->port_mutex);
12018 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12019 			mutex_exit(&pptr->port_mutex);
12020 			return (0);
12021 		}
12022 		ptgt = plun->lun_tgt;
12023 		mutex_exit(&pptr->port_mutex);
12024 
12025 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12026 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12027 
12028 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12029 		    ptgt->tgt_hard_addr != 0) {
12030 			tgt_id = (uint32_t)fcp_alpa_to_switch[
12031 			    ptgt->tgt_hard_addr];
12032 		} else {
12033 			tgt_id = ptgt->tgt_d_id;
12034 		}
12035 
12036 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12037 		    TARGET_PROP, tgt_id);
12038 	}
12039 
12040 	/* get the our port-wwn property */
12041 	bytes = NULL;
12042 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12043 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12044 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12045 		if (bytes != NULL) {
12046 			ddi_prop_free(bytes);
12047 		}
12048 		return (0);
12049 	}
12050 
12051 	for (i = 0; i < FC_WWN_SIZE; i++) {
12052 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12053 	}
12054 
12055 	/* Stick in the address of the form "wWWN,LUN" */
12056 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12057 
12058 	ASSERT(numChars < len);
12059 	if (numChars >= len) {
12060 		fcp_log(CE_WARN, pptr->port_dip,
12061 		    "!fcp_scsi_get_name: "
12062 		    "name parameter length too small, it needs to be %d",
12063 		    numChars+1);
12064 	}
12065 
12066 	ddi_prop_free(bytes);
12067 
12068 	return (1);
12069 }
12070 
12071 
12072 /*
12073  * called by the transport to get the SCSI target id value, returning
12074  * it in "name"
12075  *
12076  * this isn't needed/used on sun4m
12077  *
12078  * return 1 for success else return 0
12079  */
12080 /* ARGSUSED */
12081 static int
12082 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12083 {
12084 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
12085 	struct fcp_tgt	*ptgt;
12086 	int    numChars;
12087 
12088 	if (plun == NULL) {
12089 		return (0);
12090 	}
12091 
12092 	if ((ptgt = plun->lun_tgt) == NULL) {
12093 		return (0);
12094 	}
12095 
12096 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12097 
12098 	ASSERT(numChars < len);
12099 	if (numChars >= len) {
12100 		fcp_log(CE_WARN, NULL,
12101 		    "!fcp_scsi_get_bus_addr: "
12102 		    "name parameter length too small, it needs to be %d",
12103 		    numChars+1);
12104 	}
12105 
12106 	return (1);
12107 }
12108 
12109 
12110 /*
12111  * called internally to reset the link where the specified port lives
12112  */
12113 static int
12114 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12115 {
12116 	la_wwn_t		wwn;
12117 	struct fcp_lun	*plun;
12118 	struct fcp_tgt	*ptgt;
12119 
12120 	/* disable restart of lip if we're suspended */
12121 	mutex_enter(&pptr->port_mutex);
12122 
12123 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12124 	    FCP_STATE_POWER_DOWN)) {
12125 		mutex_exit(&pptr->port_mutex);
12126 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12127 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12128 		    "fcp_linkreset, fcp%d: link reset "
12129 		    "disabled due to DDI_SUSPEND",
12130 		    ddi_get_instance(pptr->port_dip));
12131 		return (FC_FAILURE);
12132 	}
12133 
12134 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12135 		mutex_exit(&pptr->port_mutex);
12136 		return (FC_SUCCESS);
12137 	}
12138 
12139 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12140 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12141 
12142 	/*
12143 	 * If ap == NULL assume local link reset.
12144 	 */
12145 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12146 		plun = ADDR2LUN(ap);
12147 		ptgt = plun->lun_tgt;
12148 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12149 	} else {
12150 		bzero((caddr_t)&wwn, sizeof (wwn));
12151 	}
12152 	mutex_exit(&pptr->port_mutex);
12153 
12154 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12155 }
12156 
12157 
12158 /*
12159  * called from fcp_port_attach() to resume a port
12160  * return DDI_* success/failure status
12161  * acquires and releases the global mutex
12162  * acquires and releases the port mutex
12163  */
12164 /*ARGSUSED*/
12165 
12166 static int
12167 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12168     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12169 {
12170 	int			res = DDI_FAILURE; /* default result */
12171 	struct fcp_port	*pptr;		/* port state ptr */
12172 	uint32_t		alloc_cnt;
12173 	uint32_t		max_cnt;
12174 	fc_portmap_t		*tmp_list = NULL;
12175 
12176 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12177 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12178 	    instance);
12179 
12180 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12181 		cmn_err(CE_WARN, "fcp: bad soft state");
12182 		return (res);
12183 	}
12184 
12185 	mutex_enter(&pptr->port_mutex);
12186 	switch (cmd) {
12187 	case FC_CMD_RESUME:
12188 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12189 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12190 		break;
12191 
12192 	case FC_CMD_POWER_UP:
12193 		/*
12194 		 * If the port is DDI_SUSPENded, defer rediscovery
12195 		 * until DDI_RESUME occurs
12196 		 */
12197 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12198 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12199 			mutex_exit(&pptr->port_mutex);
12200 			return (DDI_SUCCESS);
12201 		}
12202 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12203 	}
12204 	pptr->port_id = s_id;
12205 	pptr->port_state = FCP_STATE_INIT;
12206 	mutex_exit(&pptr->port_mutex);
12207 
12208 	/*
12209 	 * Make a copy of ulp_port_info as fctl allocates
12210 	 * a temp struct.
12211 	 */
12212 	(void) fcp_cp_pinfo(pptr, pinfo);
12213 
12214 	mutex_enter(&fcp_global_mutex);
12215 	if (fcp_watchdog_init++ == 0) {
12216 		fcp_watchdog_tick = fcp_watchdog_timeout *
12217 		    drv_usectohz(1000000);
12218 		fcp_watchdog_id = timeout(fcp_watch,
12219 		    NULL, fcp_watchdog_tick);
12220 	}
12221 	mutex_exit(&fcp_global_mutex);
12222 
12223 	/*
12224 	 * Handle various topologies and link states.
12225 	 */
12226 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12227 	case FC_STATE_OFFLINE:
12228 		/*
12229 		 * Wait for ONLINE, at which time a state
12230 		 * change will cause a statec_callback
12231 		 */
12232 		res = DDI_SUCCESS;
12233 		break;
12234 
12235 	case FC_STATE_ONLINE:
12236 
12237 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12238 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12239 			res = DDI_SUCCESS;
12240 			break;
12241 		}
12242 
12243 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12244 		    !fcp_enable_auto_configuration) {
12245 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12246 			if (tmp_list == NULL) {
12247 				if (!alloc_cnt) {
12248 					res = DDI_SUCCESS;
12249 				}
12250 				break;
12251 			}
12252 			max_cnt = alloc_cnt;
12253 		} else {
12254 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12255 
12256 			alloc_cnt = FCP_MAX_DEVICES;
12257 
12258 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12259 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12260 			    KM_NOSLEEP)) == NULL) {
12261 				fcp_log(CE_WARN, pptr->port_dip,
12262 				    "!fcp%d: failed to allocate portmap",
12263 				    instance);
12264 				break;
12265 			}
12266 
12267 			max_cnt = alloc_cnt;
12268 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12269 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12270 			    FC_SUCCESS) {
12271 				caddr_t msg;
12272 
12273 				(void) fc_ulp_error(res, &msg);
12274 
12275 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12276 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12277 				    "resume failed getportmap: reason=0x%x",
12278 				    res);
12279 
12280 				fcp_log(CE_WARN, pptr->port_dip,
12281 				    "!failed to get port map : %s", msg);
12282 				break;
12283 			}
12284 			if (max_cnt > alloc_cnt) {
12285 				alloc_cnt = max_cnt;
12286 			}
12287 		}
12288 
12289 		/*
12290 		 * do the SCSI device discovery and create
12291 		 * the devinfos
12292 		 */
12293 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12294 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12295 		    max_cnt, pptr->port_id);
12296 
12297 		res = DDI_SUCCESS;
12298 		break;
12299 
12300 	default:
12301 		fcp_log(CE_WARN, pptr->port_dip,
12302 		    "!fcp%d: invalid port state at attach=0x%x",
12303 		    instance, pptr->port_phys_state);
12304 
12305 		mutex_enter(&pptr->port_mutex);
12306 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12307 		mutex_exit(&pptr->port_mutex);
12308 		res = DDI_SUCCESS;
12309 
12310 		break;
12311 	}
12312 
12313 	if (tmp_list != NULL) {
12314 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12315 	}
12316 
12317 	return (res);
12318 }
12319 
12320 
12321 static void
12322 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12323 {
12324 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12325 	pptr->port_dip = pinfo->port_dip;
12326 	pptr->port_fp_handle = pinfo->port_handle;
12327 	if (pinfo->port_acc_attr != NULL) {
12328 		/*
12329 		 * FCA supports DMA
12330 		 */
12331 		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12332 		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12333 		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12334 		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12335 	}
12336 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12337 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12338 	pptr->port_phys_state = pinfo->port_state;
12339 	pptr->port_topology = pinfo->port_flags;
12340 	pptr->port_reset_action = pinfo->port_reset_action;
12341 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12342 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12343 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12344 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12345 
12346 	/* Clear FMA caps to avoid fm-capability ereport */
12347 	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12348 		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12349 	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12350 		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12351 	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12352 		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12353 }
12354 
12355 /*
12356  * If the elements wait field is set to 1 then
12357  * another thread is waiting for the operation to complete. Once
12358  * it is complete, the waiting thread is signaled and the element is
12359  * freed by the waiting thread. If the elements wait field is set to 0
12360  * the element is freed.
12361  */
12362 static void
12363 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12364 {
12365 	ASSERT(elem != NULL);
12366 	mutex_enter(&elem->mutex);
12367 	elem->result = result;
12368 	if (elem->wait) {
12369 		elem->wait = 0;
12370 		cv_signal(&elem->cv);
12371 		mutex_exit(&elem->mutex);
12372 	} else {
12373 		mutex_exit(&elem->mutex);
12374 		cv_destroy(&elem->cv);
12375 		mutex_destroy(&elem->mutex);
12376 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12377 	}
12378 }
12379 
12380 /*
12381  * This function is invoked from the taskq thread to allocate
12382  * devinfo nodes and to online/offline them.
12383  */
12384 static void
12385 fcp_hp_task(void *arg)
12386 {
12387 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12388 	struct fcp_lun	*plun = elem->lun;
12389 	struct fcp_port		*pptr = elem->port;
12390 	int			result;
12391 
12392 	ASSERT(elem->what == FCP_ONLINE ||
12393 	    elem->what == FCP_OFFLINE ||
12394 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12395 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12396 
12397 	mutex_enter(&pptr->port_mutex);
12398 	mutex_enter(&plun->lun_mutex);
12399 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12400 	    plun->lun_event_count != elem->event_cnt) ||
12401 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12402 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12403 		mutex_exit(&plun->lun_mutex);
12404 		mutex_exit(&pptr->port_mutex);
12405 		fcp_process_elem(elem, NDI_FAILURE);
12406 		return;
12407 	}
12408 	mutex_exit(&plun->lun_mutex);
12409 	mutex_exit(&pptr->port_mutex);
12410 
12411 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12412 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12413 	fcp_process_elem(elem, result);
12414 }
12415 
12416 
12417 static child_info_t *
12418 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12419     int tcount)
12420 {
12421 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12422 
12423 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12424 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12425 
12426 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12427 		/*
12428 		 * Child has not been created yet. Create the child device
12429 		 * based on the per-Lun flags.
12430 		 */
12431 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12432 			plun->lun_cip =
12433 			    CIP(fcp_create_dip(plun, lcount, tcount));
12434 			plun->lun_mpxio = 0;
12435 		} else {
12436 			plun->lun_cip =
12437 			    CIP(fcp_create_pip(plun, lcount, tcount));
12438 			plun->lun_mpxio = 1;
12439 		}
12440 	} else {
12441 		plun->lun_cip = cip;
12442 	}
12443 
12444 	return (plun->lun_cip);
12445 }
12446 
12447 
12448 static int
12449 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12450 {
12451 	int		rval = FC_FAILURE;
12452 	dev_info_t	*pdip;
12453 	struct dev_info	*dip;
12454 	int		circular;
12455 
12456 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12457 
12458 	pdip = plun->lun_tgt->tgt_port->port_dip;
12459 
12460 	if (plun->lun_cip == NULL) {
12461 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12462 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12463 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12464 		    "plun: %p lun state: %x num: %d target state: %x",
12465 		    plun, plun->lun_state, plun->lun_num,
12466 		    plun->lun_tgt->tgt_port->port_state);
12467 		return (rval);
12468 	}
12469 	ndi_devi_enter(pdip, &circular);
12470 	dip = DEVI(pdip)->devi_child;
12471 	while (dip) {
12472 		if (dip == DEVI(cdip)) {
12473 			rval = FC_SUCCESS;
12474 			break;
12475 		}
12476 		dip = dip->devi_sibling;
12477 	}
12478 	ndi_devi_exit(pdip, circular);
12479 	return (rval);
12480 }
12481 
12482 static int
12483 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12484 {
12485 	int		rval = FC_FAILURE;
12486 
12487 	ASSERT(plun != NULL);
12488 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12489 
12490 	if (plun->lun_mpxio == 0) {
12491 		rval = fcp_is_dip_present(plun, DIP(cip));
12492 	} else {
12493 		rval = fcp_is_pip_present(plun, PIP(cip));
12494 	}
12495 
12496 	return (rval);
12497 }
12498 
12499 /*
12500  *     Function: fcp_create_dip
12501  *
12502  *  Description: Creates a dev_info_t structure for the LUN specified by the
12503  *		 caller.
12504  *
12505  *     Argument: plun		Lun structure
12506  *		 link_cnt	Link state count.
12507  *		 tgt_cnt	Target state change count.
12508  *
12509  * Return Value: NULL if it failed
12510  *		 dev_info_t structure address if it succeeded
12511  *
12512  *	Context: Kernel context
12513  */
12514 static dev_info_t *
12515 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12516 {
12517 	int			failure = 0;
12518 	uint32_t		tgt_id;
12519 	uint64_t		sam_lun;
12520 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12521 	struct fcp_port	*pptr = ptgt->tgt_port;
12522 	dev_info_t		*pdip = pptr->port_dip;
12523 	dev_info_t		*cdip = NULL;
12524 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12525 	char			*nname = NULL;
12526 	char			**compatible = NULL;
12527 	int			ncompatible;
12528 	char			*scsi_binding_set;
12529 	char			t_pwwn[17];
12530 
12531 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12532 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12533 
12534 	/* get the 'scsi-binding-set' property */
12535 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12536 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12537 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12538 		scsi_binding_set = NULL;
12539 	}
12540 
12541 	/* determine the node name and compatible */
12542 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12543 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12544 	if (scsi_binding_set) {
12545 		ddi_prop_free(scsi_binding_set);
12546 	}
12547 
12548 	if (nname == NULL) {
12549 #ifdef	DEBUG
12550 		cmn_err(CE_WARN, "%s%d: no driver for "
12551 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12552 		    "	 compatible: %s",
12553 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12554 		    ptgt->tgt_port_wwn.raw_wwn[0],
12555 		    ptgt->tgt_port_wwn.raw_wwn[1],
12556 		    ptgt->tgt_port_wwn.raw_wwn[2],
12557 		    ptgt->tgt_port_wwn.raw_wwn[3],
12558 		    ptgt->tgt_port_wwn.raw_wwn[4],
12559 		    ptgt->tgt_port_wwn.raw_wwn[5],
12560 		    ptgt->tgt_port_wwn.raw_wwn[6],
12561 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12562 		    *compatible);
12563 #endif	/* DEBUG */
12564 		failure++;
12565 		goto end_of_fcp_create_dip;
12566 	}
12567 
12568 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12569 
12570 	/*
12571 	 * if the old_dip does not match the cdip, that means there is
12572 	 * some property change. since we'll be using the cdip, we need
12573 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12574 	 * then the dtype for the device has been updated. Offline the
12575 	 * the old device and create a new device with the new device type
12576 	 * Refer to bug: 4764752
12577 	 */
12578 	if (old_dip && (cdip != old_dip ||
12579 	    plun->lun_state & FCP_LUN_CHANGED)) {
12580 		plun->lun_state &= ~(FCP_LUN_INIT);
12581 		mutex_exit(&plun->lun_mutex);
12582 		mutex_exit(&pptr->port_mutex);
12583 
12584 		mutex_enter(&ptgt->tgt_mutex);
12585 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12586 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12587 		mutex_exit(&ptgt->tgt_mutex);
12588 
12589 #ifdef DEBUG
12590 		if (cdip != NULL) {
12591 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12592 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12593 			    "Old dip=%p; New dip=%p don't match", old_dip,
12594 			    cdip);
12595 		} else {
12596 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12597 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12598 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12599 		}
12600 #endif
12601 
12602 		mutex_enter(&pptr->port_mutex);
12603 		mutex_enter(&plun->lun_mutex);
12604 	}
12605 
12606 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12607 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12608 		if (ndi_devi_alloc(pptr->port_dip, nname,
12609 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12610 			failure++;
12611 			goto end_of_fcp_create_dip;
12612 		}
12613 	}
12614 
12615 	/*
12616 	 * Previously all the properties for the devinfo were destroyed here
12617 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12618 	 * the devid property (and other properties established by the target
12619 	 * driver or framework) which the code does not always recreate, this
12620 	 * call was removed.
12621 	 * This opens a theoretical possibility that we may return with a
12622 	 * stale devid on the node if the scsi entity behind the fibre channel
12623 	 * lun has changed.
12624 	 */
12625 
12626 	/* decorate the node with compatible */
12627 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12628 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12629 		failure++;
12630 		goto end_of_fcp_create_dip;
12631 	}
12632 
12633 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12634 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12635 		failure++;
12636 		goto end_of_fcp_create_dip;
12637 	}
12638 
12639 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12640 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12641 		failure++;
12642 		goto end_of_fcp_create_dip;
12643 	}
12644 
12645 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12646 	t_pwwn[16] = '\0';
12647 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12648 	    != DDI_PROP_SUCCESS) {
12649 		failure++;
12650 		goto end_of_fcp_create_dip;
12651 	}
12652 
12653 	/*
12654 	 * If there is no hard address - We might have to deal with
12655 	 * that by using WWN - Having said that it is important to
12656 	 * recognize this problem early so ssd can be informed of
12657 	 * the right interconnect type.
12658 	 */
12659 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12660 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12661 	} else {
12662 		tgt_id = ptgt->tgt_d_id;
12663 	}
12664 
12665 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12666 	    tgt_id) != DDI_PROP_SUCCESS) {
12667 		failure++;
12668 		goto end_of_fcp_create_dip;
12669 	}
12670 
12671 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12672 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12673 		failure++;
12674 		goto end_of_fcp_create_dip;
12675 	}
12676 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12677 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12678 	    sam_lun) != DDI_PROP_SUCCESS) {
12679 		failure++;
12680 		goto end_of_fcp_create_dip;
12681 	}
12682 
12683 end_of_fcp_create_dip:
12684 	scsi_hba_nodename_compatible_free(nname, compatible);
12685 
12686 	if (cdip != NULL && failure) {
12687 		(void) ndi_prop_remove_all(cdip);
12688 		(void) ndi_devi_free(cdip);
12689 		cdip = NULL;
12690 	}
12691 
12692 	return (cdip);
12693 }
12694 
12695 /*
12696  *     Function: fcp_create_pip
12697  *
12698  *  Description: Creates a Path Id for the LUN specified by the caller.
12699  *
12700  *     Argument: plun		Lun structure
12701  *		 link_cnt	Link state count.
12702  *		 tgt_cnt	Target state count.
12703  *
12704  * Return Value: NULL if it failed
12705  *		 mdi_pathinfo_t structure address if it succeeded
12706  *
12707  *	Context: Kernel context
12708  */
12709 static mdi_pathinfo_t *
12710 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12711 {
12712 	int			i;
12713 	char			buf[MAXNAMELEN];
12714 	char			uaddr[MAXNAMELEN];
12715 	int			failure = 0;
12716 	uint32_t		tgt_id;
12717 	uint64_t		sam_lun;
12718 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12719 	struct fcp_port	*pptr = ptgt->tgt_port;
12720 	dev_info_t		*pdip = pptr->port_dip;
12721 	mdi_pathinfo_t		*pip = NULL;
12722 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12723 	char			*nname = NULL;
12724 	char			**compatible = NULL;
12725 	int			ncompatible;
12726 	char			*scsi_binding_set;
12727 	char			t_pwwn[17];
12728 
12729 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12730 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12731 
12732 	scsi_binding_set = "vhci";
12733 
12734 	/* determine the node name and compatible */
12735 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12736 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12737 
12738 	if (nname == NULL) {
12739 #ifdef	DEBUG
12740 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12741 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12742 		    "	 compatible: %s",
12743 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12744 		    ptgt->tgt_port_wwn.raw_wwn[0],
12745 		    ptgt->tgt_port_wwn.raw_wwn[1],
12746 		    ptgt->tgt_port_wwn.raw_wwn[2],
12747 		    ptgt->tgt_port_wwn.raw_wwn[3],
12748 		    ptgt->tgt_port_wwn.raw_wwn[4],
12749 		    ptgt->tgt_port_wwn.raw_wwn[5],
12750 		    ptgt->tgt_port_wwn.raw_wwn[6],
12751 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12752 		    *compatible);
12753 #endif	/* DEBUG */
12754 		failure++;
12755 		goto end_of_fcp_create_pip;
12756 	}
12757 
12758 	pip = fcp_find_existing_pip(plun, pdip);
12759 
12760 	/*
12761 	 * if the old_dip does not match the cdip, that means there is
12762 	 * some property change. since we'll be using the cdip, we need
12763 	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12764 	 * then the dtype for the device has been updated. Offline the
12765 	 * the old device and create a new device with the new device type
12766 	 * Refer to bug: 4764752
12767 	 */
12768 	if (old_pip && (pip != old_pip ||
12769 	    plun->lun_state & FCP_LUN_CHANGED)) {
12770 		plun->lun_state &= ~(FCP_LUN_INIT);
12771 		mutex_exit(&plun->lun_mutex);
12772 		mutex_exit(&pptr->port_mutex);
12773 
12774 		mutex_enter(&ptgt->tgt_mutex);
12775 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12776 		    FCP_OFFLINE, lcount, tcount,
12777 		    NDI_DEVI_REMOVE, 0);
12778 		mutex_exit(&ptgt->tgt_mutex);
12779 
12780 		if (pip != NULL) {
12781 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12782 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12783 			    "Old pip=%p; New pip=%p don't match",
12784 			    old_pip, pip);
12785 		} else {
12786 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12787 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12788 			    "Old pip=%p; New pip=NULL don't match",
12789 			    old_pip);
12790 		}
12791 
12792 		mutex_enter(&pptr->port_mutex);
12793 		mutex_enter(&plun->lun_mutex);
12794 	}
12795 
12796 	/*
12797 	 * Since FC_WWN_SIZE is 8 bytes and its not like the
12798 	 * lun_guid_size which is dependent on the target, I don't
12799 	 * believe the same trancation happens here UNLESS the standards
12800 	 * change the FC_WWN_SIZE value to something larger than
12801 	 * MAXNAMELEN(currently 255 bytes).
12802 	 */
12803 
12804 	for (i = 0; i < FC_WWN_SIZE; i++) {
12805 		(void) sprintf(&buf[i << 1], "%02x",
12806 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12807 	}
12808 
12809 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12810 	    buf, plun->lun_num);
12811 
12812 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12813 		/*
12814 		 * Release the locks before calling into
12815 		 * mdi_pi_alloc_compatible() since this can result in a
12816 		 * callback into fcp which can result in a deadlock
12817 		 * (see bug # 4870272).
12818 		 *
12819 		 * Basically, what we are trying to avoid is the scenario where
12820 		 * one thread does ndi_devi_enter() and tries to grab
12821 		 * fcp_mutex and another does it the other way round.
12822 		 *
12823 		 * But before we do that, make sure that nobody releases the
12824 		 * port in the meantime. We can do this by setting a flag.
12825 		 */
12826 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12827 		pptr->port_state |= FCP_STATE_IN_MDI;
12828 		mutex_exit(&plun->lun_mutex);
12829 		mutex_exit(&pptr->port_mutex);
12830 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12831 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12832 			fcp_log(CE_WARN, pptr->port_dip,
12833 			    "!path alloc failed:0x%x", plun);
12834 			mutex_enter(&pptr->port_mutex);
12835 			mutex_enter(&plun->lun_mutex);
12836 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12837 			failure++;
12838 			goto end_of_fcp_create_pip;
12839 		}
12840 		mutex_enter(&pptr->port_mutex);
12841 		mutex_enter(&plun->lun_mutex);
12842 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12843 	} else {
12844 		(void) mdi_prop_remove(pip, NULL);
12845 	}
12846 
12847 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12848 
12849 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12850 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12851 	    != DDI_PROP_SUCCESS) {
12852 		failure++;
12853 		goto end_of_fcp_create_pip;
12854 	}
12855 
12856 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12857 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12858 	    != DDI_PROP_SUCCESS) {
12859 		failure++;
12860 		goto end_of_fcp_create_pip;
12861 	}
12862 
12863 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12864 	t_pwwn[16] = '\0';
12865 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12866 	    != DDI_PROP_SUCCESS) {
12867 		failure++;
12868 		goto end_of_fcp_create_pip;
12869 	}
12870 
12871 	/*
12872 	 * If there is no hard address - We might have to deal with
12873 	 * that by using WWN - Having said that it is important to
12874 	 * recognize this problem early so ssd can be informed of
12875 	 * the right interconnect type.
12876 	 */
12877 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12878 	    ptgt->tgt_hard_addr != 0) {
12879 		tgt_id = (uint32_t)
12880 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12881 	} else {
12882 		tgt_id = ptgt->tgt_d_id;
12883 	}
12884 
12885 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12886 	    != DDI_PROP_SUCCESS) {
12887 		failure++;
12888 		goto end_of_fcp_create_pip;
12889 	}
12890 
12891 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12892 	    != DDI_PROP_SUCCESS) {
12893 		failure++;
12894 		goto end_of_fcp_create_pip;
12895 	}
12896 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12897 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12898 	    != DDI_PROP_SUCCESS) {
12899 		failure++;
12900 		goto end_of_fcp_create_pip;
12901 	}
12902 
12903 end_of_fcp_create_pip:
12904 	scsi_hba_nodename_compatible_free(nname, compatible);
12905 
12906 	if (pip != NULL && failure) {
12907 		(void) mdi_prop_remove(pip, NULL);
12908 		mutex_exit(&plun->lun_mutex);
12909 		mutex_exit(&pptr->port_mutex);
12910 		(void) mdi_pi_free(pip, 0);
12911 		mutex_enter(&pptr->port_mutex);
12912 		mutex_enter(&plun->lun_mutex);
12913 		pip = NULL;
12914 	}
12915 
12916 	return (pip);
12917 }
12918 
12919 static dev_info_t *
12920 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12921 {
12922 	uint_t			nbytes;
12923 	uchar_t			*bytes;
12924 	uint_t			nwords;
12925 	uint32_t		tgt_id;
12926 	int			*words;
12927 	dev_info_t		*cdip;
12928 	dev_info_t		*ndip;
12929 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12930 	struct fcp_port	*pptr = ptgt->tgt_port;
12931 	int			circular;
12932 
12933 	ndi_devi_enter(pdip, &circular);
12934 
12935 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12936 	while ((cdip = ndip) != NULL) {
12937 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12938 
12939 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12940 			continue;
12941 		}
12942 
12943 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12944 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12945 		    &nbytes) != DDI_PROP_SUCCESS) {
12946 			continue;
12947 		}
12948 
12949 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12950 			if (bytes != NULL) {
12951 				ddi_prop_free(bytes);
12952 			}
12953 			continue;
12954 		}
12955 		ASSERT(bytes != NULL);
12956 
12957 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12958 			ddi_prop_free(bytes);
12959 			continue;
12960 		}
12961 
12962 		ddi_prop_free(bytes);
12963 
12964 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12965 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12966 		    &nbytes) != DDI_PROP_SUCCESS) {
12967 			continue;
12968 		}
12969 
12970 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12971 			if (bytes != NULL) {
12972 				ddi_prop_free(bytes);
12973 			}
12974 			continue;
12975 		}
12976 		ASSERT(bytes != NULL);
12977 
12978 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12979 			ddi_prop_free(bytes);
12980 			continue;
12981 		}
12982 
12983 		ddi_prop_free(bytes);
12984 
12985 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12986 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12987 		    &nwords) != DDI_PROP_SUCCESS) {
12988 			continue;
12989 		}
12990 
12991 		if (nwords != 1 || words == NULL) {
12992 			if (words != NULL) {
12993 				ddi_prop_free(words);
12994 			}
12995 			continue;
12996 		}
12997 		ASSERT(words != NULL);
12998 
12999 		/*
13000 		 * If there is no hard address - We might have to deal with
13001 		 * that by using WWN - Having said that it is important to
13002 		 * recognize this problem early so ssd can be informed of
13003 		 * the right interconnect type.
13004 		 */
13005 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
13006 		    ptgt->tgt_hard_addr != 0) {
13007 			tgt_id =
13008 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
13009 		} else {
13010 			tgt_id = ptgt->tgt_d_id;
13011 		}
13012 
13013 		if (tgt_id != (uint32_t)*words) {
13014 			ddi_prop_free(words);
13015 			continue;
13016 		}
13017 		ddi_prop_free(words);
13018 
13019 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13020 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13021 		    &nwords) != DDI_PROP_SUCCESS) {
13022 			continue;
13023 		}
13024 
13025 		if (nwords != 1 || words == NULL) {
13026 			if (words != NULL) {
13027 				ddi_prop_free(words);
13028 			}
13029 			continue;
13030 		}
13031 		ASSERT(words != NULL);
13032 
13033 		if (plun->lun_num == (uint16_t)*words) {
13034 			ddi_prop_free(words);
13035 			break;
13036 		}
13037 		ddi_prop_free(words);
13038 	}
13039 	ndi_devi_exit(pdip, circular);
13040 
13041 	return (cdip);
13042 }
13043 
13044 
13045 static int
13046 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13047 {
13048 	dev_info_t	*pdip;
13049 	char		buf[MAXNAMELEN];
13050 	char		uaddr[MAXNAMELEN];
13051 	int		rval = FC_FAILURE;
13052 
13053 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13054 
13055 	pdip = plun->lun_tgt->tgt_port->port_dip;
13056 
13057 	/*
13058 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13059 	 * non-NULL even when the LUN is not there as in the case when a LUN is
13060 	 * configured and then deleted on the device end (for T3/T4 case). In
13061 	 * such cases, pip will be NULL.
13062 	 *
13063 	 * If the device generates an RSCN, it will end up getting offlined when
13064 	 * it disappeared and a new LUN will get created when it is rediscovered
13065 	 * on the device. If we check for lun_cip here, the LUN will not end
13066 	 * up getting onlined since this function will end up returning a
13067 	 * FC_SUCCESS.
13068 	 *
13069 	 * The behavior is different on other devices. For instance, on a HDS,
13070 	 * there was no RSCN generated by the device but the next I/O generated
13071 	 * a check condition and rediscovery got triggered that way. So, in
13072 	 * such cases, this path will not be exercised
13073 	 */
13074 	if (pip == NULL) {
13075 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13076 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
13077 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
13078 		    "plun: %p lun state: %x num: %d target state: %x",
13079 		    plun, plun->lun_state, plun->lun_num,
13080 		    plun->lun_tgt->tgt_port->port_state);
13081 		return (rval);
13082 	}
13083 
13084 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13085 
13086 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13087 
13088 	if (plun->lun_old_guid) {
13089 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13090 			rval = FC_SUCCESS;
13091 		}
13092 	} else {
13093 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13094 			rval = FC_SUCCESS;
13095 		}
13096 	}
13097 	return (rval);
13098 }
13099 
13100 static mdi_pathinfo_t *
13101 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13102 {
13103 	char			buf[MAXNAMELEN];
13104 	char			uaddr[MAXNAMELEN];
13105 	mdi_pathinfo_t		*pip;
13106 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13107 	struct fcp_port	*pptr = ptgt->tgt_port;
13108 
13109 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13110 
13111 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13112 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13113 
13114 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13115 
13116 	return (pip);
13117 }
13118 
13119 
13120 static int
13121 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13122     int tcount, int flags, int *circ)
13123 {
13124 	int			rval;
13125 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13126 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13127 	dev_info_t		*cdip = NULL;
13128 
13129 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13130 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13131 
13132 	if (plun->lun_cip == NULL) {
13133 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13134 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13135 		    "fcp_online_child: plun->lun_cip is NULL: "
13136 		    "plun: %p state: %x num: %d target state: %x",
13137 		    plun, plun->lun_state, plun->lun_num,
13138 		    plun->lun_tgt->tgt_port->port_state);
13139 		return (NDI_FAILURE);
13140 	}
13141 again:
13142 	if (plun->lun_mpxio == 0) {
13143 		cdip = DIP(cip);
13144 		mutex_exit(&plun->lun_mutex);
13145 		mutex_exit(&pptr->port_mutex);
13146 
13147 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13148 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13149 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13150 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13151 
13152 		/*
13153 		 * We could check for FCP_LUN_INIT here but chances
13154 		 * of getting here when it's already in FCP_LUN_INIT
13155 		 * is rare and a duplicate ndi_devi_online wouldn't
13156 		 * hurt either (as the node would already have been
13157 		 * in CF2)
13158 		 */
13159 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13160 			rval = ndi_devi_bind_driver(cdip, flags);
13161 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13162 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13163 			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13164 		} else {
13165 			rval = ndi_devi_online(cdip, flags);
13166 		}
13167 
13168 		/*
13169 		 * We log the message into trace buffer if the device
13170 		 * is "ses" and into syslog for any other device
13171 		 * type. This is to prevent the ndi_devi_online failure
13172 		 * message that appears for V880/A5K ses devices.
13173 		 */
13174 		if (rval == NDI_SUCCESS) {
13175 			mutex_enter(&ptgt->tgt_mutex);
13176 			plun->lun_state |= FCP_LUN_INIT;
13177 			mutex_exit(&ptgt->tgt_mutex);
13178 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13179 			fcp_log(CE_NOTE, pptr->port_dip,
13180 			    "!ndi_devi_online:"
13181 			    " failed for %s: target=%x lun=%x %x",
13182 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13183 			    plun->lun_num, rval);
13184 		} else {
13185 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13186 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13187 			    " !ndi_devi_online:"
13188 			    " failed for %s: target=%x lun=%x %x",
13189 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13190 			    plun->lun_num, rval);
13191 		}
13192 	} else {
13193 		cdip = mdi_pi_get_client(PIP(cip));
13194 		mutex_exit(&plun->lun_mutex);
13195 		mutex_exit(&pptr->port_mutex);
13196 
13197 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13198 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13199 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13200 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13201 
13202 		/*
13203 		 * Hold path and exit phci to avoid deadlock with power
13204 		 * management code during mdi_pi_online.
13205 		 */
13206 		mdi_hold_path(PIP(cip));
13207 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13208 
13209 		rval = mdi_pi_online(PIP(cip), flags);
13210 
13211 		mdi_devi_enter_phci(pptr->port_dip, circ);
13212 		mdi_rele_path(PIP(cip));
13213 
13214 		if (rval == MDI_SUCCESS) {
13215 			mutex_enter(&ptgt->tgt_mutex);
13216 			plun->lun_state |= FCP_LUN_INIT;
13217 			mutex_exit(&ptgt->tgt_mutex);
13218 
13219 			/*
13220 			 * Clear MPxIO path permanent disable in case
13221 			 * fcp hotplug dropped the offline event.
13222 			 */
13223 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13224 
13225 		} else if (rval == MDI_NOT_SUPPORTED) {
13226 			child_info_t	*old_cip = cip;
13227 
13228 			/*
13229 			 * MPxIO does not support this device yet.
13230 			 * Enumerate in legacy mode.
13231 			 */
13232 			mutex_enter(&pptr->port_mutex);
13233 			mutex_enter(&plun->lun_mutex);
13234 			plun->lun_mpxio = 0;
13235 			plun->lun_cip = NULL;
13236 			cdip = fcp_create_dip(plun, lcount, tcount);
13237 			plun->lun_cip = cip = CIP(cdip);
13238 			if (cip == NULL) {
13239 				fcp_log(CE_WARN, pptr->port_dip,
13240 				    "!fcp_online_child: "
13241 				    "Create devinfo failed for LU=%p", plun);
13242 				mutex_exit(&plun->lun_mutex);
13243 
13244 				mutex_enter(&ptgt->tgt_mutex);
13245 				plun->lun_state |= FCP_LUN_OFFLINE;
13246 				mutex_exit(&ptgt->tgt_mutex);
13247 
13248 				mutex_exit(&pptr->port_mutex);
13249 
13250 				/*
13251 				 * free the mdi_pathinfo node
13252 				 */
13253 				(void) mdi_pi_free(PIP(old_cip), 0);
13254 			} else {
13255 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13256 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13257 				    "fcp_online_child: creating devinfo "
13258 				    "node 0x%p for plun 0x%p",
13259 				    cip, plun);
13260 				mutex_exit(&plun->lun_mutex);
13261 				mutex_exit(&pptr->port_mutex);
13262 				/*
13263 				 * free the mdi_pathinfo node
13264 				 */
13265 				(void) mdi_pi_free(PIP(old_cip), 0);
13266 				mutex_enter(&pptr->port_mutex);
13267 				mutex_enter(&plun->lun_mutex);
13268 				goto again;
13269 			}
13270 		} else {
13271 			if (cdip) {
13272 				fcp_log(CE_NOTE, pptr->port_dip,
13273 				    "!fcp_online_child: mdi_pi_online:"
13274 				    " failed for %s: target=%x lun=%x %x",
13275 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13276 				    plun->lun_num, rval);
13277 			}
13278 		}
13279 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13280 	}
13281 
13282 	if (rval == NDI_SUCCESS) {
13283 		if (cdip) {
13284 			(void) ndi_event_retrieve_cookie(
13285 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13286 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13287 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13288 			    cdip, fcp_insert_eid, NULL);
13289 		}
13290 	}
13291 	mutex_enter(&pptr->port_mutex);
13292 	mutex_enter(&plun->lun_mutex);
13293 	return (rval);
13294 }
13295 
13296 /* ARGSUSED */
13297 static int
13298 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13299     int tcount, int flags, int *circ)
13300 {
13301 	int		rval;
13302 	int		lun_mpxio;
13303 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
13304 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13305 	dev_info_t	*cdip;
13306 
13307 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13308 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13309 
13310 	if (plun->lun_cip == NULL) {
13311 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13312 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13313 		    "fcp_offline_child: plun->lun_cip is NULL: "
13314 		    "plun: %p lun state: %x num: %d target state: %x",
13315 		    plun, plun->lun_state, plun->lun_num,
13316 		    plun->lun_tgt->tgt_port->port_state);
13317 		return (NDI_FAILURE);
13318 	}
13319 
13320 	/*
13321 	 * We will use this value twice. Make a copy to be sure we use
13322 	 * the same value in both places.
13323 	 */
13324 	lun_mpxio = plun->lun_mpxio;
13325 
13326 	if (lun_mpxio == 0) {
13327 		cdip = DIP(cip);
13328 		mutex_exit(&plun->lun_mutex);
13329 		mutex_exit(&pptr->port_mutex);
13330 		rval = ndi_devi_offline(DIP(cip), flags);
13331 		if (rval != NDI_SUCCESS) {
13332 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13333 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13334 			    "fcp_offline_child: ndi_devi_offline failed "
13335 			    "rval=%x cip=%p", rval, cip);
13336 		}
13337 	} else {
13338 		cdip = mdi_pi_get_client(PIP(cip));
13339 		mutex_exit(&plun->lun_mutex);
13340 		mutex_exit(&pptr->port_mutex);
13341 
13342 		/*
13343 		 * Exit phci to avoid deadlock with power management code
13344 		 * during mdi_pi_offline
13345 		 */
13346 		mdi_hold_path(PIP(cip));
13347 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13348 
13349 		rval = mdi_pi_offline(PIP(cip), flags);
13350 
13351 		mdi_devi_enter_phci(pptr->port_dip, circ);
13352 		mdi_rele_path(PIP(cip));
13353 
13354 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13355 	}
13356 
13357 	mutex_enter(&ptgt->tgt_mutex);
13358 	plun->lun_state &= ~FCP_LUN_INIT;
13359 	mutex_exit(&ptgt->tgt_mutex);
13360 
13361 	if (rval == NDI_SUCCESS) {
13362 		cdip = NULL;
13363 		if (flags & NDI_DEVI_REMOVE) {
13364 			mutex_enter(&plun->lun_mutex);
13365 			/*
13366 			 * If the guid of the LUN changes, lun_cip will not
13367 			 * equal to cip, and after offlining the LUN with the
13368 			 * old guid, we should keep lun_cip since it's the cip
13369 			 * of the LUN with the new guid.
13370 			 * Otherwise remove our reference to child node.
13371 			 *
13372 			 * This must be done before the child node is freed,
13373 			 * otherwise other threads could see a stale lun_cip
13374 			 * pointer.
13375 			 */
13376 			if (plun->lun_cip == cip) {
13377 				plun->lun_cip = NULL;
13378 			}
13379 			if (plun->lun_old_guid) {
13380 				kmem_free(plun->lun_old_guid,
13381 				    plun->lun_old_guid_size);
13382 				plun->lun_old_guid = NULL;
13383 				plun->lun_old_guid_size = 0;
13384 			}
13385 			mutex_exit(&plun->lun_mutex);
13386 		}
13387 	}
13388 
13389 	if (lun_mpxio != 0) {
13390 		if (rval == NDI_SUCCESS) {
13391 			/*
13392 			 * Clear MPxIO path permanent disable as the path is
13393 			 * already offlined.
13394 			 */
13395 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13396 
13397 			if (flags & NDI_DEVI_REMOVE) {
13398 				(void) mdi_pi_free(PIP(cip), 0);
13399 			}
13400 		} else {
13401 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13402 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13403 			    "fcp_offline_child: mdi_pi_offline failed "
13404 			    "rval=%x cip=%p", rval, cip);
13405 		}
13406 	}
13407 
13408 	mutex_enter(&pptr->port_mutex);
13409 	mutex_enter(&plun->lun_mutex);
13410 
13411 	if (cdip) {
13412 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13413 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13414 		    " target=%x lun=%x", "ndi_offline",
13415 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13416 	}
13417 
13418 	return (rval);
13419 }
13420 
13421 static void
13422 fcp_remove_child(struct fcp_lun *plun)
13423 {
13424 	child_info_t *cip;
13425 	int circ;
13426 
13427 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13428 
13429 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13430 		if (plun->lun_mpxio == 0) {
13431 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13432 			(void) ndi_devi_free(DIP(plun->lun_cip));
13433 			plun->lun_cip = NULL;
13434 		} else {
13435 			/*
13436 			 * Clear reference to the child node in the lun.
13437 			 * This must be done before freeing it with mdi_pi_free
13438 			 * and with lun_mutex held so that other threads always
13439 			 * see either valid lun_cip or NULL when holding
13440 			 * lun_mutex. We keep a copy in cip.
13441 			 */
13442 			cip = plun->lun_cip;
13443 			plun->lun_cip = NULL;
13444 
13445 			mutex_exit(&plun->lun_mutex);
13446 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13447 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13448 
13449 			mdi_devi_enter(
13450 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13451 
13452 			/*
13453 			 * Exit phci to avoid deadlock with power management
13454 			 * code during mdi_pi_offline
13455 			 */
13456 			mdi_hold_path(PIP(cip));
13457 			mdi_devi_exit_phci(
13458 			    plun->lun_tgt->tgt_port->port_dip, circ);
13459 			(void) mdi_pi_offline(PIP(cip),
13460 			    NDI_DEVI_REMOVE);
13461 			mdi_devi_enter_phci(
13462 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13463 			mdi_rele_path(PIP(cip));
13464 
13465 			mdi_devi_exit(
13466 			    plun->lun_tgt->tgt_port->port_dip, circ);
13467 
13468 			FCP_TRACE(fcp_logq,
13469 			    plun->lun_tgt->tgt_port->port_instbuf,
13470 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13471 			    "lun=%p pip freed %p", plun, cip);
13472 
13473 			(void) mdi_prop_remove(PIP(cip), NULL);
13474 			(void) mdi_pi_free(PIP(cip), 0);
13475 
13476 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13477 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13478 			mutex_enter(&plun->lun_mutex);
13479 		}
13480 	} else {
13481 		plun->lun_cip = NULL;
13482 	}
13483 }
13484 
13485 /*
13486  * called when a timeout occurs
13487  *
13488  * can be scheduled during an attach or resume (if not already running)
13489  *
13490  * one timeout is set up for all ports
13491  *
13492  * acquires and releases the global mutex
13493  */
13494 /*ARGSUSED*/
13495 static void
13496 fcp_watch(void *arg)
13497 {
13498 	struct fcp_port	*pptr;
13499 	struct fcp_ipkt	*icmd;
13500 	struct fcp_ipkt	*nicmd;
13501 	struct fcp_pkt	*cmd;
13502 	struct fcp_pkt	*ncmd;
13503 	struct fcp_pkt	*tail;
13504 	struct fcp_pkt	*pcmd;
13505 	struct fcp_pkt	*save_head;
13506 	struct fcp_port	*save_port;
13507 
13508 	/* increment global watchdog time */
13509 	fcp_watchdog_time += fcp_watchdog_timeout;
13510 
13511 	mutex_enter(&fcp_global_mutex);
13512 
13513 	/* scan each port in our list */
13514 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13515 		save_port = fcp_port_head;
13516 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13517 		mutex_exit(&fcp_global_mutex);
13518 
13519 		mutex_enter(&pptr->port_mutex);
13520 		if (pptr->port_ipkt_list == NULL &&
13521 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13522 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13523 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13524 			mutex_exit(&pptr->port_mutex);
13525 			mutex_enter(&fcp_global_mutex);
13526 			goto end_of_watchdog;
13527 		}
13528 
13529 		/*
13530 		 * We check if a list of targets need to be offlined.
13531 		 */
13532 		if (pptr->port_offline_tgts) {
13533 			fcp_scan_offline_tgts(pptr);
13534 		}
13535 
13536 		/*
13537 		 * We check if a list of luns need to be offlined.
13538 		 */
13539 		if (pptr->port_offline_luns) {
13540 			fcp_scan_offline_luns(pptr);
13541 		}
13542 
13543 		/*
13544 		 * We check if a list of targets or luns need to be reset.
13545 		 */
13546 		if (pptr->port_reset_list) {
13547 			fcp_check_reset_delay(pptr);
13548 		}
13549 
13550 		mutex_exit(&pptr->port_mutex);
13551 
13552 		/*
13553 		 * This is where the pending commands (pkt) are checked for
13554 		 * timeout.
13555 		 */
13556 		mutex_enter(&pptr->port_pkt_mutex);
13557 		tail = pptr->port_pkt_tail;
13558 
13559 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13560 		    cmd != NULL; cmd = ncmd) {
13561 			ncmd = cmd->cmd_next;
13562 			/*
13563 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13564 			 * must be set.
13565 			 */
13566 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13567 			/*
13568 			 * FCP_INVALID_TIMEOUT will be set for those
13569 			 * command that need to be failed. Mostly those
13570 			 * cmds that could not be queued down for the
13571 			 * "timeout" value. cmd->cmd_timeout is used
13572 			 * to try and requeue the command regularly.
13573 			 */
13574 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13575 				/*
13576 				 * This command hasn't timed out yet.  Let's
13577 				 * go to the next one.
13578 				 */
13579 				pcmd = cmd;
13580 				goto end_of_loop;
13581 			}
13582 
13583 			if (cmd == pptr->port_pkt_head) {
13584 				ASSERT(pcmd == NULL);
13585 				pptr->port_pkt_head = cmd->cmd_next;
13586 			} else {
13587 				ASSERT(pcmd != NULL);
13588 				pcmd->cmd_next = cmd->cmd_next;
13589 			}
13590 
13591 			if (cmd == pptr->port_pkt_tail) {
13592 				ASSERT(cmd->cmd_next == NULL);
13593 				pptr->port_pkt_tail = pcmd;
13594 				if (pcmd) {
13595 					pcmd->cmd_next = NULL;
13596 				}
13597 			}
13598 			cmd->cmd_next = NULL;
13599 
13600 			/*
13601 			 * save the current head before dropping the
13602 			 * mutex - If the head doesn't remain the
13603 			 * same after re acquiring the mutex, just
13604 			 * bail out and revisit on next tick.
13605 			 *
13606 			 * PS: The tail pointer can change as the commands
13607 			 * get requeued after failure to retransport
13608 			 */
13609 			save_head = pptr->port_pkt_head;
13610 			mutex_exit(&pptr->port_pkt_mutex);
13611 
13612 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13613 			    FCP_INVALID_TIMEOUT) {
13614 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13615 				struct fcp_lun	*plun;
13616 				struct fcp_tgt	*ptgt;
13617 
13618 				plun = ADDR2LUN(&pkt->pkt_address);
13619 				ptgt = plun->lun_tgt;
13620 
13621 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13622 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13623 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13624 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13625 
13626 				cmd->cmd_state == FCP_PKT_ABORTING ?
13627 				    fcp_fail_cmd(cmd, CMD_RESET,
13628 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13629 				    CMD_TIMEOUT, STAT_ABORTED);
13630 			} else {
13631 				fcp_retransport_cmd(pptr, cmd);
13632 			}
13633 			mutex_enter(&pptr->port_pkt_mutex);
13634 			if (save_head && save_head != pptr->port_pkt_head) {
13635 				/*
13636 				 * Looks like linked list got changed (mostly
13637 				 * happens when an an OFFLINE LUN code starts
13638 				 * returning overflow queue commands in
13639 				 * parallel. So bail out and revisit during
13640 				 * next tick
13641 				 */
13642 				break;
13643 			}
13644 		end_of_loop:
13645 			/*
13646 			 * Scan only upto the previously known tail pointer
13647 			 * to avoid excessive processing - lots of new packets
13648 			 * could have been added to the tail or the old ones
13649 			 * re-queued.
13650 			 */
13651 			if (cmd == tail) {
13652 				break;
13653 			}
13654 		}
13655 		mutex_exit(&pptr->port_pkt_mutex);
13656 
13657 		mutex_enter(&pptr->port_mutex);
13658 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13659 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13660 
13661 			nicmd = icmd->ipkt_next;
13662 			if ((icmd->ipkt_restart != 0) &&
13663 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13664 				/* packet has not timed out */
13665 				continue;
13666 			}
13667 
13668 			/* time for packet re-transport */
13669 			if (icmd == pptr->port_ipkt_list) {
13670 				pptr->port_ipkt_list = icmd->ipkt_next;
13671 				if (pptr->port_ipkt_list) {
13672 					pptr->port_ipkt_list->ipkt_prev =
13673 					    NULL;
13674 				}
13675 			} else {
13676 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13677 				if (icmd->ipkt_next) {
13678 					icmd->ipkt_next->ipkt_prev =
13679 					    icmd->ipkt_prev;
13680 				}
13681 			}
13682 			icmd->ipkt_next = NULL;
13683 			icmd->ipkt_prev = NULL;
13684 			mutex_exit(&pptr->port_mutex);
13685 
13686 			if (fcp_is_retryable(icmd)) {
13687 				fc_ulp_rscn_info_t *rscnp =
13688 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13689 				    pkt_ulp_rscn_infop;
13690 
13691 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13692 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13693 				    "%x to D_ID=%x Retrying..",
13694 				    icmd->ipkt_opcode,
13695 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13696 
13697 				/*
13698 				 * Update the RSCN count in the packet
13699 				 * before resending.
13700 				 */
13701 
13702 				if (rscnp != NULL) {
13703 					rscnp->ulp_rscn_count =
13704 					    fc_ulp_get_rscn_count(pptr->
13705 					    port_fp_handle);
13706 				}
13707 
13708 				mutex_enter(&pptr->port_mutex);
13709 				mutex_enter(&ptgt->tgt_mutex);
13710 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13711 					mutex_exit(&ptgt->tgt_mutex);
13712 					mutex_exit(&pptr->port_mutex);
13713 					switch (icmd->ipkt_opcode) {
13714 						int rval;
13715 					case LA_ELS_PLOGI:
13716 						if ((rval = fc_ulp_login(
13717 						    pptr->port_fp_handle,
13718 						    &icmd->ipkt_fpkt, 1)) ==
13719 						    FC_SUCCESS) {
13720 							mutex_enter(
13721 							    &pptr->port_mutex);
13722 							continue;
13723 						}
13724 						if (fcp_handle_ipkt_errors(
13725 						    pptr, ptgt, icmd, rval,
13726 						    "PLOGI") == DDI_SUCCESS) {
13727 							mutex_enter(
13728 							    &pptr->port_mutex);
13729 							continue;
13730 						}
13731 						break;
13732 
13733 					case LA_ELS_PRLI:
13734 						if ((rval = fc_ulp_issue_els(
13735 						    pptr->port_fp_handle,
13736 						    icmd->ipkt_fpkt)) ==
13737 						    FC_SUCCESS) {
13738 							mutex_enter(
13739 							    &pptr->port_mutex);
13740 							continue;
13741 						}
13742 						if (fcp_handle_ipkt_errors(
13743 						    pptr, ptgt, icmd, rval,
13744 						    "PRLI") == DDI_SUCCESS) {
13745 							mutex_enter(
13746 							    &pptr->port_mutex);
13747 							continue;
13748 						}
13749 						break;
13750 
13751 					default:
13752 						if ((rval = fcp_transport(
13753 						    pptr->port_fp_handle,
13754 						    icmd->ipkt_fpkt, 1)) ==
13755 						    FC_SUCCESS) {
13756 							mutex_enter(
13757 							    &pptr->port_mutex);
13758 							continue;
13759 						}
13760 						if (fcp_handle_ipkt_errors(
13761 						    pptr, ptgt, icmd, rval,
13762 						    "PRLI") == DDI_SUCCESS) {
13763 							mutex_enter(
13764 							    &pptr->port_mutex);
13765 							continue;
13766 						}
13767 						break;
13768 					}
13769 				} else {
13770 					mutex_exit(&ptgt->tgt_mutex);
13771 					mutex_exit(&pptr->port_mutex);
13772 				}
13773 			} else {
13774 				fcp_print_error(icmd->ipkt_fpkt);
13775 			}
13776 
13777 			(void) fcp_call_finish_init(pptr, ptgt,
13778 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13779 			    icmd->ipkt_cause);
13780 			fcp_icmd_free(pptr, icmd);
13781 			mutex_enter(&pptr->port_mutex);
13782 		}
13783 
13784 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13785 		mutex_exit(&pptr->port_mutex);
13786 		mutex_enter(&fcp_global_mutex);
13787 
13788 	end_of_watchdog:
13789 		/*
13790 		 * Bail out early before getting into trouble
13791 		 */
13792 		if (save_port != fcp_port_head) {
13793 			break;
13794 		}
13795 	}
13796 
13797 	if (fcp_watchdog_init > 0) {
13798 		/* reschedule timeout to go again */
13799 		fcp_watchdog_id =
13800 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13801 	}
13802 	mutex_exit(&fcp_global_mutex);
13803 }
13804 
13805 
13806 static void
13807 fcp_check_reset_delay(struct fcp_port *pptr)
13808 {
13809 	uint32_t		tgt_cnt;
13810 	int			level;
13811 	struct fcp_tgt	*ptgt;
13812 	struct fcp_lun	*plun;
13813 	struct fcp_reset_elem *cur = NULL;
13814 	struct fcp_reset_elem *next = NULL;
13815 	struct fcp_reset_elem *prev = NULL;
13816 
13817 	ASSERT(mutex_owned(&pptr->port_mutex));
13818 
13819 	next = pptr->port_reset_list;
13820 	while ((cur = next) != NULL) {
13821 		next = cur->next;
13822 
13823 		if (cur->timeout < fcp_watchdog_time) {
13824 			prev = cur;
13825 			continue;
13826 		}
13827 
13828 		ptgt = cur->tgt;
13829 		plun = cur->lun;
13830 		tgt_cnt = cur->tgt_cnt;
13831 
13832 		if (ptgt) {
13833 			level = RESET_TARGET;
13834 		} else {
13835 			ASSERT(plun != NULL);
13836 			level = RESET_LUN;
13837 			ptgt = plun->lun_tgt;
13838 		}
13839 		if (prev) {
13840 			prev->next = next;
13841 		} else {
13842 			/*
13843 			 * Because we drop port mutex while doing aborts for
13844 			 * packets, we can't rely on reset_list pointing to
13845 			 * our head
13846 			 */
13847 			if (cur == pptr->port_reset_list) {
13848 				pptr->port_reset_list = next;
13849 			} else {
13850 				struct fcp_reset_elem *which;
13851 
13852 				which = pptr->port_reset_list;
13853 				while (which && which->next != cur) {
13854 					which = which->next;
13855 				}
13856 				ASSERT(which != NULL);
13857 
13858 				which->next = next;
13859 				prev = which;
13860 			}
13861 		}
13862 
13863 		kmem_free(cur, sizeof (*cur));
13864 
13865 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13866 			mutex_enter(&ptgt->tgt_mutex);
13867 			if (level == RESET_TARGET) {
13868 				fcp_update_tgt_state(ptgt,
13869 				    FCP_RESET, FCP_LUN_BUSY);
13870 			} else {
13871 				fcp_update_lun_state(plun,
13872 				    FCP_RESET, FCP_LUN_BUSY);
13873 			}
13874 			mutex_exit(&ptgt->tgt_mutex);
13875 
13876 			mutex_exit(&pptr->port_mutex);
13877 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13878 			mutex_enter(&pptr->port_mutex);
13879 		}
13880 	}
13881 }
13882 
13883 
13884 static void
13885 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13886     struct fcp_lun *rlun, int tgt_cnt)
13887 {
13888 	int			rval;
13889 	struct fcp_lun	*tlun, *nlun;
13890 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13891 	    *cmd = NULL, *head = NULL,
13892 	    *tail = NULL;
13893 
13894 	mutex_enter(&pptr->port_pkt_mutex);
13895 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13896 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13897 		struct fcp_tgt *ptgt = plun->lun_tgt;
13898 
13899 		ncmd = cmd->cmd_next;
13900 
13901 		if (ptgt != ttgt && plun != rlun) {
13902 			pcmd = cmd;
13903 			continue;
13904 		}
13905 
13906 		if (pcmd != NULL) {
13907 			ASSERT(pptr->port_pkt_head != cmd);
13908 			pcmd->cmd_next = ncmd;
13909 		} else {
13910 			ASSERT(cmd == pptr->port_pkt_head);
13911 			pptr->port_pkt_head = ncmd;
13912 		}
13913 		if (pptr->port_pkt_tail == cmd) {
13914 			ASSERT(cmd->cmd_next == NULL);
13915 			pptr->port_pkt_tail = pcmd;
13916 			if (pcmd != NULL) {
13917 				pcmd->cmd_next = NULL;
13918 			}
13919 		}
13920 
13921 		if (head == NULL) {
13922 			head = tail = cmd;
13923 		} else {
13924 			ASSERT(tail != NULL);
13925 			tail->cmd_next = cmd;
13926 			tail = cmd;
13927 		}
13928 		cmd->cmd_next = NULL;
13929 	}
13930 	mutex_exit(&pptr->port_pkt_mutex);
13931 
13932 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13933 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13934 
13935 		ncmd = cmd->cmd_next;
13936 		ASSERT(pkt != NULL);
13937 
13938 		mutex_enter(&pptr->port_mutex);
13939 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13940 			mutex_exit(&pptr->port_mutex);
13941 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13942 			pkt->pkt_reason = CMD_RESET;
13943 			pkt->pkt_statistics |= STAT_DEV_RESET;
13944 			cmd->cmd_state = FCP_PKT_IDLE;
13945 			fcp_post_callback(cmd);
13946 		} else {
13947 			mutex_exit(&pptr->port_mutex);
13948 		}
13949 	}
13950 
13951 	/*
13952 	 * If the FCA will return all the commands in its queue then our
13953 	 * work is easy, just return.
13954 	 */
13955 
13956 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13957 		return;
13958 	}
13959 
13960 	/*
13961 	 * For RESET_LUN get hold of target pointer
13962 	 */
13963 	if (ttgt == NULL) {
13964 		ASSERT(rlun != NULL);
13965 
13966 		ttgt = rlun->lun_tgt;
13967 
13968 		ASSERT(ttgt != NULL);
13969 	}
13970 
13971 	/*
13972 	 * There are some severe race conditions here.
13973 	 * While we are trying to abort the pkt, it might be completing
13974 	 * so mark it aborted and if the abort does not succeed then
13975 	 * handle it in the watch thread.
13976 	 */
13977 	mutex_enter(&ttgt->tgt_mutex);
13978 	nlun = ttgt->tgt_lun;
13979 	mutex_exit(&ttgt->tgt_mutex);
13980 	while ((tlun = nlun) != NULL) {
13981 		int restart = 0;
13982 		if (rlun && rlun != tlun) {
13983 			mutex_enter(&ttgt->tgt_mutex);
13984 			nlun = tlun->lun_next;
13985 			mutex_exit(&ttgt->tgt_mutex);
13986 			continue;
13987 		}
13988 		mutex_enter(&tlun->lun_mutex);
13989 		cmd = tlun->lun_pkt_head;
13990 		while (cmd != NULL) {
13991 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13992 				struct scsi_pkt *pkt;
13993 
13994 				restart = 1;
13995 				cmd->cmd_state = FCP_PKT_ABORTING;
13996 				mutex_exit(&tlun->lun_mutex);
13997 				rval = fc_ulp_abort(pptr->port_fp_handle,
13998 				    cmd->cmd_fp_pkt, KM_SLEEP);
13999 				if (rval == FC_SUCCESS) {
14000 					pkt = cmd->cmd_pkt;
14001 					pkt->pkt_reason = CMD_RESET;
14002 					pkt->pkt_statistics |= STAT_DEV_RESET;
14003 					cmd->cmd_state = FCP_PKT_IDLE;
14004 					fcp_post_callback(cmd);
14005 				} else {
14006 					caddr_t msg;
14007 
14008 					(void) fc_ulp_error(rval, &msg);
14009 
14010 					/*
14011 					 * This part is tricky. The abort
14012 					 * failed and now the command could
14013 					 * be completing.  The cmd_state ==
14014 					 * FCP_PKT_ABORTING should save
14015 					 * us in fcp_cmd_callback. If we
14016 					 * are already aborting ignore the
14017 					 * command in fcp_cmd_callback.
14018 					 * Here we leave this packet for 20
14019 					 * sec to be aborted in the
14020 					 * fcp_watch thread.
14021 					 */
14022 					fcp_log(CE_WARN, pptr->port_dip,
14023 					    "!Abort failed after reset %s",
14024 					    msg);
14025 
14026 					cmd->cmd_timeout =
14027 					    fcp_watchdog_time +
14028 					    cmd->cmd_pkt->pkt_time +
14029 					    FCP_FAILED_DELAY;
14030 
14031 					cmd->cmd_fp_pkt->pkt_timeout =
14032 					    FCP_INVALID_TIMEOUT;
14033 					/*
14034 					 * This is a hack, cmd is put in the
14035 					 * overflow queue so that it can be
14036 					 * timed out finally
14037 					 */
14038 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
14039 
14040 					mutex_enter(&pptr->port_pkt_mutex);
14041 					if (pptr->port_pkt_head) {
14042 						ASSERT(pptr->port_pkt_tail
14043 						    != NULL);
14044 						pptr->port_pkt_tail->cmd_next
14045 						    = cmd;
14046 						pptr->port_pkt_tail = cmd;
14047 					} else {
14048 						ASSERT(pptr->port_pkt_tail
14049 						    == NULL);
14050 						pptr->port_pkt_head =
14051 						    pptr->port_pkt_tail
14052 						    = cmd;
14053 					}
14054 					cmd->cmd_next = NULL;
14055 					mutex_exit(&pptr->port_pkt_mutex);
14056 				}
14057 				mutex_enter(&tlun->lun_mutex);
14058 				cmd = tlun->lun_pkt_head;
14059 			} else {
14060 				cmd = cmd->cmd_forw;
14061 			}
14062 		}
14063 		mutex_exit(&tlun->lun_mutex);
14064 
14065 		mutex_enter(&ttgt->tgt_mutex);
14066 		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
14067 		mutex_exit(&ttgt->tgt_mutex);
14068 
14069 		mutex_enter(&pptr->port_mutex);
14070 		if (tgt_cnt != ttgt->tgt_change_cnt) {
14071 			mutex_exit(&pptr->port_mutex);
14072 			return;
14073 		} else {
14074 			mutex_exit(&pptr->port_mutex);
14075 		}
14076 	}
14077 }
14078 
14079 
14080 /*
14081  * unlink the soft state, returning the soft state found (if any)
14082  *
14083  * acquires and releases the global mutex
14084  */
14085 struct fcp_port *
14086 fcp_soft_state_unlink(struct fcp_port *pptr)
14087 {
14088 	struct fcp_port	*hptr;		/* ptr index */
14089 	struct fcp_port	*tptr;		/* prev hptr */
14090 
14091 	mutex_enter(&fcp_global_mutex);
14092 	for (hptr = fcp_port_head, tptr = NULL;
14093 	    hptr != NULL;
14094 	    tptr = hptr, hptr = hptr->port_next) {
14095 		if (hptr == pptr) {
14096 			/* we found a match -- remove this item */
14097 			if (tptr == NULL) {
14098 				/* we're at the head of the list */
14099 				fcp_port_head = hptr->port_next;
14100 			} else {
14101 				tptr->port_next = hptr->port_next;
14102 			}
14103 			break;			/* success */
14104 		}
14105 	}
14106 	if (fcp_port_head == NULL) {
14107 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
14108 	}
14109 	mutex_exit(&fcp_global_mutex);
14110 	return (hptr);
14111 }
14112 
14113 
14114 /*
14115  * called by fcp_scsi_hba_tgt_init to find a LUN given a
14116  * WWN and a LUN number
14117  */
14118 /* ARGSUSED */
14119 static struct fcp_lun *
14120 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14121 {
14122 	int hash;
14123 	struct fcp_tgt *ptgt;
14124 	struct fcp_lun *plun;
14125 
14126 	ASSERT(mutex_owned(&pptr->port_mutex));
14127 
14128 	hash = FCP_HASH(wwn);
14129 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14130 	    ptgt = ptgt->tgt_next) {
14131 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14132 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
14133 			mutex_enter(&ptgt->tgt_mutex);
14134 			for (plun = ptgt->tgt_lun;
14135 			    plun != NULL;
14136 			    plun = plun->lun_next) {
14137 				if (plun->lun_num == lun) {
14138 					mutex_exit(&ptgt->tgt_mutex);
14139 					return (plun);
14140 				}
14141 			}
14142 			mutex_exit(&ptgt->tgt_mutex);
14143 			return (NULL);
14144 		}
14145 	}
14146 	return (NULL);
14147 }
14148 
14149 /*
14150  *     Function: fcp_prepare_pkt
14151  *
14152  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
14153  *		 for fcp_start(). It binds the data or partially maps it.
14154  *		 Builds the FCP header and starts the initialization of the
14155  *		 Fibre Channel header.
14156  *
14157  *     Argument: *pptr		FCP port.
14158  *		 *cmd		FCP packet.
14159  *		 *plun		LUN the command will be sent to.
14160  *
14161  *	Context: User, Kernel and Interrupt context.
14162  */
14163 static void
14164 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14165     struct fcp_lun *plun)
14166 {
14167 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
14168 	struct fcp_tgt		*ptgt = plun->lun_tgt;
14169 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
14170 
14171 	ASSERT(cmd->cmd_pkt->pkt_comp ||
14172 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14173 
14174 	if (cmd->cmd_pkt->pkt_numcookies) {
14175 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14176 			fcmd->fcp_cntl.cntl_read_data = 1;
14177 			fcmd->fcp_cntl.cntl_write_data = 0;
14178 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14179 		} else {
14180 			fcmd->fcp_cntl.cntl_read_data = 0;
14181 			fcmd->fcp_cntl.cntl_write_data = 1;
14182 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14183 		}
14184 
14185 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14186 
14187 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14188 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14189 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14190 
14191 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14192 
14193 		/* FCA needs pkt_datalen to be set */
14194 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14195 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14196 	} else {
14197 		fcmd->fcp_cntl.cntl_read_data = 0;
14198 		fcmd->fcp_cntl.cntl_write_data = 0;
14199 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14200 		fpkt->pkt_datalen = 0;
14201 		fcmd->fcp_data_len = 0;
14202 	}
14203 
14204 	/* set up the Tagged Queuing type */
14205 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14206 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14207 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14208 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14209 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14210 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14211 	} else {
14212 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14213 	}
14214 
14215 	fcmd->fcp_ent_addr = plun->lun_addr;
14216 
14217 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14218 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14219 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14220 	} else {
14221 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14222 	}
14223 
14224 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14225 	cmd->cmd_pkt->pkt_state = 0;
14226 	cmd->cmd_pkt->pkt_statistics = 0;
14227 	cmd->cmd_pkt->pkt_resid = 0;
14228 
14229 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14230 
14231 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14232 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14233 		fpkt->pkt_comp = NULL;
14234 	} else {
14235 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14236 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14237 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14238 		}
14239 		fpkt->pkt_comp = fcp_cmd_callback;
14240 	}
14241 
14242 	mutex_enter(&pptr->port_mutex);
14243 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14244 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14245 	}
14246 	mutex_exit(&pptr->port_mutex);
14247 
14248 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14249 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14250 
14251 	/*
14252 	 * Save a few kernel cycles here
14253 	 */
14254 #ifndef	__lock_lint
14255 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14256 #endif /* __lock_lint */
14257 }
14258 
14259 static void
14260 fcp_post_callback(struct fcp_pkt *cmd)
14261 {
14262 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14263 }
14264 
14265 
14266 /*
14267  * called to do polled I/O by fcp_start()
14268  *
14269  * return a transport status value, i.e. TRAN_ACCECPT for success
14270  */
14271 static int
14272 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14273 {
14274 	int	rval;
14275 
14276 #ifdef	DEBUG
14277 	mutex_enter(&pptr->port_pkt_mutex);
14278 	pptr->port_npkts++;
14279 	mutex_exit(&pptr->port_pkt_mutex);
14280 #endif /* DEBUG */
14281 
14282 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14283 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14284 	} else {
14285 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14286 	}
14287 
14288 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14289 
14290 	cmd->cmd_state = FCP_PKT_ISSUED;
14291 
14292 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14293 
14294 #ifdef	DEBUG
14295 	mutex_enter(&pptr->port_pkt_mutex);
14296 	pptr->port_npkts--;
14297 	mutex_exit(&pptr->port_pkt_mutex);
14298 #endif /* DEBUG */
14299 
14300 	cmd->cmd_state = FCP_PKT_IDLE;
14301 
14302 	switch (rval) {
14303 	case FC_SUCCESS:
14304 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14305 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14306 			rval = TRAN_ACCEPT;
14307 		} else {
14308 			rval = TRAN_FATAL_ERROR;
14309 		}
14310 		break;
14311 
14312 	case FC_TRAN_BUSY:
14313 		rval = TRAN_BUSY;
14314 		cmd->cmd_pkt->pkt_resid = 0;
14315 		break;
14316 
14317 	case FC_BADPACKET:
14318 		rval = TRAN_BADPKT;
14319 		break;
14320 
14321 	default:
14322 		rval = TRAN_FATAL_ERROR;
14323 		break;
14324 	}
14325 
14326 	return (rval);
14327 }
14328 
14329 
14330 /*
14331  * called by some of the following transport-called routines to convert
14332  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14333  */
14334 static struct fcp_port *
14335 fcp_dip2port(dev_info_t *dip)
14336 {
14337 	int	instance;
14338 
14339 	instance = ddi_get_instance(dip);
14340 	return (ddi_get_soft_state(fcp_softstate, instance));
14341 }
14342 
14343 
14344 /*
14345  * called internally to return a LUN given a dip
14346  */
14347 struct fcp_lun *
14348 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14349 {
14350 	struct fcp_tgt *ptgt;
14351 	struct fcp_lun *plun;
14352 	int i;
14353 
14354 
14355 	ASSERT(mutex_owned(&pptr->port_mutex));
14356 
14357 	for (i = 0; i < FCP_NUM_HASH; i++) {
14358 		for (ptgt = pptr->port_tgt_hash_table[i];
14359 		    ptgt != NULL;
14360 		    ptgt = ptgt->tgt_next) {
14361 			mutex_enter(&ptgt->tgt_mutex);
14362 			for (plun = ptgt->tgt_lun; plun != NULL;
14363 			    plun = plun->lun_next) {
14364 				mutex_enter(&plun->lun_mutex);
14365 				if (plun->lun_cip == cip) {
14366 					mutex_exit(&plun->lun_mutex);
14367 					mutex_exit(&ptgt->tgt_mutex);
14368 					return (plun); /* match found */
14369 				}
14370 				mutex_exit(&plun->lun_mutex);
14371 			}
14372 			mutex_exit(&ptgt->tgt_mutex);
14373 		}
14374 	}
14375 	return (NULL);				/* no LUN found */
14376 }
14377 
14378 /*
14379  * pass an element to the hotplug list, kick the hotplug thread
14380  * and wait for the element to get processed by the hotplug thread.
14381  * on return the element is freed.
14382  *
14383  * return zero success and non-zero on failure
14384  *
14385  * acquires/releases the target mutex
14386  *
14387  */
14388 static int
14389 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14390     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14391 {
14392 	struct fcp_hp_elem	*elem;
14393 	int			rval;
14394 
14395 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14396 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14397 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14398 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14399 		fcp_log(CE_CONT, pptr->port_dip,
14400 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14401 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14402 		return (NDI_FAILURE);
14403 	}
14404 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14405 	mutex_enter(&elem->mutex);
14406 	if (elem->wait) {
14407 		while (elem->wait) {
14408 			cv_wait(&elem->cv, &elem->mutex);
14409 		}
14410 	}
14411 	rval = (elem->result);
14412 	mutex_exit(&elem->mutex);
14413 	mutex_destroy(&elem->mutex);
14414 	cv_destroy(&elem->cv);
14415 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14416 	return (rval);
14417 }
14418 
14419 /*
14420  * pass an element to the hotplug list, and then
14421  * kick the hotplug thread
14422  *
14423  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14424  *
14425  * acquires/releases the hotplug mutex
14426  *
14427  * called with the target mutex owned
14428  *
14429  * memory acquired in NOSLEEP mode
14430  * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14431  *	 for the hp daemon to process the request and is responsible for
14432  *	 freeing the element
14433  */
14434 static struct fcp_hp_elem *
14435 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14436     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14437 {
14438 	struct fcp_hp_elem	*elem;
14439 	dev_info_t *pdip;
14440 
14441 	ASSERT(pptr != NULL);
14442 	ASSERT(plun != NULL);
14443 	ASSERT(plun->lun_tgt != NULL);
14444 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14445 
14446 	/* create space for a hotplug element */
14447 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14448 	    == NULL) {
14449 		fcp_log(CE_WARN, NULL,
14450 		    "!can't allocate memory for hotplug element");
14451 		return (NULL);
14452 	}
14453 
14454 	/* fill in hotplug element */
14455 	elem->port = pptr;
14456 	elem->lun = plun;
14457 	elem->cip = cip;
14458 	elem->old_lun_mpxio = plun->lun_mpxio;
14459 	elem->what = what;
14460 	elem->flags = flags;
14461 	elem->link_cnt = link_cnt;
14462 	elem->tgt_cnt = tgt_cnt;
14463 	elem->wait = wait;
14464 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14465 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14466 
14467 	/* schedule the hotplug task */
14468 	pdip = pptr->port_dip;
14469 	mutex_enter(&plun->lun_mutex);
14470 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14471 		plun->lun_event_count++;
14472 		elem->event_cnt = plun->lun_event_count;
14473 	}
14474 	mutex_exit(&plun->lun_mutex);
14475 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14476 	    (void *)elem, KM_NOSLEEP) == NULL) {
14477 		mutex_enter(&plun->lun_mutex);
14478 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14479 			plun->lun_event_count--;
14480 		}
14481 		mutex_exit(&plun->lun_mutex);
14482 		kmem_free(elem, sizeof (*elem));
14483 		return (0);
14484 	}
14485 
14486 	return (elem);
14487 }
14488 
14489 
14490 static void
14491 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14492 {
14493 	int			rval;
14494 	struct scsi_address	*ap;
14495 	struct fcp_lun	*plun;
14496 	struct fcp_tgt	*ptgt;
14497 	fc_packet_t	*fpkt;
14498 
14499 	ap = &cmd->cmd_pkt->pkt_address;
14500 	plun = ADDR2LUN(ap);
14501 	ptgt = plun->lun_tgt;
14502 
14503 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14504 
14505 	cmd->cmd_state = FCP_PKT_IDLE;
14506 
14507 	mutex_enter(&pptr->port_mutex);
14508 	mutex_enter(&ptgt->tgt_mutex);
14509 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14510 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14511 		fc_ulp_rscn_info_t *rscnp;
14512 
14513 		cmd->cmd_state = FCP_PKT_ISSUED;
14514 
14515 		/*
14516 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14517 		 * originally NULL, hence we try to set it to the pd pointed
14518 		 * to by the SCSI device we're trying to get to.
14519 		 */
14520 
14521 		fpkt = cmd->cmd_fp_pkt;
14522 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14523 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14524 			/*
14525 			 * We need to notify the transport that we now have a
14526 			 * reference to the remote port handle.
14527 			 */
14528 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14529 		}
14530 
14531 		mutex_exit(&ptgt->tgt_mutex);
14532 		mutex_exit(&pptr->port_mutex);
14533 
14534 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14535 
14536 		/* prepare the packet */
14537 
14538 		fcp_prepare_pkt(pptr, cmd, plun);
14539 
14540 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14541 		    pkt_ulp_rscn_infop;
14542 
14543 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14544 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14545 
14546 		if (rscnp != NULL) {
14547 			rscnp->ulp_rscn_count =
14548 			    fc_ulp_get_rscn_count(pptr->
14549 			    port_fp_handle);
14550 		}
14551 
14552 		rval = fcp_transport(pptr->port_fp_handle,
14553 		    cmd->cmd_fp_pkt, 0);
14554 
14555 		if (rval == FC_SUCCESS) {
14556 			return;
14557 		}
14558 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14559 	} else {
14560 		mutex_exit(&ptgt->tgt_mutex);
14561 		mutex_exit(&pptr->port_mutex);
14562 	}
14563 
14564 	fcp_queue_pkt(pptr, cmd);
14565 }
14566 
14567 
14568 static void
14569 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14570 {
14571 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14572 
14573 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14574 	cmd->cmd_state = FCP_PKT_IDLE;
14575 
14576 	cmd->cmd_pkt->pkt_reason = reason;
14577 	cmd->cmd_pkt->pkt_state = 0;
14578 	cmd->cmd_pkt->pkt_statistics = statistics;
14579 
14580 	fcp_post_callback(cmd);
14581 }
14582 
14583 /*
14584  *     Function: fcp_queue_pkt
14585  *
14586  *  Description: This function queues the packet passed by the caller into
14587  *		 the list of packets of the FCP port.
14588  *
14589  *     Argument: *pptr		FCP port.
14590  *		 *cmd		FCP packet to queue.
14591  *
14592  * Return Value: None
14593  *
14594  *	Context: User, Kernel and Interrupt context.
14595  */
14596 static void
14597 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14598 {
14599 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14600 
14601 	mutex_enter(&pptr->port_pkt_mutex);
14602 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14603 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14604 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14605 
14606 	/*
14607 	 * zero pkt_time means hang around for ever
14608 	 */
14609 	if (cmd->cmd_pkt->pkt_time) {
14610 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14611 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14612 		} else {
14613 			/*
14614 			 * Indicate the watch thread to fail the
14615 			 * command by setting it to highest value
14616 			 */
14617 			cmd->cmd_timeout = fcp_watchdog_time;
14618 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14619 		}
14620 	}
14621 
14622 	if (pptr->port_pkt_head) {
14623 		ASSERT(pptr->port_pkt_tail != NULL);
14624 
14625 		pptr->port_pkt_tail->cmd_next = cmd;
14626 		pptr->port_pkt_tail = cmd;
14627 	} else {
14628 		ASSERT(pptr->port_pkt_tail == NULL);
14629 
14630 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14631 	}
14632 	cmd->cmd_next = NULL;
14633 	mutex_exit(&pptr->port_pkt_mutex);
14634 }
14635 
14636 /*
14637  *     Function: fcp_update_targets
14638  *
14639  *  Description: This function applies the specified change of state to all
14640  *		 the targets listed.  The operation applied is 'set'.
14641  *
14642  *     Argument: *pptr		FCP port.
14643  *		 *dev_list	Array of fc_portmap_t structures.
14644  *		 count		Length of dev_list.
14645  *		 state		State bits to update.
14646  *		 cause		Reason for the update.
14647  *
14648  * Return Value: None
14649  *
14650  *	Context: User, Kernel and Interrupt context.
14651  *		 The mutex pptr->port_mutex must be held.
14652  */
14653 static void
14654 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14655     uint32_t count, uint32_t state, int cause)
14656 {
14657 	fc_portmap_t		*map_entry;
14658 	struct fcp_tgt	*ptgt;
14659 
14660 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14661 
14662 	while (count--) {
14663 		map_entry = &(dev_list[count]);
14664 		ptgt = fcp_lookup_target(pptr,
14665 		    (uchar_t *)&(map_entry->map_pwwn));
14666 		if (ptgt == NULL) {
14667 			continue;
14668 		}
14669 
14670 		mutex_enter(&ptgt->tgt_mutex);
14671 		ptgt->tgt_trace = 0;
14672 		ptgt->tgt_change_cnt++;
14673 		ptgt->tgt_statec_cause = cause;
14674 		ptgt->tgt_tmp_cnt = 1;
14675 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14676 		mutex_exit(&ptgt->tgt_mutex);
14677 	}
14678 }
14679 
14680 static int
14681 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14682     int lcount, int tcount, int cause)
14683 {
14684 	int rval;
14685 
14686 	mutex_enter(&pptr->port_mutex);
14687 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14688 	mutex_exit(&pptr->port_mutex);
14689 
14690 	return (rval);
14691 }
14692 
14693 
14694 static int
14695 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14696     int lcount, int tcount, int cause)
14697 {
14698 	int	finish_init = 0;
14699 	int	finish_tgt = 0;
14700 	int	do_finish_init = 0;
14701 	int	rval = FCP_NO_CHANGE;
14702 
14703 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14704 	    cause == FCP_CAUSE_LINK_DOWN) {
14705 		do_finish_init = 1;
14706 	}
14707 
14708 	if (ptgt != NULL) {
14709 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14710 		    FCP_BUF_LEVEL_2, 0,
14711 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14712 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14713 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14714 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14715 		    ptgt->tgt_d_id, ptgt->tgt_done);
14716 
14717 		mutex_enter(&ptgt->tgt_mutex);
14718 
14719 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14720 			rval = FCP_DEV_CHANGE;
14721 			if (do_finish_init && ptgt->tgt_done == 0) {
14722 				ptgt->tgt_done++;
14723 				finish_init = 1;
14724 			}
14725 		} else {
14726 			if (--ptgt->tgt_tmp_cnt <= 0) {
14727 				ptgt->tgt_tmp_cnt = 0;
14728 				finish_tgt = 1;
14729 
14730 				if (do_finish_init) {
14731 					finish_init = 1;
14732 				}
14733 			}
14734 		}
14735 		mutex_exit(&ptgt->tgt_mutex);
14736 	} else {
14737 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14738 		    FCP_BUF_LEVEL_2, 0,
14739 		    "Call Finish Init for NO target");
14740 
14741 		if (do_finish_init) {
14742 			finish_init = 1;
14743 		}
14744 	}
14745 
14746 	if (finish_tgt) {
14747 		ASSERT(ptgt != NULL);
14748 
14749 		mutex_enter(&ptgt->tgt_mutex);
14750 #ifdef	DEBUG
14751 		bzero(ptgt->tgt_tmp_cnt_stack,
14752 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14753 
14754 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14755 		    FCP_STACK_DEPTH);
14756 #endif /* DEBUG */
14757 		mutex_exit(&ptgt->tgt_mutex);
14758 
14759 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14760 	}
14761 
14762 	if (finish_init && lcount == pptr->port_link_cnt) {
14763 		ASSERT(pptr->port_tmp_cnt > 0);
14764 		if (--pptr->port_tmp_cnt == 0) {
14765 			fcp_finish_init(pptr);
14766 		}
14767 	} else if (lcount != pptr->port_link_cnt) {
14768 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14769 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14770 		    "fcp_call_finish_init_held,1: state change occured"
14771 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14772 	}
14773 
14774 	return (rval);
14775 }
14776 
14777 static void
14778 fcp_reconfigure_luns(void * tgt_handle)
14779 {
14780 	uint32_t		dev_cnt;
14781 	fc_portmap_t		*devlist;
14782 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14783 	struct fcp_port		*pptr = ptgt->tgt_port;
14784 
14785 	/*
14786 	 * If the timer that fires this off got canceled too late, the
14787 	 * target could have been destroyed.
14788 	 */
14789 
14790 	if (ptgt->tgt_tid == NULL) {
14791 		return;
14792 	}
14793 
14794 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14795 	if (devlist == NULL) {
14796 		fcp_log(CE_WARN, pptr->port_dip,
14797 		    "!fcp%d: failed to allocate for portmap",
14798 		    pptr->port_instance);
14799 		return;
14800 	}
14801 
14802 	dev_cnt = 1;
14803 	devlist->map_pd = ptgt->tgt_pd_handle;
14804 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14805 	devlist->map_did.port_id = ptgt->tgt_d_id;
14806 
14807 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14808 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14809 
14810 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14811 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14812 	devlist->map_flags = 0;
14813 
14814 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14815 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14816 
14817 	/*
14818 	 * Clear the tgt_tid after no more references to
14819 	 * the fcp_tgt
14820 	 */
14821 	mutex_enter(&ptgt->tgt_mutex);
14822 	ptgt->tgt_tid = NULL;
14823 	mutex_exit(&ptgt->tgt_mutex);
14824 
14825 	kmem_free(devlist, sizeof (*devlist));
14826 }
14827 
14828 
14829 static void
14830 fcp_free_targets(struct fcp_port *pptr)
14831 {
14832 	int			i;
14833 	struct fcp_tgt	*ptgt;
14834 
14835 	mutex_enter(&pptr->port_mutex);
14836 	for (i = 0; i < FCP_NUM_HASH; i++) {
14837 		ptgt = pptr->port_tgt_hash_table[i];
14838 		while (ptgt != NULL) {
14839 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14840 
14841 			fcp_free_target(ptgt);
14842 			ptgt = next_tgt;
14843 		}
14844 	}
14845 	mutex_exit(&pptr->port_mutex);
14846 }
14847 
14848 
14849 static void
14850 fcp_free_target(struct fcp_tgt *ptgt)
14851 {
14852 	struct fcp_lun	*plun;
14853 	timeout_id_t		tid;
14854 
14855 	mutex_enter(&ptgt->tgt_mutex);
14856 	tid = ptgt->tgt_tid;
14857 
14858 	/*
14859 	 * Cancel any pending timeouts for this target.
14860 	 */
14861 
14862 	if (tid != NULL) {
14863 		/*
14864 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14865 		 * If tgt_tid is NULL, the callback will simply return.
14866 		 */
14867 		ptgt->tgt_tid = NULL;
14868 		mutex_exit(&ptgt->tgt_mutex);
14869 		(void) untimeout(tid);
14870 		mutex_enter(&ptgt->tgt_mutex);
14871 	}
14872 
14873 	plun = ptgt->tgt_lun;
14874 	while (plun != NULL) {
14875 		struct fcp_lun *next_lun = plun->lun_next;
14876 
14877 		fcp_dealloc_lun(plun);
14878 		plun = next_lun;
14879 	}
14880 
14881 	mutex_exit(&ptgt->tgt_mutex);
14882 	fcp_dealloc_tgt(ptgt);
14883 }
14884 
14885 /*
14886  *     Function: fcp_is_retryable
14887  *
14888  *  Description: Indicates if the internal packet is retryable.
14889  *
14890  *     Argument: *icmd		FCP internal packet.
14891  *
14892  * Return Value: 0	Not retryable
14893  *		 1	Retryable
14894  *
14895  *	Context: User, Kernel and Interrupt context
14896  */
14897 static int
14898 fcp_is_retryable(struct fcp_ipkt *icmd)
14899 {
14900 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14901 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14902 		return (0);
14903 	}
14904 
14905 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14906 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14907 }
14908 
14909 /*
14910  *     Function: fcp_create_on_demand
14911  *
14912  *     Argument: *pptr		FCP port.
14913  *		 *pwwn		Port WWN.
14914  *
14915  * Return Value: 0	Success
14916  *		 EIO
14917  *		 ENOMEM
14918  *		 EBUSY
14919  *		 EINVAL
14920  *
14921  *	Context: User and Kernel context
14922  */
14923 static int
14924 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14925 {
14926 	int			wait_ms;
14927 	int			tcount;
14928 	int			lcount;
14929 	int			ret;
14930 	int			error;
14931 	int			rval = EIO;
14932 	int			ntries;
14933 	fc_portmap_t		*devlist;
14934 	opaque_t		pd;
14935 	struct fcp_lun		*plun;
14936 	struct fcp_tgt		*ptgt;
14937 	int			old_manual = 0;
14938 
14939 	/* Allocates the fc_portmap_t structure. */
14940 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14941 
14942 	/*
14943 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14944 	 * in the commented statement below:
14945 	 *
14946 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14947 	 *
14948 	 * Below, the deadline for the discovery process is set.
14949 	 */
14950 	mutex_enter(&pptr->port_mutex);
14951 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14952 	mutex_exit(&pptr->port_mutex);
14953 
14954 	/*
14955 	 * We try to find the remote port based on the WWN provided by the
14956 	 * caller.  We actually ask fp/fctl if it has it.
14957 	 */
14958 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14959 	    (la_wwn_t *)pwwn, &error, 1);
14960 
14961 	if (pd == NULL) {
14962 		kmem_free(devlist, sizeof (*devlist));
14963 		return (rval);
14964 	}
14965 
14966 	/*
14967 	 * The remote port was found.  We ask fp/fctl to update our
14968 	 * fc_portmap_t structure.
14969 	 */
14970 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14971 	    (la_wwn_t *)pwwn, devlist);
14972 	if (ret != FC_SUCCESS) {
14973 		kmem_free(devlist, sizeof (*devlist));
14974 		return (rval);
14975 	}
14976 
14977 	/*
14978 	 * The map flag field is set to indicates that the creation is being
14979 	 * done at the user request (Ioclt probably luxadm or cfgadm).
14980 	 */
14981 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14982 
14983 	mutex_enter(&pptr->port_mutex);
14984 
14985 	/*
14986 	 * We check to see if fcp already has a target that describes the
14987 	 * device being created.  If not it is created.
14988 	 */
14989 	ptgt = fcp_lookup_target(pptr, pwwn);
14990 	if (ptgt == NULL) {
14991 		lcount = pptr->port_link_cnt;
14992 		mutex_exit(&pptr->port_mutex);
14993 
14994 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14995 		if (ptgt == NULL) {
14996 			fcp_log(CE_WARN, pptr->port_dip,
14997 			    "!FC target allocation failed");
14998 			return (ENOMEM);
14999 		}
15000 
15001 		mutex_enter(&pptr->port_mutex);
15002 	}
15003 
15004 	mutex_enter(&ptgt->tgt_mutex);
15005 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
15006 	ptgt->tgt_tmp_cnt = 1;
15007 	ptgt->tgt_device_created = 0;
15008 	/*
15009 	 * If fabric and auto config is set but the target was
15010 	 * manually unconfigured then reset to the manual_config_only to
15011 	 * 0 so the device will get configured.
15012 	 */
15013 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15014 	    fcp_enable_auto_configuration &&
15015 	    ptgt->tgt_manual_config_only == 1) {
15016 		old_manual = 1;
15017 		ptgt->tgt_manual_config_only = 0;
15018 	}
15019 	mutex_exit(&ptgt->tgt_mutex);
15020 
15021 	fcp_update_targets(pptr, devlist, 1,
15022 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15023 
15024 	lcount = pptr->port_link_cnt;
15025 	tcount = ptgt->tgt_change_cnt;
15026 
15027 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15028 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15029 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15030 		    fcp_enable_auto_configuration && old_manual) {
15031 			mutex_enter(&ptgt->tgt_mutex);
15032 			ptgt->tgt_manual_config_only = 1;
15033 			mutex_exit(&ptgt->tgt_mutex);
15034 		}
15035 
15036 		if (pptr->port_link_cnt != lcount ||
15037 		    ptgt->tgt_change_cnt != tcount) {
15038 			rval = EBUSY;
15039 		}
15040 		mutex_exit(&pptr->port_mutex);
15041 
15042 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15043 		    FCP_BUF_LEVEL_3, 0,
15044 		    "fcp_create_on_demand: mapflags ptgt=%x, "
15045 		    "lcount=%x::port_link_cnt=%x, "
15046 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
15047 		    ptgt, lcount, pptr->port_link_cnt,
15048 		    tcount, ptgt->tgt_change_cnt, rval);
15049 		return (rval);
15050 	}
15051 
15052 	/*
15053 	 * Due to lack of synchronization mechanisms, we perform
15054 	 * periodic monitoring of our request; Because requests
15055 	 * get dropped when another one supercedes (either because
15056 	 * of a link change or a target change), it is difficult to
15057 	 * provide a clean synchronization mechanism (such as a
15058 	 * semaphore or a conditional variable) without exhaustively
15059 	 * rewriting the mainline discovery code of this driver.
15060 	 */
15061 	wait_ms = 500;
15062 
15063 	ntries = fcp_max_target_retries;
15064 
15065 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15066 	    FCP_BUF_LEVEL_3, 0,
15067 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15068 	    "lcount=%x::port_link_cnt=%x, "
15069 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15070 	    "tgt_tmp_cnt =%x",
15071 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15072 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15073 	    ptgt->tgt_tmp_cnt);
15074 
15075 	mutex_enter(&ptgt->tgt_mutex);
15076 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15077 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15078 		mutex_exit(&ptgt->tgt_mutex);
15079 		mutex_exit(&pptr->port_mutex);
15080 
15081 		delay(drv_usectohz(wait_ms * 1000));
15082 
15083 		mutex_enter(&pptr->port_mutex);
15084 		mutex_enter(&ptgt->tgt_mutex);
15085 	}
15086 
15087 
15088 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15089 		rval = EBUSY;
15090 	} else {
15091 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15092 		    FCP_TGT_NODE_PRESENT) {
15093 			rval = 0;
15094 		}
15095 	}
15096 
15097 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15098 	    FCP_BUF_LEVEL_3, 0,
15099 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15100 	    "lcount=%x::port_link_cnt=%x, "
15101 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15102 	    "tgt_tmp_cnt =%x",
15103 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15104 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15105 	    ptgt->tgt_tmp_cnt);
15106 
15107 	if (rval) {
15108 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15109 		    fcp_enable_auto_configuration && old_manual) {
15110 			ptgt->tgt_manual_config_only = 1;
15111 		}
15112 		mutex_exit(&ptgt->tgt_mutex);
15113 		mutex_exit(&pptr->port_mutex);
15114 		kmem_free(devlist, sizeof (*devlist));
15115 
15116 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15117 		    FCP_BUF_LEVEL_3, 0,
15118 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15119 		    "lcount=%x::port_link_cnt=%x, "
15120 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15121 		    "tgt_device_created=%x, tgt D_ID=%x",
15122 		    ntries, ptgt, lcount, pptr->port_link_cnt,
15123 		    tcount, ptgt->tgt_change_cnt, rval,
15124 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
15125 		return (rval);
15126 	}
15127 
15128 	if ((plun = ptgt->tgt_lun) != NULL) {
15129 		tcount = plun->lun_tgt->tgt_change_cnt;
15130 	} else {
15131 		rval = EINVAL;
15132 	}
15133 	lcount = pptr->port_link_cnt;
15134 
15135 	/*
15136 	 * Configuring the target with no LUNs will fail. We
15137 	 * should reset the node state so that it is not
15138 	 * automatically configured when the LUNs are added
15139 	 * to this target.
15140 	 */
15141 	if (ptgt->tgt_lun_cnt == 0) {
15142 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15143 	}
15144 	mutex_exit(&ptgt->tgt_mutex);
15145 	mutex_exit(&pptr->port_mutex);
15146 
15147 	while (plun) {
15148 		child_info_t	*cip;
15149 
15150 		mutex_enter(&plun->lun_mutex);
15151 		cip = plun->lun_cip;
15152 		mutex_exit(&plun->lun_mutex);
15153 
15154 		mutex_enter(&ptgt->tgt_mutex);
15155 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15156 			mutex_exit(&ptgt->tgt_mutex);
15157 
15158 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15159 			    FCP_ONLINE, lcount, tcount,
15160 			    NDI_ONLINE_ATTACH);
15161 			if (rval != NDI_SUCCESS) {
15162 				FCP_TRACE(fcp_logq,
15163 				    pptr->port_instbuf, fcp_trace,
15164 				    FCP_BUF_LEVEL_3, 0,
15165 				    "fcp_create_on_demand: "
15166 				    "pass_to_hp_and_wait failed "
15167 				    "rval=%x", rval);
15168 				rval = EIO;
15169 			} else {
15170 				mutex_enter(&LUN_TGT->tgt_mutex);
15171 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
15172 				    FCP_LUN_BUSY);
15173 				mutex_exit(&LUN_TGT->tgt_mutex);
15174 			}
15175 			mutex_enter(&ptgt->tgt_mutex);
15176 		}
15177 
15178 		plun = plun->lun_next;
15179 		mutex_exit(&ptgt->tgt_mutex);
15180 	}
15181 
15182 	kmem_free(devlist, sizeof (*devlist));
15183 
15184 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15185 	    fcp_enable_auto_configuration && old_manual) {
15186 		mutex_enter(&ptgt->tgt_mutex);
15187 		/* if successful then set manual to 0 */
15188 		if (rval == 0) {
15189 			ptgt->tgt_manual_config_only = 0;
15190 		} else {
15191 			/* reset to 1 so the user has to do the config */
15192 			ptgt->tgt_manual_config_only = 1;
15193 		}
15194 		mutex_exit(&ptgt->tgt_mutex);
15195 	}
15196 
15197 	return (rval);
15198 }
15199 
15200 
15201 static void
15202 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15203 {
15204 	int		count;
15205 	uchar_t		byte;
15206 
15207 	count = 0;
15208 	while (*string) {
15209 		byte = FCP_ATOB(*string); string++;
15210 		byte = byte << 4 | FCP_ATOB(*string); string++;
15211 		bytes[count++] = byte;
15212 
15213 		if (count >= byte_len) {
15214 			break;
15215 		}
15216 	}
15217 }
15218 
15219 static void
15220 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15221 {
15222 	int		i;
15223 
15224 	for (i = 0; i < FC_WWN_SIZE; i++) {
15225 		(void) sprintf(string + (i * 2),
15226 		    "%02x", wwn[i]);
15227 	}
15228 
15229 }
15230 
15231 static void
15232 fcp_print_error(fc_packet_t *fpkt)
15233 {
15234 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15235 	    fpkt->pkt_ulp_private;
15236 	struct fcp_port	*pptr;
15237 	struct fcp_tgt	*ptgt;
15238 	struct fcp_lun	*plun;
15239 	caddr_t			buf;
15240 	int			scsi_cmd = 0;
15241 
15242 	ptgt = icmd->ipkt_tgt;
15243 	plun = icmd->ipkt_lun;
15244 	pptr = ptgt->tgt_port;
15245 
15246 	buf = kmem_zalloc(256, KM_NOSLEEP);
15247 	if (buf == NULL) {
15248 		return;
15249 	}
15250 
15251 	switch (icmd->ipkt_opcode) {
15252 	case SCMD_REPORT_LUN:
15253 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15254 		    " lun=0x%%x failed");
15255 		scsi_cmd++;
15256 		break;
15257 
15258 	case SCMD_INQUIRY_PAGE83:
15259 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15260 		    " lun=0x%%x failed");
15261 		scsi_cmd++;
15262 		break;
15263 
15264 	case SCMD_INQUIRY:
15265 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15266 		    " lun=0x%%x failed");
15267 		scsi_cmd++;
15268 		break;
15269 
15270 	case LA_ELS_PLOGI:
15271 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15272 		break;
15273 
15274 	case LA_ELS_PRLI:
15275 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15276 		break;
15277 	}
15278 
15279 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15280 		struct fcp_rsp		response, *rsp;
15281 		uchar_t			asc, ascq;
15282 		caddr_t			sense_key = NULL;
15283 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15284 
15285 		if (icmd->ipkt_nodma) {
15286 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15287 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15288 			    sizeof (struct fcp_rsp));
15289 		} else {
15290 			rsp = &response;
15291 			bep = &fcp_rsp_err;
15292 
15293 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15294 			    sizeof (struct fcp_rsp));
15295 
15296 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15297 			    bep, fpkt->pkt_resp_acc,
15298 			    sizeof (struct fcp_rsp_info));
15299 		}
15300 
15301 
15302 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15303 			(void) sprintf(buf + strlen(buf),
15304 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15305 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15306 			    " senselen=%%x. Giving up");
15307 
15308 			fcp_log(CE_WARN, pptr->port_dip, buf,
15309 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15310 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15311 			    rsp->fcp_u.fcp_status.reserved_1,
15312 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15313 
15314 			kmem_free(buf, 256);
15315 			return;
15316 		}
15317 
15318 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15319 		    bep->rsp_code != FCP_NO_FAILURE) {
15320 			(void) sprintf(buf + strlen(buf),
15321 			    " FCP Response code = 0x%x", bep->rsp_code);
15322 		}
15323 
15324 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15325 			struct scsi_extended_sense sense_info, *sense_ptr;
15326 
15327 			if (icmd->ipkt_nodma) {
15328 				sense_ptr = (struct scsi_extended_sense *)
15329 				    ((caddr_t)fpkt->pkt_resp +
15330 				    sizeof (struct fcp_rsp) +
15331 				    rsp->fcp_response_len);
15332 			} else {
15333 				sense_ptr = &sense_info;
15334 
15335 				FCP_CP_IN(fpkt->pkt_resp +
15336 				    sizeof (struct fcp_rsp) +
15337 				    rsp->fcp_response_len, &sense_info,
15338 				    fpkt->pkt_resp_acc,
15339 				    sizeof (struct scsi_extended_sense));
15340 			}
15341 
15342 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15343 			    NUM_IMPL_SENSE_KEYS) {
15344 				sense_key = sense_keys[sense_ptr->es_key];
15345 			} else {
15346 				sense_key = "Undefined";
15347 			}
15348 
15349 			asc = sense_ptr->es_add_code;
15350 			ascq = sense_ptr->es_qual_code;
15351 
15352 			(void) sprintf(buf + strlen(buf),
15353 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15354 			    " Giving up");
15355 
15356 			fcp_log(CE_WARN, pptr->port_dip, buf,
15357 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15358 			    asc, ascq);
15359 		} else {
15360 			(void) sprintf(buf + strlen(buf),
15361 			    " : SCSI status=%%x. Giving up");
15362 
15363 			fcp_log(CE_WARN, pptr->port_dip, buf,
15364 			    ptgt->tgt_d_id, plun->lun_num,
15365 			    rsp->fcp_u.fcp_status.scsi_status);
15366 		}
15367 	} else {
15368 		caddr_t state, reason, action, expln;
15369 
15370 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15371 		    &action, &expln);
15372 
15373 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15374 		    " Reason:%%s. Giving up");
15375 
15376 		if (scsi_cmd) {
15377 			fcp_log(CE_WARN, pptr->port_dip, buf,
15378 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15379 		} else {
15380 			fcp_log(CE_WARN, pptr->port_dip, buf,
15381 			    ptgt->tgt_d_id, state, reason);
15382 		}
15383 	}
15384 
15385 	kmem_free(buf, 256);
15386 }
15387 
15388 
15389 static int
15390 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15391     struct fcp_ipkt *icmd, int rval, caddr_t op)
15392 {
15393 	int	ret = DDI_FAILURE;
15394 	char	*error;
15395 
15396 	switch (rval) {
15397 	case FC_DEVICE_BUSY_NEW_RSCN:
15398 		/*
15399 		 * This means that there was a new RSCN that the transport
15400 		 * knows about (which the ULP *may* know about too) but the
15401 		 * pkt that was sent down was related to an older RSCN. So, we
15402 		 * are just going to reset the retry count and deadline and
15403 		 * continue to retry. The idea is that transport is currently
15404 		 * working on the new RSCN and will soon let the ULPs know
15405 		 * about it and when it does the existing logic will kick in
15406 		 * where it will change the tcount to indicate that something
15407 		 * changed on the target. So, rediscovery will start and there
15408 		 * will not be an infinite retry.
15409 		 *
15410 		 * For a full flow of how the RSCN info is transferred back and
15411 		 * forth, see fp.c
15412 		 */
15413 		icmd->ipkt_retries = 0;
15414 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15415 		    FCP_ICMD_DEADLINE;
15416 
15417 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15418 		    FCP_BUF_LEVEL_3, 0,
15419 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15420 		    rval, ptgt->tgt_d_id);
15421 		/* FALLTHROUGH */
15422 
15423 	case FC_STATEC_BUSY:
15424 	case FC_DEVICE_BUSY:
15425 	case FC_PBUSY:
15426 	case FC_FBUSY:
15427 	case FC_TRAN_BUSY:
15428 	case FC_OFFLINE:
15429 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15430 		    FCP_BUF_LEVEL_3, 0,
15431 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15432 		    rval, ptgt->tgt_d_id);
15433 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15434 		    fcp_is_retryable(icmd)) {
15435 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15436 			ret = DDI_SUCCESS;
15437 		}
15438 		break;
15439 
15440 	case FC_LOGINREQ:
15441 		/*
15442 		 * FC_LOGINREQ used to be handled just like all the cases
15443 		 * above. It has been changed to handled a PRLI that fails
15444 		 * with FC_LOGINREQ different than other ipkts that fail
15445 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15446 		 * a simple matter to turn it into a PLOGI instead, so that's
15447 		 * exactly what we do here.
15448 		 */
15449 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15450 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15451 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15452 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15453 		} else {
15454 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15455 			    FCP_BUF_LEVEL_3, 0,
15456 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15457 			    rval, ptgt->tgt_d_id);
15458 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15459 			    fcp_is_retryable(icmd)) {
15460 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15461 				ret = DDI_SUCCESS;
15462 			}
15463 		}
15464 		break;
15465 
15466 	default:
15467 		mutex_enter(&pptr->port_mutex);
15468 		mutex_enter(&ptgt->tgt_mutex);
15469 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15470 			mutex_exit(&ptgt->tgt_mutex);
15471 			mutex_exit(&pptr->port_mutex);
15472 
15473 			(void) fc_ulp_error(rval, &error);
15474 			fcp_log(CE_WARN, pptr->port_dip,
15475 			    "!Failed to send %s to D_ID=%x error=%s",
15476 			    op, ptgt->tgt_d_id, error);
15477 		} else {
15478 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15479 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15480 			    "fcp_handle_ipkt_errors,1: state change occured"
15481 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15482 			mutex_exit(&ptgt->tgt_mutex);
15483 			mutex_exit(&pptr->port_mutex);
15484 		}
15485 		break;
15486 	}
15487 
15488 	return (ret);
15489 }
15490 
15491 
15492 /*
15493  * Check of outstanding commands on any LUN for this target
15494  */
15495 static int
15496 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15497 {
15498 	struct	fcp_lun	*plun;
15499 	struct	fcp_pkt	*cmd;
15500 
15501 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15502 		mutex_enter(&plun->lun_mutex);
15503 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15504 		    cmd = cmd->cmd_forw) {
15505 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15506 				mutex_exit(&plun->lun_mutex);
15507 				return (FC_SUCCESS);
15508 			}
15509 		}
15510 		mutex_exit(&plun->lun_mutex);
15511 	}
15512 
15513 	return (FC_FAILURE);
15514 }
15515 
15516 static fc_portmap_t *
15517 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15518 {
15519 	int			i;
15520 	fc_portmap_t		*devlist;
15521 	fc_portmap_t		*devptr = NULL;
15522 	struct fcp_tgt	*ptgt;
15523 
15524 	mutex_enter(&pptr->port_mutex);
15525 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15526 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15527 		    ptgt = ptgt->tgt_next) {
15528 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15529 				++*dev_cnt;
15530 			}
15531 		}
15532 	}
15533 
15534 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15535 	    KM_NOSLEEP);
15536 	if (devlist == NULL) {
15537 		mutex_exit(&pptr->port_mutex);
15538 		fcp_log(CE_WARN, pptr->port_dip,
15539 		    "!fcp%d: failed to allocate for portmap for construct map",
15540 		    pptr->port_instance);
15541 		return (devptr);
15542 	}
15543 
15544 	for (i = 0; i < FCP_NUM_HASH; i++) {
15545 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15546 		    ptgt = ptgt->tgt_next) {
15547 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15548 				int ret;
15549 
15550 				ret = fc_ulp_pwwn_to_portmap(
15551 				    pptr->port_fp_handle,
15552 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15553 				    devlist);
15554 
15555 				if (ret == FC_SUCCESS) {
15556 					devlist++;
15557 					continue;
15558 				}
15559 
15560 				devlist->map_pd = NULL;
15561 				devlist->map_did.port_id = ptgt->tgt_d_id;
15562 				devlist->map_hard_addr.hard_addr =
15563 				    ptgt->tgt_hard_addr;
15564 
15565 				devlist->map_state = PORT_DEVICE_INVALID;
15566 				devlist->map_type = PORT_DEVICE_OLD;
15567 
15568 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15569 				    &devlist->map_nwwn, FC_WWN_SIZE);
15570 
15571 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15572 				    &devlist->map_pwwn, FC_WWN_SIZE);
15573 
15574 				devlist++;
15575 			}
15576 		}
15577 	}
15578 
15579 	mutex_exit(&pptr->port_mutex);
15580 
15581 	return (devptr);
15582 }
15583 /*
15584  * Inimate MPxIO that the lun is busy and cannot accept regular IO
15585  */
15586 static void
15587 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15588 {
15589 	int i;
15590 	struct fcp_tgt	*ptgt;
15591 	struct fcp_lun	*plun;
15592 
15593 	for (i = 0; i < FCP_NUM_HASH; i++) {
15594 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15595 		    ptgt = ptgt->tgt_next) {
15596 			mutex_enter(&ptgt->tgt_mutex);
15597 			for (plun = ptgt->tgt_lun; plun != NULL;
15598 			    plun = plun->lun_next) {
15599 				if (plun->lun_mpxio &&
15600 				    plun->lun_state & FCP_LUN_BUSY) {
15601 					if (!fcp_pass_to_hp(pptr, plun,
15602 					    plun->lun_cip,
15603 					    FCP_MPXIO_PATH_SET_BUSY,
15604 					    pptr->port_link_cnt,
15605 					    ptgt->tgt_change_cnt, 0, 0)) {
15606 						FCP_TRACE(fcp_logq,
15607 						    pptr->port_instbuf,
15608 						    fcp_trace,
15609 						    FCP_BUF_LEVEL_2, 0,
15610 						    "path_verifybusy: "
15611 						    "disable lun %p failed!",
15612 						    plun);
15613 					}
15614 				}
15615 			}
15616 			mutex_exit(&ptgt->tgt_mutex);
15617 		}
15618 	}
15619 }
15620 
15621 static int
15622 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15623 {
15624 	dev_info_t		*cdip = NULL;
15625 	dev_info_t		*pdip = NULL;
15626 
15627 	ASSERT(plun);
15628 
15629 	mutex_enter(&plun->lun_mutex);
15630 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15631 		mutex_exit(&plun->lun_mutex);
15632 		return (NDI_FAILURE);
15633 	}
15634 	mutex_exit(&plun->lun_mutex);
15635 	cdip = mdi_pi_get_client(PIP(cip));
15636 	pdip = mdi_pi_get_phci(PIP(cip));
15637 
15638 	ASSERT(cdip != NULL);
15639 	ASSERT(pdip != NULL);
15640 
15641 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15642 		/* LUN ready for IO */
15643 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15644 	} else {
15645 		/* LUN busy to accept IO */
15646 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15647 	}
15648 	return (NDI_SUCCESS);
15649 }
15650 
15651 /*
15652  * Caller must free the returned string of MAXPATHLEN len
15653  * If the device is offline (-1 instance number) NULL
15654  * will be returned.
15655  */
15656 static char *
15657 fcp_get_lun_path(struct fcp_lun *plun) {
15658 	dev_info_t	*dip = NULL;
15659 	char		*path = NULL;
15660 	mdi_pathinfo_t	*pip = NULL;
15661 
15662 	if (plun == NULL) {
15663 		return (NULL);
15664 	}
15665 
15666 	mutex_enter(&plun->lun_mutex);
15667 	if (plun->lun_mpxio == 0) {
15668 		dip = DIP(plun->lun_cip);
15669 		mutex_exit(&plun->lun_mutex);
15670 	} else {
15671 		/*
15672 		 * lun_cip must be accessed with lun_mutex held. Here
15673 		 * plun->lun_cip either points to a valid node or it is NULL.
15674 		 * Make a copy so that we can release lun_mutex.
15675 		 */
15676 		pip = PIP(plun->lun_cip);
15677 
15678 		/*
15679 		 * Increase ref count on the path so that we can release
15680 		 * lun_mutex and still be sure that the pathinfo node (and thus
15681 		 * also the client) is not deallocated. If pip is NULL, this
15682 		 * has no effect.
15683 		 */
15684 		mdi_hold_path(pip);
15685 
15686 		mutex_exit(&plun->lun_mutex);
15687 
15688 		/* Get the client. If pip is NULL, we get NULL. */
15689 		dip = mdi_pi_get_client(pip);
15690 	}
15691 
15692 	if (dip == NULL)
15693 		goto out;
15694 	if (ddi_get_instance(dip) < 0)
15695 		goto out;
15696 
15697 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15698 	if (path == NULL)
15699 		goto out;
15700 
15701 	(void) ddi_pathname(dip, path);
15702 
15703 	/* Clean up. */
15704 out:
15705 	if (pip != NULL)
15706 		mdi_rele_path(pip);
15707 
15708 	/*
15709 	 * In reality, the user wants a fully valid path (one they can open)
15710 	 * but this string is lacking the mount point, and the minor node.
15711 	 * It would be nice if we could "figure these out" somehow
15712 	 * and fill them in.  Otherwise, the userland code has to understand
15713 	 * driver specific details of which minor node is the "best" or
15714 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15715 	 * which tape doesn't rewind)
15716 	 */
15717 	return (path);
15718 }
15719 
15720 static int
15721 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15722     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15723 {
15724 	int64_t reset_delay;
15725 	int rval, retry = 0;
15726 	struct fcp_port *pptr = fcp_dip2port(parent);
15727 
15728 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15729 	    (ddi_get_lbolt64() - pptr->port_attach_time);
15730 	if (reset_delay < 0) {
15731 		reset_delay = 0;
15732 	}
15733 
15734 	if (fcp_bus_config_debug) {
15735 		flag |= NDI_DEVI_DEBUG;
15736 	}
15737 
15738 	switch (op) {
15739 	case BUS_CONFIG_ONE:
15740 		/*
15741 		 * Retry the command since we need to ensure
15742 		 * the fabric devices are available for root
15743 		 */
15744 		while (retry++ < fcp_max_bus_config_retries) {
15745 			rval =	(ndi_busop_bus_config(parent,
15746 			    flag | NDI_MDI_FALLBACK, op,
15747 			    arg, childp, (clock_t)reset_delay));
15748 			if (rval == 0) {
15749 				return (rval);
15750 			}
15751 		}
15752 
15753 		/*
15754 		 * drain taskq to make sure nodes are created and then
15755 		 * try again.
15756 		 */
15757 		taskq_wait(DEVI(parent)->devi_taskq);
15758 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15759 		    op, arg, childp, 0));
15760 
15761 	case BUS_CONFIG_DRIVER:
15762 	case BUS_CONFIG_ALL: {
15763 		/*
15764 		 * delay till all devices report in (port_tmp_cnt == 0)
15765 		 * or FCP_INIT_WAIT_TIMEOUT
15766 		 */
15767 		mutex_enter(&pptr->port_mutex);
15768 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15769 			(void) cv_timedwait(&pptr->port_config_cv,
15770 			    &pptr->port_mutex,
15771 			    ddi_get_lbolt() + (clock_t)reset_delay);
15772 			reset_delay =
15773 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15774 			    (ddi_get_lbolt64() - pptr->port_attach_time);
15775 		}
15776 		mutex_exit(&pptr->port_mutex);
15777 		/* drain taskq to make sure nodes are created */
15778 		taskq_wait(DEVI(parent)->devi_taskq);
15779 		return (ndi_busop_bus_config(parent, flag, op,
15780 		    arg, childp, 0));
15781 	}
15782 
15783 	default:
15784 		return (NDI_FAILURE);
15785 	}
15786 	/*NOTREACHED*/
15787 }
15788 
15789 static int
15790 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15791     ddi_bus_config_op_t op, void *arg)
15792 {
15793 	if (fcp_bus_config_debug) {
15794 		flag |= NDI_DEVI_DEBUG;
15795 	}
15796 
15797 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15798 }
15799 
15800 
15801 /*
15802  * Routine to copy GUID into the lun structure.
15803  * returns 0 if copy was successful and 1 if encountered a
15804  * failure and did not copy the guid.
15805  */
15806 static int
15807 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15808 {
15809 
15810 	int retval = 0;
15811 
15812 	/* add one for the null terminator */
15813 	const unsigned int len = strlen(guidp) + 1;
15814 
15815 	if ((guidp == NULL) || (plun == NULL)) {
15816 		return (1);
15817 	}
15818 
15819 	/*
15820 	 * if the plun->lun_guid already has been allocated,
15821 	 * then check the size. if the size is exact, reuse
15822 	 * it....if not free it an allocate the required size.
15823 	 * The reallocation should NOT typically happen
15824 	 * unless the GUIDs reported changes between passes.
15825 	 * We free up and alloc again even if the
15826 	 * size was more than required. This is due to the
15827 	 * fact that the field lun_guid_size - serves
15828 	 * dual role of indicating the size of the wwn
15829 	 * size and ALSO the allocation size.
15830 	 */
15831 	if (plun->lun_guid) {
15832 		if (plun->lun_guid_size != len) {
15833 			/*
15834 			 * free the allocated memory and
15835 			 * initialize the field
15836 			 * lun_guid_size to 0.
15837 			 */
15838 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15839 			plun->lun_guid = NULL;
15840 			plun->lun_guid_size = 0;
15841 		}
15842 	}
15843 	/*
15844 	 * alloc only if not already done.
15845 	 */
15846 	if (plun->lun_guid == NULL) {
15847 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15848 		if (plun->lun_guid == NULL) {
15849 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15850 			    "Unable to allocate"
15851 			    "Memory for GUID!!! size %d", len);
15852 			retval = 1;
15853 		} else {
15854 			plun->lun_guid_size = len;
15855 		}
15856 	}
15857 	if (plun->lun_guid) {
15858 		/*
15859 		 * now copy the GUID
15860 		 */
15861 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15862 	}
15863 	return (retval);
15864 }
15865 
15866 /*
15867  * fcp_reconfig_wait
15868  *
15869  * Wait for a rediscovery/reconfiguration to complete before continuing.
15870  */
15871 
15872 static void
15873 fcp_reconfig_wait(struct fcp_port *pptr)
15874 {
15875 	clock_t		reconfig_start, wait_timeout;
15876 
15877 	/*
15878 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15879 	 * reconfiguration in progress.
15880 	 */
15881 
15882 	mutex_enter(&pptr->port_mutex);
15883 	if (pptr->port_tmp_cnt == 0) {
15884 		mutex_exit(&pptr->port_mutex);
15885 		return;
15886 	}
15887 	mutex_exit(&pptr->port_mutex);
15888 
15889 	/*
15890 	 * If we cause a reconfig by raising power, delay until all devices
15891 	 * report in (port_tmp_cnt returns to 0)
15892 	 */
15893 
15894 	reconfig_start = ddi_get_lbolt();
15895 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15896 
15897 	mutex_enter(&pptr->port_mutex);
15898 
15899 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15900 	    pptr->port_tmp_cnt) {
15901 
15902 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15903 		    reconfig_start + wait_timeout);
15904 	}
15905 
15906 	mutex_exit(&pptr->port_mutex);
15907 
15908 	/*
15909 	 * Even if fcp_tmp_count isn't 0, continue without error.  The port
15910 	 * we want may still be ok.  If not, it will error out later
15911 	 */
15912 }
15913 
15914 /*
15915  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15916  * We rely on the fcp_global_mutex to provide protection against changes to
15917  * the fcp_lun_blacklist.
15918  *
15919  * You can describe a list of target port WWNs and LUN numbers which will
15920  * not be configured. LUN numbers will be interpreted as decimal. White
15921  * spaces and ',' can be used in the list of LUN numbers.
15922  *
15923  * To prevent LUNs 1 and 2 from being configured for target
15924  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15925  *
15926  * pwwn-lun-blacklist=
15927  * "510000f010fd92a1,1,2",
15928  * "510000e012079df1,1,2";
15929  */
15930 static void
15931 fcp_read_blacklist(dev_info_t *dip,
15932     struct fcp_black_list_entry **pplun_blacklist) {
15933 	char **prop_array	= NULL;
15934 	char *curr_pwwn		= NULL;
15935 	char *curr_lun		= NULL;
15936 	uint32_t prop_item	= 0;
15937 	int idx			= 0;
15938 	int len			= 0;
15939 
15940 	ASSERT(mutex_owned(&fcp_global_mutex));
15941 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15942 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15943 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15944 		return;
15945 	}
15946 
15947 	for (idx = 0; idx < prop_item; idx++) {
15948 
15949 		curr_pwwn = prop_array[idx];
15950 		while (*curr_pwwn == ' ') {
15951 			curr_pwwn++;
15952 		}
15953 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15954 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15955 			    ", please check.", curr_pwwn);
15956 			continue;
15957 		}
15958 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15959 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15960 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15961 			    ", please check.", curr_pwwn);
15962 			continue;
15963 		}
15964 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15965 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15966 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15967 				    "blacklist, please check.", curr_pwwn);
15968 				break;
15969 			}
15970 		}
15971 		if (len != sizeof (la_wwn_t) * 2) {
15972 			continue;
15973 		}
15974 
15975 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15976 		*(curr_lun - 1) = '\0';
15977 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15978 	}
15979 
15980 	ddi_prop_free(prop_array);
15981 }
15982 
15983 /*
15984  * Get the masking info about one remote target port designated by wwn.
15985  * Lun ids could be separated by ',' or white spaces.
15986  */
15987 static void
15988 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15989     struct fcp_black_list_entry **pplun_blacklist) {
15990 	int		idx			= 0;
15991 	uint32_t	offset			= 0;
15992 	unsigned long	lun_id			= 0;
15993 	char		lunid_buf[16];
15994 	char		*pend			= NULL;
15995 	int		illegal_digit		= 0;
15996 
15997 	while (offset < strlen(curr_lun)) {
15998 		while ((curr_lun[offset + idx] != ',') &&
15999 		    (curr_lun[offset + idx] != '\0') &&
16000 		    (curr_lun[offset + idx] != ' ')) {
16001 			if (isdigit(curr_lun[offset + idx]) == 0) {
16002 				illegal_digit++;
16003 			}
16004 			idx++;
16005 		}
16006 		if (illegal_digit > 0) {
16007 			offset += (idx+1);	/* To the start of next lun */
16008 			idx = 0;
16009 			illegal_digit = 0;
16010 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16011 			    "the blacklist, please check digits.",
16012 			    curr_lun, curr_pwwn);
16013 			continue;
16014 		}
16015 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16016 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16017 			    "the blacklist, please check the length of LUN#.",
16018 			    curr_lun, curr_pwwn);
16019 			break;
16020 		}
16021 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
16022 		    offset++;
16023 		    continue;
16024 		}
16025 
16026 		bcopy(curr_lun + offset, lunid_buf, idx);
16027 		lunid_buf[idx] = '\0';
16028 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16029 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16030 		} else {
16031 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16032 			    "the blacklist, please check %s.",
16033 			    curr_lun, curr_pwwn, lunid_buf);
16034 		}
16035 		offset += (idx+1);	/* To the start of next lun */
16036 		idx = 0;
16037 	}
16038 }
16039 
16040 /*
16041  * Add one masking record
16042  */
16043 static void
16044 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16045     struct fcp_black_list_entry **pplun_blacklist) {
16046 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16047 	struct fcp_black_list_entry	*new_entry	= NULL;
16048 	la_wwn_t			wwn;
16049 
16050 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16051 	while (tmp_entry) {
16052 		if ((bcmp(&tmp_entry->wwn, &wwn,
16053 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16054 			return;
16055 		}
16056 
16057 		tmp_entry = tmp_entry->next;
16058 	}
16059 
16060 	/* add to black list */
16061 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16062 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16063 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16064 	new_entry->lun = lun_id;
16065 	new_entry->masked = 0;
16066 	new_entry->next = *pplun_blacklist;
16067 	*pplun_blacklist = new_entry;
16068 }
16069 
16070 /*
16071  * Check if we should mask the specified lun of this fcp_tgt
16072  */
16073 static int
16074 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16075 	struct fcp_black_list_entry *remote_port;
16076 
16077 	remote_port = fcp_lun_blacklist;
16078 	while (remote_port != NULL) {
16079 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16080 			if (remote_port->lun == lun_id) {
16081 				remote_port->masked++;
16082 				if (remote_port->masked == 1) {
16083 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
16084 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
16085 					    "is masked due to black listing.\n",
16086 					    lun_id, wwn->raw_wwn[0],
16087 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
16088 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
16089 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
16090 					    wwn->raw_wwn[7]);
16091 				}
16092 				return (TRUE);
16093 			}
16094 		}
16095 		remote_port = remote_port->next;
16096 	}
16097 	return (FALSE);
16098 }
16099 
16100 /*
16101  * Release all allocated resources
16102  */
16103 static void
16104 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16105 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16106 	struct fcp_black_list_entry	*current_entry	= NULL;
16107 
16108 	ASSERT(mutex_owned(&fcp_global_mutex));
16109 	/*
16110 	 * Traverse all luns
16111 	 */
16112 	while (tmp_entry) {
16113 		current_entry = tmp_entry;
16114 		tmp_entry = tmp_entry->next;
16115 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16116 	}
16117 	*pplun_blacklist = NULL;
16118 }
16119 
16120 /*
16121  * In fcp module,
16122  *   pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16123  */
16124 static struct scsi_pkt *
16125 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16126     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16127     int flags, int (*callback)(), caddr_t arg)
16128 {
16129 	fcp_port_t	*pptr = ADDR2FCP(ap);
16130 	fcp_pkt_t	*cmd  = NULL;
16131 	fc_frame_hdr_t	*hp;
16132 
16133 	/*
16134 	 * First step: get the packet
16135 	 */
16136 	if (pkt == NULL) {
16137 		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16138 		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16139 		    callback, arg);
16140 		if (pkt == NULL) {
16141 			return (NULL);
16142 		}
16143 
16144 		/*
16145 		 * All fields in scsi_pkt will be initialized properly or
16146 		 * set to zero. We need do nothing for scsi_pkt.
16147 		 */
16148 		/*
16149 		 * But it's our responsibility to link other related data
16150 		 * structures. Their initialization will be done, just
16151 		 * before the scsi_pkt will be sent to FCA.
16152 		 */
16153 		cmd		= PKT2CMD(pkt);
16154 		cmd->cmd_pkt	= pkt;
16155 		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16156 		/*
16157 		 * fc_packet_t
16158 		 */
16159 		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16160 		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16161 		    sizeof (struct fcp_pkt));
16162 		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16163 		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16164 		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16165 		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16166 		/*
16167 		 * Fill in the Fabric Channel Header
16168 		 */
16169 		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16170 		hp->r_ctl = R_CTL_COMMAND;
16171 		hp->rsvd = 0;
16172 		hp->type = FC_TYPE_SCSI_FCP;
16173 		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16174 		hp->seq_id = 0;
16175 		hp->df_ctl  = 0;
16176 		hp->seq_cnt = 0;
16177 		hp->ox_id = 0xffff;
16178 		hp->rx_id = 0xffff;
16179 		hp->ro = 0;
16180 	} else {
16181 		/*
16182 		 * We need think if we should reset any elements in
16183 		 * related data structures.
16184 		 */
16185 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
16186 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
16187 		    "reusing pkt, flags %d", flags);
16188 		cmd = PKT2CMD(pkt);
16189 		if (cmd->cmd_fp_pkt->pkt_pd) {
16190 			cmd->cmd_fp_pkt->pkt_pd = NULL;
16191 		}
16192 	}
16193 
16194 	/*
16195 	 * Second step:	 dma allocation/move
16196 	 */
16197 	if (bp && bp->b_bcount != 0) {
16198 		/*
16199 		 * Mark if it's read or write
16200 		 */
16201 		if (bp->b_flags & B_READ) {
16202 			cmd->cmd_flags |= CFLAG_IS_READ;
16203 		} else {
16204 			cmd->cmd_flags &= ~CFLAG_IS_READ;
16205 		}
16206 
16207 		bp_mapin(bp);
16208 		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16209 		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16210 		cmd->cmd_fp_pkt->pkt_data_resid = 0;
16211 	} else {
16212 		/*
16213 		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16214 		 * to send zero-length read/write.
16215 		 */
16216 		cmd->cmd_fp_pkt->pkt_data = NULL;
16217 		cmd->cmd_fp_pkt->pkt_datalen = 0;
16218 	}
16219 
16220 	return (pkt);
16221 }
16222 
16223 static void
16224 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16225 {
16226 	fcp_port_t	*pptr = ADDR2FCP(ap);
16227 
16228 	/*
16229 	 * First we let FCA to uninitilize private part.
16230 	 */
16231 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16232 	    PKT2CMD(pkt)->cmd_fp_pkt);
16233 
16234 	/*
16235 	 * Then we uninitialize fc_packet.
16236 	 */
16237 
16238 	/*
16239 	 * Thirdly, we uninitializae fcp_pkt.
16240 	 */
16241 
16242 	/*
16243 	 * In the end, we free scsi_pkt.
16244 	 */
16245 	scsi_hba_pkt_free(ap, pkt);
16246 }
16247 
16248 static int
16249 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16250 {
16251 	fcp_port_t	*pptr = ADDR2FCP(ap);
16252 	fcp_lun_t	*plun = ADDR2LUN(ap);
16253 	fcp_tgt_t	*ptgt = plun->lun_tgt;
16254 	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
16255 	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
16256 	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
16257 	int		 rval;
16258 
16259 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
16260 	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16261 
16262 	/*
16263 	 * Firstly, we need initialize fcp_pkt_t
16264 	 * Secondly, we need initialize fcp_cmd_t.
16265 	 */
16266 	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16267 	fcmd->fcp_data_len = fpkt->pkt_datalen;
16268 	fcmd->fcp_ent_addr = plun->lun_addr;
16269 	if (pkt->pkt_flags & FLAG_HTAG) {
16270 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16271 	} else if (pkt->pkt_flags & FLAG_OTAG) {
16272 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16273 	} else if (pkt->pkt_flags & FLAG_STAG) {
16274 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16275 	} else {
16276 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16277 	}
16278 
16279 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16280 		fcmd->fcp_cntl.cntl_read_data = 1;
16281 		fcmd->fcp_cntl.cntl_write_data = 0;
16282 	} else {
16283 		fcmd->fcp_cntl.cntl_read_data = 0;
16284 		fcmd->fcp_cntl.cntl_write_data = 1;
16285 	}
16286 
16287 	/*
16288 	 * Then we need initialize fc_packet_t too.
16289 	 */
16290 	fpkt->pkt_timeout = pkt->pkt_time + 2;
16291 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16292 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16293 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16294 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16295 	} else {
16296 		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16297 	}
16298 
16299 	if (pkt->pkt_flags & FLAG_NOINTR) {
16300 		fpkt->pkt_comp = NULL;
16301 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16302 	} else {
16303 		fpkt->pkt_comp = fcp_cmd_callback;
16304 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16305 		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16306 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16307 		}
16308 	}
16309 
16310 	/*
16311 	 * Lastly, we need initialize scsi_pkt
16312 	 */
16313 	pkt->pkt_reason = CMD_CMPLT;
16314 	pkt->pkt_state = 0;
16315 	pkt->pkt_statistics = 0;
16316 	pkt->pkt_resid = 0;
16317 
16318 	/*
16319 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
16320 	 * have to do polled I/O
16321 	 */
16322 	if (pkt->pkt_flags & FLAG_NOINTR) {
16323 		return (fcp_dopoll(pptr, cmd));
16324 	}
16325 
16326 	cmd->cmd_state = FCP_PKT_ISSUED;
16327 	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16328 	if (rval == FC_SUCCESS) {
16329 		return (TRAN_ACCEPT);
16330 	}
16331 
16332 	/*
16333 	 * Need more consideration
16334 	 *
16335 	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
16336 	 */
16337 	cmd->cmd_state = FCP_PKT_IDLE;
16338 	if (rval == FC_TRAN_BUSY) {
16339 		return (TRAN_BUSY);
16340 	} else {
16341 		return (TRAN_FATAL_ERROR);
16342 	}
16343 }
16344 
16345 /*
16346  * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16347  * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16348  */
16349 static void
16350 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16351 {
16352 	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16353 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16354 }
16355 
16356 /*
16357  * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16358  */
16359 static void
16360 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16361 {
16362 	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16363 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16364 }
16365