xref: /titanic_41/usr/src/uts/common/io/fibre-channel/ulp/fcp.c (revision cbdcbd056f15c9c9fd82d5543b3a502677c4d391)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  * Fibre Channel SCSI ULP Mapping driver
26  */
27 
28 #include <sys/scsi/scsi.h>
29 #include <sys/types.h>
30 #include <sys/varargs.h>
31 #include <sys/devctl.h>
32 #include <sys/thread.h>
34 #include <sys/open.h>
35 #include <sys/file.h>
36 #include <sys/sunndi.h>
37 #include <sys/console.h>
38 #include <sys/proc.h>
39 #include <sys/time.h>
40 #include <sys/utsname.h>
41 #include <sys/scsi/impl/scsi_reset_notify.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/byteorder.h>
44 #include <sys/fs/dv_node.h>
45 #include <sys/ctype.h>
46 #include <sys/sunmdi.h>
47 
48 #include <sys/fibre-channel/fc.h>
49 #include <sys/fibre-channel/impl/fc_ulpif.h>
50 #include <sys/fibre-channel/ulp/fcpvar.h>
51 
52 /*
53  * Discovery Process
54  * =================
55  *
56  *    The discovery process is a major function of FCP.	 In order to help
57  * understand that function a flow diagram is given here.  This diagram
58  * doesn't claim to cover all the cases and the events that can occur during
59  * the discovery process nor the subtleties of the code.  The code paths shown
60  * are simplified.  Its purpose is to help the reader (and potentially bug
61  * fixer) have an overall view of the logic of the code.  For that reason the
62  * diagram covers the simple case of the line coming up cleanly or of a new
63  * port attaching to FCP while the link is up.  The reader must keep in mind
64  * that:
65  *
66  *	- There are special cases where bringing devices online and offline
67  *	  is driven by Ioctl.
68  *
69  *	- The behavior of the discovery process can be modified through the
70  *	  .conf file.
71  *
72  *	- The line can go down and come back up at any time during the
73  *	  discovery process which explains some of the complexity of the code.
74  *
75  * ............................................................................
76  *
77  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
78  *
79  *
80  *			+-------------------------+
81  *   fp/fctl module --->|    fcp_port_attach	  |
82  *			+-------------------------+
83  *	   |			     |
84  *	   |			     |
85  *	   |			     v
86  *	   |		+-------------------------+
87  *	   |		| fcp_handle_port_attach  |
88  *	   |		+-------------------------+
89  *	   |				|
90  *	   |				|
91  *	   +--------------------+	|
92  *				|	|
93  *				v	v
94  *			+-------------------------+
95  *			|   fcp_statec_callback	  |
96  *			+-------------------------+
97  *				    |
98  *				    |
99  *				    v
100  *			+-------------------------+
101  *			|    fcp_handle_devices	  |
102  *			+-------------------------+
103  *				    |
104  *				    |
105  *				    v
106  *			+-------------------------+
107  *			|   fcp_handle_mapflags	  |
108  *			+-------------------------+
109  *				    |
110  *				    |
111  *				    v
112  *			+-------------------------+
113  *			|     fcp_send_els	  |
114  *			|			  |
115  *			| PLOGI or PRLI To all the|
116  *			| reachable devices.	  |
117  *			+-------------------------+
118  *
119  *
120  * ............................................................................
121  *
122  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
123  *	   STEP 1 are called (it is actually the same function).
124  *
125  *
126  *			+-------------------------+
127  *			|    fcp_icmd_callback	  |
128  *   fp/fctl module --->|			  |
129  *			| callback for PLOGI and  |
130  *			| PRLI.			  |
131  *			+-------------------------+
132  *				     |
133  *				     |
134  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
135  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
136  *		      |		   \   /	   |
137  *		      |		    \-/		   |
138  *		      |				   |
139  *		      v				   v
140  *	+-------------------------+	+-------------------------+
141  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
142  *	|			  |	|			  |
143  *	|	  PRLI		  |	|	REPORT_LUN	  |
144  *	+-------------------------+	+-------------------------+
145  *
146  * ............................................................................
147  *
148  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
149  *	   (It is actually the same function).
150  *
151  *
152  *			    +-------------------------+
153  *   fp/fctl module ------->|	 fcp_scsi_callback    |
154  *			    +-------------------------+
155  *					|
156  *					|
157  *					|
158  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
159  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
160  *		 |		      \	  /			  |
161  *		 |		       \-/			  |
162  *		 |			|			  |
163  *		 | Receive INQUIRY reply|			  |
164  *		 |			|			  |
165  *		 v			v			  v
166  * +------------------------+ +----------------------+ +----------------------+
167  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
168  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
169  * +------------------------+ +----------------------+ +----------------------+
170  *		 |			|			  |
171  *		 |			|			  |
172  *		 |			|			  |
173  *		 v			v			  |
174  *     +-----------------+	+-----------------+		  |
175  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
176  *     |		 |	|		  |		  |
177  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
178  *     |  (To each LUN)	 |	+-----------------+		  |
179  *     +-----------------+					  |
180  *								  |
181  *								  v
182  *						      +------------------------+
183  *						      |	 fcp_call_finish_init  |
184  *						      +------------------------+
185  *								  |
186  *								  v
187  *						 +-----------------------------+
188  *						 |  fcp_call_finish_init_held  |
189  *						 +-----------------------------+
190  *								  |
191  *								  |
192  *			   All LUNs scanned			 /-\
193  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
194  *			      |					\   /
195  *			      |					 \-/
196  *			      v					  |
197  *		     +------------------+			  |
198  *		     |	fcp_finish_tgt	|			  |
199  *		     +------------------+			  |
200  *			      |	  Target Not Offline and	  |
201  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
202  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
203  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
204  *	       |	    \	/		|		  |
205  *	       |	     \-/		|		  |
206  *	       v				v		  |
207  * +----------------------------+     +-------------------+	  |
208  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
209  * |				|     +-------------------+	  |
210  * | A structure fcp_tgt_elem	|		|		  |
211  * | is created and queued in	|		v		  |
212  * | the FCP port list		|     +-------------------+	  |
213  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
214  * | will be unqueued by the	|     |			  |	  |
215  * | watchdog timer.		|     | Called for each	  |	  |
216  * +----------------------------+     | LUN. Dispatches	  |	  |
217  *		  |		      | fcp_hp_task	  |	  |
218  *		  |		      +-------------------+	  |
219  *		  |				|		  |
220  *		  |				|		  |
221  *		  |				|		  |
222  *		  |				+---------------->|
223  *		  |						  |
224  *		  +---------------------------------------------->|
225  *								  |
226  *								  |
227  *		All the targets (devices) have been scanned	 /-\
228  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
229  *			       |				\   /
230  *			       |				 \-/
231  *	    +-------------------------------------+		  |
232  *	    |		fcp_finish_init		  |		  |
233  *	    |					  |		  |
234  *	    | Signal broadcasts the condition	  |		  |
235  *	    | variable port_config_cv of the FCP  |		  |
236  *	    | port.  One potential code sequence  |		  |
237  *	    | waiting on the condition variable	  |		  |
238  *	    | the code sequence handling	  |		  |
239  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
240  *	    | The other is in the function	  |		  |
241  *	    | fcp_reconfig_wait which is called	  |		  |
242  *	    | in the transmit path preventing IOs |		  |
243  *	    | from going through till the disco-  |		  |
244  *	    | very process is over.		  |		  |
245  *	    +-------------------------------------+		  |
246  *			       |				  |
247  *			       |				  |
248  *			       +--------------------------------->|
249  *								  |
250  *								  v
251  *								Return
252  *
253  * ............................................................................
254  *
255  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
256  *
257  *
258  *			+-------------------------+
259  *			|      fcp_hp_task	  |
260  *			+-------------------------+
261  *				     |
262  *				     |
263  *				     v
264  *			+-------------------------+
265  *			|     fcp_trigger_lun	  |
266  *			+-------------------------+
267  *				     |
268  *				     |
269  *				     v
270  *		   Bring offline    /-\	 Bring online
271  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
272  *		 |		   \   /		   |
273  *		 |		    \-/			   |
274  *		 v					   v
275  *    +---------------------+			+-----------------------+
276  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
277  *    +---------------------+			|			|
278  *						| Creates a dev_info_t	|
279  *						| or a mdi_pathinfo_t	|
280  *						| depending on whether	|
281  *						| mpxio is on or off.	|
282  *						+-----------------------+
283  *							   |
284  *							   |
285  *							   v
286  *						+-----------------------+
287  *						|  fcp_online_child	|
288  *						|			|
289  *						| Set device online	|
290  *						| using NDI or MDI.	|
291  *						+-----------------------+
292  *
293  * ............................................................................
294  *
295  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
296  *	   what is described here.  We only show the target offline path.
297  *
298  *
299  *			 +--------------------------+
300  *			 |	  fcp_watch	    |
301  *			 +--------------------------+
302  *				       |
303  *				       |
304  *				       v
305  *			 +--------------------------+
306  *			 |  fcp_scan_offline_tgts   |
307  *			 +--------------------------+
308  *				       |
309  *				       |
310  *				       v
311  *			 +--------------------------+
312  *			 |  fcp_offline_target_now  |
313  *			 +--------------------------+
314  *				       |
315  *				       |
316  *				       v
317  *			 +--------------------------+
318  *			 |   fcp_offline_tgt_luns   |
319  *			 +--------------------------+
320  *				       |
321  *				       |
322  *				       v
323  *			 +--------------------------+
324  *			 |     fcp_offline_lun	    |
325  *			 +--------------------------+
326  *				       |
327  *				       |
328  *				       v
329  *		     +----------------------------------+
330  *		     |	     fcp_offline_lun_now	|
331  *		     |					|
332  *		     | A request (or two if mpxio) is	|
333  *		     | sent to the hot plug task using	|
334  *		     | a fcp_hp_elem structure.		|
335  *		     +----------------------------------+
336  */
337 
338 /*
339  * Functions registered with DDI framework
340  */
341 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
342 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
343 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
344 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
345 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
346     cred_t *credp, int *rval);
347 
348 /*
349  * Functions registered with FC Transport framework
350  */
351 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
352     fc_attach_cmd_t cmd,  uint32_t s_id);
353 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
354     fc_detach_cmd_t cmd);
355 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
356     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
357     uint32_t claimed);
358 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
359     fc_unsol_buf_t *buf, uint32_t claimed);
360 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
361     fc_unsol_buf_t *buf, uint32_t claimed);
362 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
363     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
364     uint32_t  dev_cnt, uint32_t port_sid);
365 
366 /*
367  * Functions registered with SCSA framework
368  */
369 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
370     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
371 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
372     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
373 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
374     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
375 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
376 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
377 static int fcp_scsi_reset(struct scsi_address *ap, int level);
378 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
379 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
380     int whom);
381 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
382 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
383     void (*callback)(caddr_t), caddr_t arg);
384 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
385     char *name, ddi_eventcookie_t *event_cookiep);
386 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
387     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
388     ddi_callback_id_t *cb_id);
389 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
390     ddi_callback_id_t cb_id);
391 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
392     ddi_eventcookie_t eventid, void *impldata);
393 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
394     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
395 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
396     ddi_bus_config_op_t op, void *arg);
397 
398 /*
399  * Internal functions
400  */
401 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
402     int mode, int *rval);
403 
404 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
405     int mode, int *rval);
406 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
407     struct fcp_scsi_cmd *fscsi, int mode);
408 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
409     caddr_t base_addr, int mode);
410 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
411 
412 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
413     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
414     int *fc_pkt_reason, int *fc_pkt_action);
415 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
416     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
417 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
418     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
419 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
420 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
421 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
422 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
423 
424 static void fcp_handle_devices(struct fcp_port *pptr,
425     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
426     fcp_map_tag_t *map_tag, int cause);
427 static int fcp_handle_mapflags(struct fcp_port *pptr,
428     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
429     int tgt_cnt, int cause);
430 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
431 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
432     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
433 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
434     int cause);
435 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
436     uint32_t state);
437 static struct fcp_port *fcp_get_port(opaque_t port_handle);
438 static void fcp_unsol_callback(fc_packet_t *fpkt);
439 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
440     uchar_t r_ctl, uchar_t type);
441 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
442 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
443     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
444     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
445 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
446 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
447     int nodma, int flags);
448 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
449 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
450     uchar_t *wwn);
451 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
452     uint32_t d_id);
453 static void fcp_icmd_callback(fc_packet_t *fpkt);
454 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
455     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
456 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
457 static void fcp_scsi_callback(fc_packet_t *fpkt);
458 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
459 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
461 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
462     uint16_t lun_num);
463 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
464     int link_cnt, int tgt_cnt, int cause);
465 static void fcp_finish_init(struct fcp_port *pptr);
466 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
467     int tgt_cnt, int cause);
468 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
469     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
470 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
471     int link_cnt, int tgt_cnt, int nowait, int flags);
472 static void fcp_offline_target_now(struct fcp_port *pptr,
473     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
474 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
475     int tgt_cnt, int flags);
476 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
477     int nowait, int flags);
478 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
479     int tgt_cnt);
480 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
481     int tgt_cnt, int flags);
482 static void fcp_scan_offline_luns(struct fcp_port *pptr);
483 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
484 static void fcp_update_offline_flags(struct fcp_lun *plun);
485 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
486 static void fcp_abort_commands(struct fcp_pkt *head, struct
487     fcp_port *pptr);
488 static void fcp_cmd_callback(fc_packet_t *fpkt);
489 static void fcp_complete_pkt(fc_packet_t *fpkt);
490 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
491     struct fcp_port *pptr);
492 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
493     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
494 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
495 static void fcp_dealloc_lun(struct fcp_lun *plun);
496 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
497     fc_portmap_t *map_entry, int link_cnt);
498 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
499 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
500 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
501     int internal);
502 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
503 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
504     uint32_t s_id, int instance);
505 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
506     int instance);
507 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
508 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
509     int);
510 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
511 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
512 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
513     int flags);
514 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
515 static int fcp_reset_target(struct scsi_address *ap, int level);
516 static int fcp_commoncap(struct scsi_address *ap, char *cap,
517     int val, int tgtonly, int doset);
518 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
519 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
520 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
521     int sleep);
522 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
523     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
524 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
525 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
526 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
527     int lcount, int tcount);
528 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
529 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
530 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
531     int tgt_cnt);
532 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
533     dev_info_t *pdip, caddr_t name);
534 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
535     int lcount, int tcount, int flags, int *circ);
536 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
537     int lcount, int tcount, int flags, int *circ);
538 static void fcp_remove_child(struct fcp_lun *plun);
539 static void fcp_watch(void *arg);
540 static void fcp_check_reset_delay(struct fcp_port *pptr);
541 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
542     struct fcp_lun *rlun, int tgt_cnt);
543 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
544 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
545     uchar_t *wwn, uint16_t lun);
546 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
547     struct fcp_lun *plun);
548 static void fcp_post_callback(struct fcp_pkt *cmd);
549 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
550 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
551 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
552     child_info_t *cip);
553 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
554     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
555     int tgt_cnt, int flags);
556 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
557     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
558     int tgt_cnt, int flags, int wait);
559 static void fcp_retransport_cmd(struct fcp_port *pptr,
560     struct fcp_pkt *cmd);
561 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
562     uint_t statistics);
563 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
564 static void fcp_update_targets(struct fcp_port *pptr,
565     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
566 static int fcp_call_finish_init(struct fcp_port *pptr,
567     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
568 static int fcp_call_finish_init_held(struct fcp_port *pptr,
569     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
570 static void fcp_reconfigure_luns(void * tgt_handle);
571 static void fcp_free_targets(struct fcp_port *pptr);
572 static void fcp_free_target(struct fcp_tgt *ptgt);
573 static int fcp_is_retryable(struct fcp_ipkt *icmd);
574 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
575 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
576 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
577 static void fcp_print_error(fc_packet_t *fpkt);
578 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
579     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
580 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
581 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
582     uint32_t *dev_cnt);
583 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
584 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
585 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
586     struct fcp_ioctl *, struct fcp_port **);
587 static char *fcp_get_lun_path(struct fcp_lun *plun);
588 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
589     int *rval);
590 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
595 static void fcp_reconfig_wait(struct fcp_port *pptr);
596 
597 /*
598  * New functions added for mpxio support
599  */
600 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
601     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
602 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
603     int tcount);
604 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
605     dev_info_t *pdip);
606 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
607 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
608 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
609 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
610 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
611     int what);
612 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
613     fc_packet_t *fpkt);
614 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
615 
616 /*
617  * New functions added for lun masking support
618  */
619 static void fcp_read_blacklist(dev_info_t *dip,
620     struct fcp_black_list_entry **pplun_blacklist);
621 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
622     struct fcp_black_list_entry **pplun_blacklist);
623 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
624     struct fcp_black_list_entry **pplun_blacklist);
625 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
626 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
627 
628 /*
629  * New functions to support software FCA (like fcoei)
630  */
631 static struct scsi_pkt *fcp_pseudo_init_pkt(
632 	struct scsi_address *ap, struct scsi_pkt *pkt,
633 	struct buf *bp, int cmdlen, int statuslen,
634 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
635 static void fcp_pseudo_destroy_pkt(
636 	struct scsi_address *ap, struct scsi_pkt *pkt);
637 static void fcp_pseudo_sync_pkt(
638 	struct scsi_address *ap, struct scsi_pkt *pkt);
639 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
640 static void fcp_pseudo_dmafree(
641 	struct scsi_address *ap, struct scsi_pkt *pkt);
642 
643 extern struct mod_ops	mod_driverops;
644 /*
645  * This variable is defined in modctl.c and set to '1' after the root driver
646  * and fs are loaded.  It serves as an indication that the root filesystem can
647  * be used.
648  */
649 extern int		modrootloaded;
650 /*
651  * This table contains strings associated with the SCSI sense key codes.  It
652  * is used by FCP to print a clear explanation of the code returned in the
653  * sense information by a device.
654  */
655 extern char		*sense_keys[];
656 /*
657  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
658  * under this device that the paths to a physical device are created when
659  * MPxIO is used.
660  */
661 extern dev_info_t	*scsi_vhci_dip;
662 
663 /*
664  * Report lun processing
665  */
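/*
 * These are the SAM addressing-method codes carried in the upper two bits
 * of the first byte of each 8-byte LUN entry returned by REPORT LUNS:
 * 0x00 is peripheral device addressing, 0x40 flat space (volume)
 * addressing and 0x80 logical unit addressing.
 */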
666 #define	FCP_LUN_ADDRESSING		0x80
667 #define	FCP_PD_ADDRESSING		0x00
668 #define	FCP_VOLUME_ADDRESSING		0x40
669 
670 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
671 #define	MAX_INT_DMA			0x7fffffff
672 /*
673  * Property definitions
674  */
675 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
676 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
677 #define	TARGET_PROP	(char *)fcp_target_prop
678 #define	LUN_PROP	(char *)fcp_lun_prop
679 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
680 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
681 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
682 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
683 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
684 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
685 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
686 /*
687  * Short hand macros.
688  */
689 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
690 #define	LUN_TGT		(plun->lun_tgt)
691 
692 /*
693  * Driver private macros
694  */
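/*
 * FCP_ATOB() converts one ASCII hex digit to its numeric value, e.g.
 * FCP_ATOB('3') == 3 and FCP_ATOB('b') == FCP_ATOB('B') == 11.  It is
 * used when turning ASCII hex strings (such as WWNs) into raw bytes;
 * see fcp_ascii_to_wwn().
 */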
695 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
696 			((x) >= 'a' && (x) <= 'f') ?			\
697 			((x) - 'a' + 10) : ((x) - 'A' + 10))
698 
699 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
700 
701 #define	FCP_N_NDI_EVENTS						\
702 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
703 
704 #define	FCP_LINK_STATE_CHANGED(p, c)			\
705 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
706 
707 #define	FCP_TGT_STATE_CHANGED(t, c)			\
708 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
709 
710 #define	FCP_STATE_CHANGED(p, t, c)		\
711 	(FCP_TGT_STATE_CHANGED(t, c))
712 
713 #define	FCP_MUST_RETRY(fpkt)				\
714 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
715 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
716 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
717 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
718 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
719 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
720 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
721 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
722 
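/*
 * Sense key UNIT ATTENTION with ASC/ASCQ 0x3f/0x0e is the standard SCSI
 * "REPORTED LUNS DATA HAS CHANGED" unit attention.
 */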
723 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
724 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
725 	(es)->es_add_code == 0x3f &&		\
726 	(es)->es_qual_code == 0x0e)
727 
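/*
 * Sense key ILLEGAL REQUEST with ASC/ASCQ 0x25/0x00 is the standard SCSI
 * "LOGICAL UNIT NOT SUPPORTED" additional sense code.
 */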
728 #define	FCP_SENSE_NO_LUN(es)			\
729 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
730 	(es)->es_add_code == 0x25 &&		\
731 	(es)->es_qual_code == 0x0)
732 
733 #define	FCP_VERSION		"20091208-1.192"
734 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
735 
736 #define	FCP_NUM_ELEMENTS(array)			\
737 	(sizeof (array) / sizeof ((array)[0]))
738 
739 /*
740  * Debugging, Error reporting, and tracing
741  */
742 #define	FCP_LOG_SIZE		1024 * 1024
743 
744 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
745 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
746 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
747 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
748 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
749 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
750 #define	FCP_LEVEL_7		0x00040
751 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
752 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
753 
754 
755 
756 /*
757  * Log contents to system messages file
758  */
759 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
760 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
761 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
762 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
763 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
764 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
765 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
766 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
767 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
768 
769 
770 /*
771  * Log contents to trace buffer
772  */
773 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
774 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
775 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
776 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
777 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
778 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
779 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
780 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
781 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
782 
783 
784 /*
785  * Log contents to both system messages file and trace buffer
786  */
787 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
788 				FC_TRACE_LOG_MSG)
789 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
790 				FC_TRACE_LOG_MSG)
791 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
792 				FC_TRACE_LOG_MSG)
793 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
794 				FC_TRACE_LOG_MSG)
795 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
796 				FC_TRACE_LOG_MSG)
797 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
798 				FC_TRACE_LOG_MSG)
799 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
800 				FC_TRACE_LOG_MSG)
801 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
802 				FC_TRACE_LOG_MSG)
803 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
804 				FC_TRACE_LOG_MSG)
805 #ifdef DEBUG
806 #define	FCP_DTRACE	fc_trace_debug
807 #else
808 #define	FCP_DTRACE
809 #endif
810 
811 #define	FCP_TRACE	fc_trace_debug
812 
813 static struct cb_ops fcp_cb_ops = {
814 	fcp_open,			/* open */
815 	fcp_close,			/* close */
816 	nodev,				/* strategy */
817 	nodev,				/* print */
818 	nodev,				/* dump */
819 	nodev,				/* read */
820 	nodev,				/* write */
821 	fcp_ioctl,			/* ioctl */
822 	nodev,				/* devmap */
823 	nodev,				/* mmap */
824 	nodev,				/* segmap */
825 	nochpoll,			/* chpoll */
826 	ddi_prop_op,			/* cb_prop_op */
827 	0,				/* streamtab */
828 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
829 	CB_REV,				/* rev */
830 	nodev,				/* aread */
831 	nodev				/* awrite */
832 };
833 
834 
835 static struct dev_ops fcp_ops = {
836 	DEVO_REV,
837 	0,
838 	ddi_getinfo_1to1,
839 	nulldev,		/* identify */
840 	nulldev,		/* probe */
841 	fcp_attach,		/* attach and detach are mandatory */
842 	fcp_detach,
843 	nodev,			/* reset */
844 	&fcp_cb_ops,		/* cb_ops */
845 	NULL,			/* bus_ops */
846 	NULL,			/* power */
847 };
848 
849 
850 char *fcp_version = FCP_NAME_VERSION;
851 
852 static struct modldrv modldrv = {
853 	&mod_driverops,
854 	FCP_NAME_VERSION,
855 	&fcp_ops
856 };
857 
858 
859 static struct modlinkage modlinkage = {
860 	MODREV_1,
861 	&modldrv,
862 	NULL
863 };
864 
865 
866 static fc_ulp_modinfo_t fcp_modinfo = {
867 	&fcp_modinfo,			/* ulp_handle */
868 	FCTL_ULP_MODREV_4,		/* ulp_rev */
869 	FC4_SCSI_FCP,			/* ulp_type */
870 	"fcp",				/* ulp_name */
871 	FCP_STATEC_MASK,		/* ulp_statec_mask */
872 	fcp_port_attach,		/* ulp_port_attach */
873 	fcp_port_detach,		/* ulp_port_detach */
874 	fcp_port_ioctl,			/* ulp_port_ioctl */
875 	fcp_els_callback,		/* ulp_els_callback */
876 	fcp_data_callback,		/* ulp_data_callback */
877 	fcp_statec_callback		/* ulp_statec_callback */
878 };
879 
880 #ifdef	DEBUG
881 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
882 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
883 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
884 				FCP_LEVEL_6 | FCP_LEVEL_7)
885 #else
886 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
887 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
888 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
889 				FCP_LEVEL_6 | FCP_LEVEL_7)
890 #endif
891 
892 /* FCP global variables */
893 int			fcp_bus_config_debug = 0;
894 static int		fcp_log_size = FCP_LOG_SIZE;
895 static int		fcp_trace = FCP_TRACE_DEFAULT;
896 static fc_trace_logq_t	*fcp_logq = NULL;
897 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
898 /*
899  * Auto-configuration is enabled by default.  The only way to disable it is
900  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
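 *
 * An illustrative driver.conf(4) entry for fcp.conf (the value shown is
 * only an example):
 *
 *	manual_configuration_only=1;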
901  */
902 static int		fcp_enable_auto_configuration = 1;
903 static int		fcp_max_bus_config_retries	= 4;
904 static int		fcp_lun_ready_retry = 300;
905 /*
906  * The value assigned to the following variable has changed several times due
907  * to a problem with data underrun reporting in some firmware.  The
908  * current value of 50 gives a timeout value of 25 seconds for a max number
909  * of 256 LUNs.
910  */
911 static int		fcp_max_target_retries = 50;
912 /*
913  * Watchdog variables
914  * ------------------
915  *
916  * fcp_watchdog_init
917  *
918  *	Indicates if the watchdog timer is running or not.  This is actually
919  *	a counter of the number of Fibre Channel ports that attached.  When
920  *	the first port attaches the watchdog is started.  When the last port
921  *	detaches the watchdog timer is stopped.
922  *
923  * fcp_watchdog_time
924  *
925  *	This is the watchdog clock counter.  It is incremented by
926  *	fcp_watchdog_timeout each time the watchdog timer expires.
927  *
928  * fcp_watchdog_timeout
929  *
930  *	timeout value of the watchdog timer.  The unit is 1 second.  It
931  *	the timeout value of the watchdog timer.  The unit is 1 second.	 It
932  *	is strange that this is not a #define	but a variable since the code
933  *	never changes this value.  The reason why it can be said that the
934  *	unit is 1 second is because the number of ticks for the watchdog
935  *	timer is determined like this:
936  *
937  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
938  *				  drv_usectohz(1000000);
939  *
940  *	The value 1000000 is hard coded in the code.
941  *
942  * fcp_watchdog_tick
943  *
944  *	Watchdog timer value in ticks.
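 *
 *	A minimal sketch of how the timer is armed once the tick value has
 *	been computed as shown above (illustrative only, assuming the
 *	standard timeout(9F) interface; fcp_watch() is the expiration
 *	routine declared earlier in this file):
 *
 *	    fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);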
945  */
946 static int		fcp_watchdog_init = 0;
947 static int		fcp_watchdog_time = 0;
948 static int		fcp_watchdog_timeout = 1;
949 static int		fcp_watchdog_tick;
950 
951 /*
952  * fcp_offline_delay is a global variable to enable customisation of
953  * the timeout on link offlines or RSCNs. The default value is set
954  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
955  * specified in FCP4 Chapter 11 (see www.t10.org).
956  *
957  * The variable fcp_offline_delay is specified in SECONDS.
958  *
959  * If we made this a static var then the user would not be able to
960  * change it. This variable is set in fcp_attach().
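 *
 * fcp_attach() reads it from the "fcp_offline_delay" driver property, so
 * it can also be tuned through fcp.conf; an illustrative entry (the value
 * 30 is only an example) would be:
 *
 *	fcp_offline_delay=30;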
961  */
962 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
963 
964 static void		*fcp_softstate = NULL; /* for soft state */
965 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
966 static kmutex_t		fcp_global_mutex;
967 static kmutex_t		fcp_ioctl_mutex;
968 static dev_info_t	*fcp_global_dip = NULL;
969 static timeout_id_t	fcp_watchdog_id;
970 const char		*fcp_lun_prop = "lun";
971 const char		*fcp_sam_lun_prop = "sam-lun";
972 const char		*fcp_target_prop = "target";
973 /*
974  * NOTE: consumers of "node-wwn" property include stmsboot in ON
975  * consolidation.
976  */
977 const char		*fcp_node_wwn_prop = "node-wwn";
978 const char		*fcp_port_wwn_prop = "port-wwn";
979 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
980 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
981 const char		*fcp_manual_config_only = "manual_configuration_only";
982 const char		*fcp_init_port_prop = "initiator-port";
983 const char		*fcp_tgt_port_prop = "target-port";
984 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
985 
986 static struct fcp_port	*fcp_port_head = NULL;
987 static ddi_eventcookie_t	fcp_insert_eid;
988 static ddi_eventcookie_t	fcp_remove_eid;
989 
990 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
991 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
992 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
993 };
994 
995 /*
996  * List of valid commands for the scsi_ioctl call
997  */
998 static uint8_t scsi_ioctl_list[] = {
999 	SCMD_INQUIRY,
1000 	SCMD_REPORT_LUN,
1001 	SCMD_READ_CAPACITY
1002 };
1003 
1004 /*
1005  * this is used to dummy up a report lun response for cases
1006  * where the target doesn't support it
1007  */
1008 static uchar_t fcp_dummy_lun[] = {
1009 	0x00,		/* MSB length (length = no of luns * 8) */
1010 	0x00,
1011 	0x00,
1012 	0x08,		/* LSB length */
1013 	0x00,		/* MSB reserved */
1014 	0x00,
1015 	0x00,
1016 	0x00,		/* LSB reserved */
1017 	FCP_PD_ADDRESSING,
1018 	0x00,		/* LUN is ZERO at the first level */
1019 	0x00,
1020 	0x00,		/* second level is zero */
1021 	0x00,
1022 	0x00,		/* third level is zero */
1023 	0x00,
1024 	0x00		/* fourth level is zero */
1025 };
1026 
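/*
 * Conversion table from a Fibre Channel arbitrated loop physical address
 * (AL_PA) to a dense loop index ("switch" setting); zero entries mark
 * AL_PA values that are not valid on a loop.
 */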
1027 static uchar_t fcp_alpa_to_switch[] = {
1028 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1029 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1030 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1031 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1032 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1033 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1034 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1035 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1036 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1037 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1038 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1039 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1040 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1041 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1042 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1043 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1044 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1045 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1046 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1047 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1048 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1049 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1050 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1051 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1052 };
1053 
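/*
 * SCSI INQUIRY product id string of the VICOM SVE box (see FCP_SVE_THROTTLE
 * above and the corresponding fcp_symmetric_disk_table entry below).
 */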
1054 static caddr_t pid = "SESS01	      ";
1055 
1056 #if	!defined(lint)
1057 
1058 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1059     fcp_port::fcp_next fcp_watchdog_id))
1060 
1061 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1062 
1063 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1064     fcp_insert_eid
1065     fcp_remove_eid
1066     fcp_watchdog_time))
1067 
1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1069     fcp_cb_ops
1070     fcp_ops
1071     callb_cpr))
1072 
1073 #endif /* lint */
1074 
1075 /*
1076  * This table is used to determine whether or not it's safe to copy in
1077  * the target node name for a lun.  Since all luns behind the same target
1078  * have the same wwnn, only targets that do not support multiple luns are
1079  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1080  */
1081 
1082 char *fcp_symmetric_disk_table[] = {
1083 	"SEAGATE ST",
1084 	"IBM	 DDYFT",
1085 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1086 	"SUN	 SENA",		/* SES device */
1087 	"SUN	 SESS01"	/* VICOM SVE box */
1088 };
1089 
1090 int fcp_symmetric_disk_table_size =
1091 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1092 
1093 /*
1094  * This structure is bogus.  scsi_hba_attach_setup() requires this
1095  * information (the kernel will panic if it is not passed in).  It still
1096  * needs to be determined what the actual impact on the system, if any, of
1097  * providing it is.  Since DMA allocation is done in pkt_init it may not
1098  * have any impact.  These values are straight from the Writing Device
1099  * Drivers manual.
1100  */
1101 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1102 	DMA_ATTR_V0,	/* ddi_dma_attr version */
1103 	0,		/* low address */
1104 	0xffffffff,	/* high address */
1105 	0x00ffffff,	/* counter upper bound */
1106 	1,		/* alignment requirements */
1107 	0x3f,		/* burst sizes */
1108 	1,		/* minimum DMA access */
1109 	0xffffffff,	/* maximum DMA access */
1110 	(1 << 24) - 1,	/* segment boundary restrictions */
1111 	1,		/* scatter/gather list length */
1112 	512,		/* device granularity */
1113 	0		/* DMA flags */
1114 };
1115 
1116 /*
1117  * The _init(9e) return value should be that of mod_install(9f). Under
1118  * some circumstances, a failure may not be related to mod_install(9f) and
1119  * one would then require a return value to indicate the failure. Looking
1120  * at mod_install(9f), it is expected to return 0 for success and non-zero
1121  * for failure. mod_install(9f) for device drivers, further goes down the
1122  * calling chain and ends up in ddi_installdrv(), whose return values are
1123  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1124  * calling chain of mod_install(9f) which return values like EINVAL and
1125  * in some cases even return -1.
1126  *
1127  * To work around the vagaries of the mod_install() calling chain, return
1128  * either 0 or ENODEV depending on the success or failure of mod_install()
1129  */
1130 int
1131 _init(void)
1132 {
1133 	int rval;
1134 
1135 	/*
1136 	 * Initialize the soft state framework (needed for the later
1137 	 * ddi_soft_state_zalloc() calls) before registering with the transport.
1138 	 */
1139 	if (ddi_soft_state_init(&fcp_softstate,
1140 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1141 		return (EINVAL);
1142 	}
1143 
1144 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1145 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1146 
1147 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1148 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1149 		mutex_destroy(&fcp_global_mutex);
1150 		mutex_destroy(&fcp_ioctl_mutex);
1151 		ddi_soft_state_fini(&fcp_softstate);
1152 		return (ENODEV);
1153 	}
1154 
1155 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1156 
1157 	if ((rval = mod_install(&modlinkage)) != 0) {
1158 		fc_trace_free_logq(fcp_logq);
1159 		(void) fc_ulp_remove(&fcp_modinfo);
1160 		mutex_destroy(&fcp_global_mutex);
1161 		mutex_destroy(&fcp_ioctl_mutex);
1162 		ddi_soft_state_fini(&fcp_softstate);
1163 		rval = ENODEV;
1164 	}
1165 
1166 	return (rval);
1167 }
1168 
1169 
1170 /*
1171  * the system is done with us as a driver, so clean up
1172  */
1173 int
1174 _fini(void)
1175 {
1176 	int rval;
1177 
1178 	/*
1179 	 * don't start cleaning up until we know that the module remove
1180 	 * has worked  -- if this works, then we know that each instance
1181 	 * has successfully been DDI_DETACHed
1182 	 */
1183 	if ((rval = mod_remove(&modlinkage)) != 0) {
1184 		return (rval);
1185 	}
1186 
1187 	(void) fc_ulp_remove(&fcp_modinfo);
1188 
1189 	ddi_soft_state_fini(&fcp_softstate);
1190 	mutex_destroy(&fcp_global_mutex);
1191 	mutex_destroy(&fcp_ioctl_mutex);
1192 	fc_trace_free_logq(fcp_logq);
1193 
1194 	return (rval);
1195 }
1196 
1197 
1198 int
1199 _info(struct modinfo *modinfop)
1200 {
1201 	return (mod_info(&modlinkage, modinfop));
1202 }
1203 
1204 
1205 /*
1206  * attach the module
1207  */
1208 static int
1209 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1210 {
1211 	int rval = DDI_SUCCESS;
1212 
1213 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1214 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1215 
1216 	if (cmd == DDI_ATTACH) {
1217 		/* The FCP pseudo device is created here. */
1218 		mutex_enter(&fcp_global_mutex);
1219 		fcp_global_dip = devi;
1220 		mutex_exit(&fcp_global_mutex);
1221 
1222 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1223 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1224 			ddi_report_dev(fcp_global_dip);
1225 		} else {
1226 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1227 			mutex_enter(&fcp_global_mutex);
1228 			fcp_global_dip = NULL;
1229 			mutex_exit(&fcp_global_mutex);
1230 
1231 			rval = DDI_FAILURE;
1232 		}
1233 		/*
1234 		 * We check the fcp_offline_delay property at this
1235 		 * point. This variable is global for the driver,
1236 		 * not specific to an instance.
1237 		 *
1238 		 * We do not recommend setting the value to less
1239 		 * than 10 seconds (RA_TOV_els), or greater than
1240 		 * 60 seconds.
1241 		 */
1242 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1243 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1244 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1245 		if ((fcp_offline_delay < 10) ||
1246 		    (fcp_offline_delay > 60)) {
1247 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1248 			    "to %d second(s). This is outside the "
1249 			    "recommended range of 10..60 seconds.",
1250 			    fcp_offline_delay);
1251 		}
1252 	}
1253 
1254 	return (rval);
1255 }
1256 
1257 
1258 /*ARGSUSED*/
1259 static int
1260 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1261 {
1262 	int	res = DDI_SUCCESS;
1263 
1264 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1265 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1266 
1267 	if (cmd == DDI_DETACH) {
1268 		/*
1269 		 * Check if there are active ports/threads. If there
1270 		 * are any, we will fail, else we will succeed (there
1271 		 * should not be much to clean up)
1272 		 */
1273 		mutex_enter(&fcp_global_mutex);
1274 		FCP_DTRACE(fcp_logq, "fcp",
1275 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1276 		    (void *) fcp_port_head);
1277 
1278 		if (fcp_port_head == NULL) {
1279 			ddi_remove_minor_node(fcp_global_dip, NULL);
1280 			fcp_global_dip = NULL;
1281 			mutex_exit(&fcp_global_mutex);
1282 		} else {
1283 			mutex_exit(&fcp_global_mutex);
1284 			res = DDI_FAILURE;
1285 		}
1286 	}
1287 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1288 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1289 
1290 	return (res);
1291 }
1292 
1293 
1294 /* ARGSUSED */
1295 static int
1296 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1297 {
1298 	if (otype != OTYP_CHR) {
1299 		return (EINVAL);
1300 	}
1301 
1302 	/*
1303 	 * Allow only root to talk.
1304 	 */
1305 	if (drv_priv(credp)) {
1306 		return (EPERM);
1307 	}
1308 
1309 	mutex_enter(&fcp_global_mutex);
1310 	if (fcp_oflag & FCP_EXCL) {
1311 		mutex_exit(&fcp_global_mutex);
1312 		return (EBUSY);
1313 	}
1314 
1315 	if (flag & FEXCL) {
1316 		if (fcp_oflag & FCP_OPEN) {
1317 			mutex_exit(&fcp_global_mutex);
1318 			return (EBUSY);
1319 		}
1320 		fcp_oflag |= FCP_EXCL;
1321 	}
1322 	fcp_oflag |= FCP_OPEN;
1323 	mutex_exit(&fcp_global_mutex);
1324 
1325 	return (0);
1326 }
1327 
1328 
1329 /* ARGSUSED */
1330 static int
1331 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1332 {
1333 	if (otype != OTYP_CHR) {
1334 		return (EINVAL);
1335 	}
1336 
1337 	mutex_enter(&fcp_global_mutex);
1338 	if (!(fcp_oflag & FCP_OPEN)) {
1339 		mutex_exit(&fcp_global_mutex);
1340 		return (ENODEV);
1341 	}
1342 	fcp_oflag = FCP_IDLE;
1343 	mutex_exit(&fcp_global_mutex);
1344 
1345 	return (0);
1346 }
1347 
1348 
1349 /*
1350  * fcp_ioctl
1351  *	Entry point for the FCP ioctls
1352  *
1353  * Input:
1354  *	See ioctl(9E)
1355  *
1356  * Output:
1357  *	See ioctl(9E)
1358  *
1359  * Returns:
1360  *	See ioctl(9E)
1361  *
1362  * Context:
1363  *	Kernel context.
1364  */
1365 /* ARGSUSED */
1366 static int
1367 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1368     int *rval)
1369 {
1370 	int			ret = 0;
1371 
1372 	mutex_enter(&fcp_global_mutex);
1373 	if (!(fcp_oflag & FCP_OPEN)) {
1374 		mutex_exit(&fcp_global_mutex);
1375 		return (ENXIO);
1376 	}
1377 	mutex_exit(&fcp_global_mutex);
1378 
1379 	switch (cmd) {
1380 	case FCP_TGT_INQUIRY:
1381 	case FCP_TGT_CREATE:
1382 	case FCP_TGT_DELETE:
1383 		ret = fcp_setup_device_data_ioctl(cmd,
1384 		    (struct fcp_ioctl *)data, mode, rval);
1385 		break;
1386 
1387 	case FCP_TGT_SEND_SCSI:
1388 		mutex_enter(&fcp_ioctl_mutex);
1389 		ret = fcp_setup_scsi_ioctl(
1390 		    (struct fcp_scsi_cmd *)data, mode, rval);
1391 		mutex_exit(&fcp_ioctl_mutex);
1392 		break;
1393 
1394 	case FCP_STATE_COUNT:
1395 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1396 		    mode, rval);
1397 		break;
1398 	case FCP_GET_TARGET_MAPPINGS:
1399 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1400 		    mode, rval);
1401 		break;
1402 	default:
1403 		fcp_log(CE_WARN, NULL,
1404 		    "!Invalid ioctl opcode = 0x%x", cmd);
1405 		ret	= EINVAL;
1406 	}
1407 
1408 	return (ret);
1409 }
1410 
1411 
1412 /*
1413  * fcp_setup_device_data_ioctl
1414  *	Setup handler for the "device data" style of
1415  *	ioctl for FCP.	See "fcp_util.h" for data structure
1416  *	definition.
1417  *
1418  * Input:
1419  *	cmd	= FCP ioctl command
1420  *	data	= ioctl data
1421  *	mode	= See ioctl(9E)
1422  *
1423  * Output:
1424  *	data	= ioctl data
1425  *	rval	= return value - see ioctl(9E)
1426  *
1427  * Returns:
1428  *	See ioctl(9E)
1429  *
1430  * Context:
1431  *	Kernel context.
1432  */
1433 /* ARGSUSED */
1434 static int
1435 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1436     int *rval)
1437 {
1438 	struct fcp_port	*pptr;
1439 	struct	device_data	*dev_data;
1440 	uint32_t		link_cnt;
1441 	la_wwn_t		*wwn_ptr = NULL;
1442 	struct fcp_tgt		*ptgt = NULL;
1443 	struct fcp_lun		*plun = NULL;
1444 	int			i, error;
1445 	struct fcp_ioctl	fioctl;
1446 
1447 #ifdef	_MULTI_DATAMODEL
1448 	switch (ddi_model_convert_from(mode & FMODELS)) {
1449 	case DDI_MODEL_ILP32: {
1450 		struct fcp32_ioctl f32_ioctl;
1451 
1452 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1453 		    sizeof (struct fcp32_ioctl), mode)) {
1454 			return (EFAULT);
1455 		}
1456 		fioctl.fp_minor = f32_ioctl.fp_minor;
1457 		fioctl.listlen = f32_ioctl.listlen;
1458 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1459 		break;
1460 	}
1461 	case DDI_MODEL_NONE:
1462 		if (ddi_copyin((void *)data, (void *)&fioctl,
1463 		    sizeof (struct fcp_ioctl), mode)) {
1464 			return (EFAULT);
1465 		}
1466 		break;
1467 	}
1468 
1469 #else	/* _MULTI_DATAMODEL */
1470 	if (ddi_copyin((void *)data, (void *)&fioctl,
1471 	    sizeof (struct fcp_ioctl), mode)) {
1472 		return (EFAULT);
1473 	}
1474 #endif	/* _MULTI_DATAMODEL */
1475 
1476 	/*
1477 	 * Right now we can assume that the minor number matches with
1478 	 * this instance of fp. If this changes we will need to
1479 	 * revisit this logic.
1480 	 */
1481 	mutex_enter(&fcp_global_mutex);
1482 	pptr = fcp_port_head;
1483 	while (pptr) {
1484 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1485 			break;
1486 		} else {
1487 			pptr = pptr->port_next;
1488 		}
1489 	}
1490 	mutex_exit(&fcp_global_mutex);
1491 	if (pptr == NULL) {
1492 		return (ENXIO);
1493 	}
1494 	mutex_enter(&pptr->port_mutex);
1495 
1496 
1497 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1498 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1499 		mutex_exit(&pptr->port_mutex);
1500 		return (ENOMEM);
1501 	}
1502 
1503 	if (ddi_copyin(fioctl.list, dev_data,
1504 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1505 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1506 		mutex_exit(&pptr->port_mutex);
1507 		return (EFAULT);
1508 	}
1509 	link_cnt = pptr->port_link_cnt;
1510 
1511 	if (cmd == FCP_TGT_INQUIRY) {
1512 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1513 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1514 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1515 			/* This ioctl is requesting INQ info of local HBA */
1516 			mutex_exit(&pptr->port_mutex);
1517 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1518 			dev_data[0].dev_status = 0;
1519 			if (ddi_copyout(dev_data, fioctl.list,
1520 			    (sizeof (struct device_data)) * fioctl.listlen,
1521 			    mode)) {
1522 				kmem_free(dev_data,
1523 				    sizeof (*dev_data) * fioctl.listlen);
1524 				return (EFAULT);
1525 			}
1526 			kmem_free(dev_data,
1527 			    sizeof (*dev_data) * fioctl.listlen);
1528 #ifdef	_MULTI_DATAMODEL
1529 			switch (ddi_model_convert_from(mode & FMODELS)) {
1530 			case DDI_MODEL_ILP32: {
1531 				struct fcp32_ioctl f32_ioctl;
1532 				f32_ioctl.fp_minor = fioctl.fp_minor;
1533 				f32_ioctl.listlen = fioctl.listlen;
1534 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1535 				if (ddi_copyout((void *)&f32_ioctl,
1536 				    (void *)data,
1537 				    sizeof (struct fcp32_ioctl), mode)) {
1538 					return (EFAULT);
1539 				}
1540 				break;
1541 			}
1542 			case DDI_MODEL_NONE:
1543 				if (ddi_copyout((void *)&fioctl, (void *)data,
1544 				    sizeof (struct fcp_ioctl), mode)) {
1545 					return (EFAULT);
1546 				}
1547 				break;
1548 			}
1549 #else	/* _MULTI_DATAMODEL */
1550 			if (ddi_copyout((void *)&fioctl, (void *)data,
1551 			    sizeof (struct fcp_ioctl), mode)) {
1552 				return (EFAULT);
1553 			}
1554 #endif	/* _MULTI_DATAMODEL */
1555 			return (0);
1556 		}
1557 	}
1558 
1559 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1560 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1561 		mutex_exit(&pptr->port_mutex);
1562 		return (ENXIO);
1563 	}
1564 
1565 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1566 	    i++) {
1567 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1568 
1569 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1570 
1571 
1572 		dev_data[i].dev_status = ENXIO;
1573 
1574 		if ((ptgt = fcp_lookup_target(pptr,
1575 		    (uchar_t *)wwn_ptr)) == NULL) {
1576 			mutex_exit(&pptr->port_mutex);
1577 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1578 			    wwn_ptr, &error, 0) == NULL) {
1579 				dev_data[i].dev_status = ENODEV;
1580 				mutex_enter(&pptr->port_mutex);
1581 				continue;
1582 			} else {
1583 
1584 				dev_data[i].dev_status = EAGAIN;
1585 
1586 				mutex_enter(&pptr->port_mutex);
1587 				continue;
1588 			}
1589 		} else {
1590 			mutex_enter(&ptgt->tgt_mutex);
1591 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1592 			    FCP_TGT_BUSY)) {
1593 				dev_data[i].dev_status = EAGAIN;
1594 				mutex_exit(&ptgt->tgt_mutex);
1595 				continue;
1596 			}
1597 
1598 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1599 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1600 					dev_data[i].dev_status = ENOTSUP;
1601 				} else {
1602 					dev_data[i].dev_status = ENXIO;
1603 				}
1604 				mutex_exit(&ptgt->tgt_mutex);
1605 				continue;
1606 			}
1607 
1608 			switch (cmd) {
1609 			case FCP_TGT_INQUIRY:
1610 				/*
1611 				 * We report only the device type of
1612 				 * LUN 0, even though in some cases
1613 				 * (like maxstrat) the LUN 0 device
1614 				 * type may be 0x3f (invalid).  For
1615 				 * bridge boxes the targets appear as
1616 				 * LUNs, and the first LUN could be a
1617 				 * device that the utility may not
1618 				 * care about (like a tape device).
1619 				 */
1620 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1621 				dev_data[i].dev_status = 0;
1622 				mutex_exit(&ptgt->tgt_mutex);
1623 
1624 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1625 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1626 				} else {
1627 					dev_data[i].dev0_type = plun->lun_type;
1628 				}
1629 				mutex_enter(&ptgt->tgt_mutex);
1630 				break;
1631 
1632 			case FCP_TGT_CREATE:
1633 				mutex_exit(&ptgt->tgt_mutex);
1634 				mutex_exit(&pptr->port_mutex);
1635 
1636 				/*
1637 				 * Serialize state change callbacks;
1638 				 * only one callback will be handled
1639 				 * at a time.
1640 				 */
1641 				mutex_enter(&fcp_global_mutex);
1642 				if (fcp_oflag & FCP_BUSY) {
1643 					mutex_exit(&fcp_global_mutex);
1644 					if (dev_data) {
1645 						kmem_free(dev_data,
1646 						    sizeof (*dev_data) *
1647 						    fioctl.listlen);
1648 					}
1649 					return (EBUSY);
1650 				}
1651 				fcp_oflag |= FCP_BUSY;
1652 				mutex_exit(&fcp_global_mutex);
1653 
1654 				dev_data[i].dev_status =
1655 				    fcp_create_on_demand(pptr,
1656 				    wwn_ptr->raw_wwn);
1657 
1658 				if (dev_data[i].dev_status != 0) {
1659 					char	buf[25];
1660 
1661 					for (i = 0; i < FC_WWN_SIZE; i++) {
1662 						(void) sprintf(&buf[i << 1],
1663 						    "%02x",
1664 						    wwn_ptr->raw_wwn[i]);
1665 					}
1666 
1667 					fcp_log(CE_WARN, pptr->port_dip,
1668 					    "!Failed to create nodes for"
1669 					    " pwwn=%s; error=%x", buf,
1670 					    dev_data[i].dev_status);
1671 				}
1672 
1673 				/* allow state change call backs again */
1674 				mutex_enter(&fcp_global_mutex);
1675 				fcp_oflag &= ~FCP_BUSY;
1676 				mutex_exit(&fcp_global_mutex);
1677 
1678 				mutex_enter(&pptr->port_mutex);
1679 				mutex_enter(&ptgt->tgt_mutex);
1680 
1681 				break;
1682 
1683 			case FCP_TGT_DELETE:
1684 				break;
1685 
1686 			default:
1687 				fcp_log(CE_WARN, pptr->port_dip,
1688 				    "!Invalid device data ioctl "
1689 				    "opcode = 0x%x", cmd);
1690 			}
1691 			mutex_exit(&ptgt->tgt_mutex);
1692 		}
1693 	}
1694 	mutex_exit(&pptr->port_mutex);
1695 
1696 	if (ddi_copyout(dev_data, fioctl.list,
1697 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1698 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1699 		return (EFAULT);
1700 	}
1701 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1702 
1703 #ifdef	_MULTI_DATAMODEL
1704 	switch (ddi_model_convert_from(mode & FMODELS)) {
1705 	case DDI_MODEL_ILP32: {
1706 		struct fcp32_ioctl f32_ioctl;
1707 
1708 		f32_ioctl.fp_minor = fioctl.fp_minor;
1709 		f32_ioctl.listlen = fioctl.listlen;
1710 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1711 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1712 		    sizeof (struct fcp32_ioctl), mode)) {
1713 			return (EFAULT);
1714 		}
1715 		break;
1716 	}
1717 	case DDI_MODEL_NONE:
1718 		if (ddi_copyout((void *)&fioctl, (void *)data,
1719 		    sizeof (struct fcp_ioctl), mode)) {
1720 			return (EFAULT);
1721 		}
1722 		break;
1723 	}
1724 #else	/* _MULTI_DATAMODEL */
1725 
1726 	if (ddi_copyout((void *)&fioctl, (void *)data,
1727 	    sizeof (struct fcp_ioctl), mode)) {
1728 		return (EFAULT);
1729 	}
1730 #endif	/* _MULTI_DATAMODEL */
1731 
1732 	return (0);
1733 }
1734 
1735 /*
1736  * Fetch the target mappings (path, etc.) for all LUNs
1737  * on this port.
1738  */
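/*
 * Editor's note: the fragment below is an illustrative user-land sketch and
 * is not part of the driver (it assumes the same headers, plus <stdlib.h>
 * for malloc(), and the same open()ed fd and fp_minor as the FCP_TGT_INQUIRY
 * sketch later in this file).  As the code below shows, the caller passes
 * the total buffer size in bytes in fcp_ioctl.listlen and a buffer for an
 * fc_hba_target_mappings_t (plus additional fc_hba_mapping_entry_t entries)
 * in fcp_ioctl.list; the 16384-byte buffer size chosen here is arbitrary.
 *
 *	fc_hba_target_mappings_t	*maps = malloc(16384);
 *	struct fcp_ioctl		fioctl;
 *
 *	fioctl.fp_minor = fp_minor;
 *	fioctl.listlen = 16384;
 *	fioctl.list = (caddr_t)maps;
 *	if (maps != NULL &&
 *	    ioctl(fd, FCP_GET_TARGET_MAPPINGS, &fioctl) == 0)
 *		(void) printf("%u LUNs mapped\n", maps->numLuns);
 */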
1739 /* ARGSUSED */
1740 static int
1741 fcp_get_target_mappings(struct fcp_ioctl *data,
1742     int mode, int *rval)
1743 {
1744 	struct fcp_port	    *pptr;
1745 	fc_hba_target_mappings_t    *mappings;
1746 	fc_hba_mapping_entry_t	    *map;
1747 	struct fcp_tgt	    *ptgt = NULL;
1748 	struct fcp_lun	    *plun = NULL;
1749 	int			    i, mapIndex, mappingSize;
1750 	int			    listlen;
1751 	struct fcp_ioctl	    fioctl;
1752 	char			    *path;
1753 	fcp_ent_addr_t		    sam_lun_addr;
1754 
1755 #ifdef	_MULTI_DATAMODEL
1756 	switch (ddi_model_convert_from(mode & FMODELS)) {
1757 	case DDI_MODEL_ILP32: {
1758 		struct fcp32_ioctl f32_ioctl;
1759 
1760 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1761 		    sizeof (struct fcp32_ioctl), mode)) {
1762 			return (EFAULT);
1763 		}
1764 		fioctl.fp_minor = f32_ioctl.fp_minor;
1765 		fioctl.listlen = f32_ioctl.listlen;
1766 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1767 		break;
1768 	}
1769 	case DDI_MODEL_NONE:
1770 		if (ddi_copyin((void *)data, (void *)&fioctl,
1771 		    sizeof (struct fcp_ioctl), mode)) {
1772 			return (EFAULT);
1773 		}
1774 		break;
1775 	}
1776 
1777 #else	/* _MULTI_DATAMODEL */
1778 	if (ddi_copyin((void *)data, (void *)&fioctl,
1779 	    sizeof (struct fcp_ioctl), mode)) {
1780 		return (EFAULT);
1781 	}
1782 #endif	/* _MULTI_DATAMODEL */
1783 
1784 	/*
1785 	 * Right now we can assume that the minor number matches with
1786 	 * this instance of fp. If this changes we will need to
1787 	 * revisit this logic.
1788 	 */
1789 	mutex_enter(&fcp_global_mutex);
1790 	pptr = fcp_port_head;
1791 	while (pptr) {
1792 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1793 			break;
1794 		} else {
1795 			pptr = pptr->port_next;
1796 		}
1797 	}
1798 	mutex_exit(&fcp_global_mutex);
1799 	if (pptr == NULL) {
1800 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1801 		    fioctl.fp_minor);
1802 		return (ENXIO);
1803 	}
1804 
1805 
1806 	/* The caller passes the total buffer size in listlen */
1807 	mappingSize = fioctl.listlen;
1808 
1809 	/* Now calculate how many mapping entries will fit */
1810 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1811 	    - sizeof (fc_hba_target_mappings_t);
1812 	if (listlen <= 0) {
1813 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1814 		return (ENXIO);
1815 	}
1816 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1817 
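	/*
	 * Worked example (editor's illustration): with a 4096-byte caller
	 * buffer, and given that fc_hba_target_mappings_t apparently embeds
	 * the first fc_hba_mapping_entry_t, the computation above yields
	 *
	 *	listlen = (4096 + sizeof (fc_hba_mapping_entry_t)
	 *	    - sizeof (fc_hba_target_mappings_t))
	 *	    / sizeof (fc_hba_mapping_entry_t)
	 *
	 * whole mapping entries that can be returned in the buffer.
	 */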
1818 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1819 		return (ENOMEM);
1820 	}
1821 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1822 
1823 	/* Now get to work */
1824 	mapIndex = 0;
1825 
1826 	mutex_enter(&pptr->port_mutex);
1827 	/* Loop through all targets on this port */
1828 	for (i = 0; i < FCP_NUM_HASH; i++) {
1829 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1830 		    ptgt = ptgt->tgt_next) {
1831 
1832 
1833 			/* Loop through all LUNs on this target */
1834 			for (plun = ptgt->tgt_lun; plun != NULL;
1835 			    plun = plun->lun_next) {
1836 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 					continue;
1838 				}
1839 
1840 				path = fcp_get_lun_path(plun);
1841 				if (path == NULL) {
1842 					continue;
1843 				}
1844 
1845 				if (mapIndex >= listlen) {
1846 					mapIndex++;
1847 					kmem_free(path, MAXPATHLEN);
1848 					continue;
1849 				}
1850 				map = &mappings->entries[mapIndex++];
1851 				bcopy(path, map->targetDriver,
1852 				    sizeof (map->targetDriver));
1853 				map->d_id = ptgt->tgt_d_id;
1854 				map->busNumber = 0;
1855 				map->targetNumber = ptgt->tgt_d_id;
1856 				map->osLUN = plun->lun_num;
1857 
1858 				/*
1859 				 * We byte-swapped the LUN when we stored it in
1860 				 * lun_addr. We need to swap it back before
1861 				 * returning it to user land.
1862 				 */
1863 
1864 				sam_lun_addr.ent_addr_0 =
1865 				    BE_16(plun->lun_addr.ent_addr_0);
1866 				sam_lun_addr.ent_addr_1 =
1867 				    BE_16(plun->lun_addr.ent_addr_1);
1868 				sam_lun_addr.ent_addr_2 =
1869 				    BE_16(plun->lun_addr.ent_addr_2);
1870 				sam_lun_addr.ent_addr_3 =
1871 				    BE_16(plun->lun_addr.ent_addr_3);
1872 
1873 				bcopy(&sam_lun_addr, &map->samLUN,
1874 				    FCP_LUN_SIZE);
1875 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 
1880 				if (plun->lun_guid) {
1881 
1882 					/* convert ascii wwn to bytes */
1883 					fcp_ascii_to_wwn(plun->lun_guid,
1884 					    map->guid, sizeof (map->guid));
1885 
1886 					if ((sizeof (map->guid)) <
1887 					    plun->lun_guid_size / 2) {
1888 						cmn_err(CE_WARN,
1889 						    "fcp_get_target_mappings: "
1890 						    "guid copy space "
1891 						    "insufficient. "
1892 						    "Copy Truncation - "
1893 						    "available %d; need %d",
1894 						    (int)sizeof (map->guid),
1895 						    (int)
1896 						    plun->lun_guid_size / 2);
1897 					}
1898 				}
1899 				kmem_free(path, MAXPATHLEN);
1900 			}
1901 		}
1902 	}
1903 	mutex_exit(&pptr->port_mutex);
1904 	mappings->numLuns = mapIndex;
1905 
1906 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1907 		kmem_free(mappings, mappingSize);
1908 		return (EFAULT);
1909 	}
1910 	kmem_free(mappings, mappingSize);
1911 
1912 #ifdef	_MULTI_DATAMODEL
1913 	switch (ddi_model_convert_from(mode & FMODELS)) {
1914 	case DDI_MODEL_ILP32: {
1915 		struct fcp32_ioctl f32_ioctl;
1916 
1917 		f32_ioctl.fp_minor = fioctl.fp_minor;
1918 		f32_ioctl.listlen = fioctl.listlen;
1919 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1920 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1921 		    sizeof (struct fcp32_ioctl), mode)) {
1922 			return (EFAULT);
1923 		}
1924 		break;
1925 	}
1926 	case DDI_MODEL_NONE:
1927 		if (ddi_copyout((void *)&fioctl, (void *)data,
1928 		    sizeof (struct fcp_ioctl), mode)) {
1929 			return (EFAULT);
1930 		}
1931 		break;
1932 	}
1933 #else	/* _MULTI_DATAMODEL */
1934 
1935 	if (ddi_copyout((void *)&fioctl, (void *)data,
1936 	    sizeof (struct fcp_ioctl), mode)) {
1937 		return (EFAULT);
1938 	}
1939 #endif	/* _MULTI_DATAMODEL */
1940 
1941 	return (0);
1942 }
1943 
1944 /*
1945  * fcp_setup_scsi_ioctl
1946  *	Setup handler for the "scsi passthru" style of
1947  *	ioctl for FCP.	See "fcp_util.h" for data structure
1948  *	definition.
1949  *
1950  * Input:
1951  *	u_fscsi	= ioctl data (user address space)
1952  *	mode	= See ioctl(9E)
1953  *
1954  * Output:
1955  *	u_fscsi	= ioctl data (user address space)
1956  *	rval	= return value - see ioctl(9E)
1957  *
1958  * Returns:
1959  *	0	= OK
1960  *	EAGAIN	= See errno.h
1961  *	EBUSY	= See errno.h
1962  *	EFAULT	= See errno.h
1963  *	EINTR	= See errno.h
1964  *	EINVAL	= See errno.h
1965  *	EIO	= See errno.h
1966  *	ENOMEM	= See errno.h
1967  *	ENXIO	= See errno.h
1968  *
1969  * Context:
1970  *	Kernel context.
1971  */
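/*
 * Editor's note: the fragment below is an illustrative user-land sketch and
 * is not part of the driver (fd, port_num, pwwn and the headers are as in
 * the FCP_TGT_INQUIRY sketch earlier).  It fills a struct fcp_scsi_cmd for a
 * read-only SCSI INQUIRY and sends it with FCP_TGT_SEND_SCSI; the memset()
 * leaves scsi_lun at zero, which selects LUN 0.  It assumes the INQUIRY
 * opcode (0x12) is present in the driver's scsi_ioctl_list allow-list and
 * that scsi_timeout is in seconds; the field names come from "fcp_util.h"
 * and from the code below.
 *
 *	struct fcp_scsi_cmd	fscsi;
 *	uchar_t			cdb[6] = { 0x12, 0, 0, 0, 0xff, 0 };
 *	uchar_t			inq[255];
 *	uchar_t			sense[64];
 *
 *	(void) memset(&fscsi, 0, sizeof (fscsi));
 *	fscsi.scsi_fc_port_num	= port_num;
 *	(void) memcpy(&fscsi.scsi_fc_pwwn, pwwn,
 *	    sizeof (fscsi.scsi_fc_pwwn));
 *	fscsi.scsi_flags	= FCP_SCSI_READ;
 *	fscsi.scsi_timeout	= 30;
 *	fscsi.scsi_cdbbufaddr	= (caddr_t)cdb;
 *	fscsi.scsi_cdblen	= sizeof (cdb);
 *	fscsi.scsi_bufaddr	= (caddr_t)inq;
 *	fscsi.scsi_buflen	= sizeof (inq);
 *	fscsi.scsi_rqbufaddr	= (caddr_t)sense;
 *	fscsi.scsi_rqlen	= sizeof (sense);
 *
 *	if (ioctl(fd, FCP_TGT_SEND_SCSI, &fscsi) == 0)
 *		(void) printf("scsi status 0x%x, fc status 0x%x\n",
 *		    fscsi.scsi_bufstatus, fscsi.scsi_fc_status);
 */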
1972 /* ARGSUSED */
1973 static int
1974 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1975     int mode, int *rval)
1976 {
1977 	int			ret		= 0;
1978 	int			temp_ret;
1979 	caddr_t			k_cdbbufaddr	= NULL;
1980 	caddr_t			k_bufaddr	= NULL;
1981 	caddr_t			k_rqbufaddr	= NULL;
1982 	caddr_t			u_cdbbufaddr;
1983 	caddr_t			u_bufaddr;
1984 	caddr_t			u_rqbufaddr;
1985 	struct fcp_scsi_cmd	k_fscsi;
1986 
1987 	/*
1988 	 * Get fcp_scsi_cmd array element from user address space
1989 	 */
1990 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1991 	    != 0) {
1992 		return (ret);
1993 	}
1994 
1995 
1996 	/*
1997 	 * Even though kmem_alloc() checks the validity of the
1998 	 * buffer length, this check is needed when KM_NOSLEEP
1999 	 * is set and a zero buffer length is passed.
2000 	 */
2001 	if ((k_fscsi.scsi_cdblen <= 0) ||
2002 	    (k_fscsi.scsi_buflen <= 0) ||
2003 	    (k_fscsi.scsi_rqlen <= 0)) {
2004 		return (EINVAL);
2005 	}
2006 
2007 	/*
2008 	 * Allocate data for fcp_scsi_cmd pointer fields
2009 	 */
2010 	if (ret == 0) {
2011 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2012 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2013 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
2014 
2015 		if (k_cdbbufaddr == NULL ||
2016 		    k_bufaddr	 == NULL ||
2017 		    k_rqbufaddr	 == NULL) {
2018 			ret = ENOMEM;
2019 		}
2020 	}
2021 
2022 	/*
2023 	 * Get fcp_scsi_cmd pointer fields from user
2024 	 * address space
2025 	 */
2026 	if (ret == 0) {
2027 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2028 		u_bufaddr    = k_fscsi.scsi_bufaddr;
2029 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
2030 
2031 		if (ddi_copyin(u_cdbbufaddr,
2032 		    k_cdbbufaddr,
2033 		    k_fscsi.scsi_cdblen,
2034 		    mode)) {
2035 			ret = EFAULT;
2036 		} else if (ddi_copyin(u_bufaddr,
2037 		    k_bufaddr,
2038 		    k_fscsi.scsi_buflen,
2039 		    mode)) {
2040 			ret = EFAULT;
2041 		} else if (ddi_copyin(u_rqbufaddr,
2042 		    k_rqbufaddr,
2043 		    k_fscsi.scsi_rqlen,
2044 		    mode)) {
2045 			ret = EFAULT;
2046 		}
2047 	}
2048 
2049 	/*
2050 	 * Send scsi command (blocking)
2051 	 */
2052 	if (ret == 0) {
2053 		/*
2054 		 * Prior to sending the scsi command, the
2055 		 * fcp_scsi_cmd data structure must contain kernel,
2056 		 * not user, addresses.
2057 		 */
2058 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2059 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2060 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2061 
2062 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2063 
2064 		/*
2065 		 * After sending the scsi command, the
2066 		 * fcp_scsi_cmd data structure must contain user,
2067 		 * not kernel, addresses.
2068 		 */
2069 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2070 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2071 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2072 	}
2073 
2074 	/*
2075 	 * Put fcp_scsi_cmd pointer fields to user address space
2076 	 */
2077 	if (ret == 0) {
2078 		if (ddi_copyout(k_cdbbufaddr,
2079 		    u_cdbbufaddr,
2080 		    k_fscsi.scsi_cdblen,
2081 		    mode)) {
2082 			ret = EFAULT;
2083 		} else if (ddi_copyout(k_bufaddr,
2084 		    u_bufaddr,
2085 		    k_fscsi.scsi_buflen,
2086 		    mode)) {
2087 			ret = EFAULT;
2088 		} else if (ddi_copyout(k_rqbufaddr,
2089 		    u_rqbufaddr,
2090 		    k_fscsi.scsi_rqlen,
2091 		    mode)) {
2092 			ret = EFAULT;
2093 		}
2094 	}
2095 
2096 	/*
2097 	 * Free data for fcp_scsi_cmd pointer fields
2098 	 */
2099 	if (k_cdbbufaddr != NULL) {
2100 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2101 	}
2102 	if (k_bufaddr != NULL) {
2103 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2104 	}
2105 	if (k_rqbufaddr != NULL) {
2106 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2107 	}
2108 
2109 	/*
2110 	 * Put fcp_scsi_cmd array element to user address space
2111 	 */
2112 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2113 	if (temp_ret != 0) {
2114 		ret = temp_ret;
2115 	}
2116 
2117 	/*
2118 	 * Return status
2119 	 */
2120 	return (ret);
2121 }
2122 
2123 
2124 /*
2125  * fcp_copyin_scsi_cmd
2126  *	Copy in fcp_scsi_cmd data structure from user address space.
2127  *	The data may be in 32 bit or 64 bit modes.
2128  *
2129  * Input:
2130  *	base_addr	= from address (user address space)
2131  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2132  *
2133  * Output:
2134  *	fscsi		= to address (kernel address space)
2135  *
2136  * Returns:
2137  *	0	= OK
2138  *	EFAULT	= Error
2139  *
2140  * Context:
2141  *	Kernel context.
2142  */
2143 static int
2144 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2145 {
2146 #ifdef	_MULTI_DATAMODEL
2147 	struct fcp32_scsi_cmd	f32scsi;
2148 
2149 	switch (ddi_model_convert_from(mode & FMODELS)) {
2150 	case DDI_MODEL_ILP32:
2151 		/*
2152 		 * Copy data from user address space
2153 		 */
2154 		if (ddi_copyin((void *)base_addr,
2155 		    &f32scsi,
2156 		    sizeof (struct fcp32_scsi_cmd),
2157 		    mode)) {
2158 			return (EFAULT);
2159 		}
2160 		/*
2161 		 * Convert from 32 bit to 64 bit
2162 		 */
2163 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2164 		break;
2165 	case DDI_MODEL_NONE:
2166 		/*
2167 		 * Copy data from user address space
2168 		 */
2169 		if (ddi_copyin((void *)base_addr,
2170 		    fscsi,
2171 		    sizeof (struct fcp_scsi_cmd),
2172 		    mode)) {
2173 			return (EFAULT);
2174 		}
2175 		break;
2176 	}
2177 #else	/* _MULTI_DATAMODEL */
2178 	/*
2179 	 * Copy data from user address space
2180 	 */
2181 	if (ddi_copyin((void *)base_addr,
2182 	    fscsi,
2183 	    sizeof (struct fcp_scsi_cmd),
2184 	    mode)) {
2185 		return (EFAULT);
2186 	}
2187 #endif	/* _MULTI_DATAMODEL */
2188 
2189 	return (0);
2190 }
2191 
2192 
2193 /*
2194  * fcp_copyout_scsi_cmd
2195  *	Copy out fcp_scsi_cmd data structure to user address space.
2196  *	The data may be in 32 bit or 64 bit modes.
2197  *
2198  * Input:
2199  *	fscsi		= to address (kernel address space)
2200  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2201  *
2202  * Output:
2203  *	base_addr	= from address (user address space)
2204  *
2205  * Returns:
2206  *	0	= OK
2207  *	EFAULT	= Error
2208  *
2209  * Context:
2210  *	Kernel context.
2211  */
2212 static int
2213 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2214 {
2215 #ifdef	_MULTI_DATAMODEL
2216 	struct fcp32_scsi_cmd	f32scsi;
2217 
2218 	switch (ddi_model_convert_from(mode & FMODELS)) {
2219 	case DDI_MODEL_ILP32:
2220 		/*
2221 		 * Convert from 64 bit to 32 bit
2222 		 */
2223 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2224 		/*
2225 		 * Copy data to user address space
2226 		 */
2227 		if (ddi_copyout(&f32scsi,
2228 		    (void *)base_addr,
2229 		    sizeof (struct fcp32_scsi_cmd),
2230 		    mode)) {
2231 			return (EFAULT);
2232 		}
2233 		break;
2234 	case DDI_MODEL_NONE:
2235 		/*
2236 		 * Copy data to user address space
2237 		 */
2238 		if (ddi_copyout(fscsi,
2239 		    (void *)base_addr,
2240 		    sizeof (struct fcp_scsi_cmd),
2241 		    mode)) {
2242 			return (EFAULT);
2243 		}
2244 		break;
2245 	}
2246 #else	/* _MULTI_DATAMODEL */
2247 	/*
2248 	 * Copy data to user address space
2249 	 */
2250 	if (ddi_copyout(fscsi,
2251 	    (void *)base_addr,
2252 	    sizeof (struct fcp_scsi_cmd),
2253 	    mode)) {
2254 		return (EFAULT);
2255 	}
2256 #endif	/* _MULTI_DATAMODEL */
2257 
2258 	return (0);
2259 }
2260 
2261 
2262 /*
2263  * fcp_send_scsi_ioctl
2264  *	Sends the SCSI command in blocking mode.
2265  *
2266  * Input:
2267  *	fscsi		= SCSI command data structure
2268  *
2269  * Output:
2270  *	fscsi		= SCSI command data structure
2271  *
2272  * Returns:
2273  *	0	= OK
2274  *	EAGAIN	= See errno.h
2275  *	EBUSY	= See errno.h
2276  *	EINTR	= See errno.h
2277  *	EINVAL	= See errno.h
2278  *	EIO	= See errno.h
2279  *	ENOMEM	= See errno.h
2280  *	ENXIO	= See errno.h
2281  *
2282  * Context:
2283  *	Kernel context.
2284  */
2285 static int
2286 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2287 {
2288 	struct fcp_lun	*plun		= NULL;
2289 	struct fcp_port	*pptr		= NULL;
2290 	struct fcp_tgt	*ptgt		= NULL;
2291 	fc_packet_t		*fpkt		= NULL;
2292 	struct fcp_ipkt	*icmd		= NULL;
2293 	int			target_created	= FALSE;
2294 	fc_frame_hdr_t		*hp;
2295 	struct fcp_cmd		fcp_cmd;
2296 	struct fcp_cmd		*fcmd;
2297 	union scsi_cdb		*scsi_cdb;
2298 	la_wwn_t		*wwn_ptr;
2299 	int			nodma;
2300 	struct fcp_rsp		*rsp;
2301 	struct fcp_rsp_info	*rsp_info;
2302 	caddr_t			rsp_sense;
2303 	int			buf_len;
2304 	int			info_len;
2305 	int			sense_len;
2306 	struct scsi_extended_sense	*sense_to = NULL;
2307 	timeout_id_t		tid;
2308 	uint8_t			reconfig_lun = FALSE;
2309 	uint8_t			reconfig_pending = FALSE;
2310 	uint8_t			scsi_cmd;
2311 	int			rsp_len;
2312 	int			cmd_index;
2313 	int			fc_status;
2314 	int			pkt_state;
2315 	int			pkt_action;
2316 	int			pkt_reason;
2317 	int			ret, xport_retval = ~FC_SUCCESS;
2318 	int			lcount;
2319 	int			tcount;
2320 	int			reconfig_status;
2321 	int			port_busy = FALSE;
2322 	uchar_t			*lun_string;
2323 
2324 	/*
2325 	 * Check valid SCSI command
2326 	 */
2327 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2328 	ret = EINVAL;
2329 	for (cmd_index = 0;
2330 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2331 	    ret != 0;
2332 	    cmd_index++) {
2333 		/*
2334 		 * First byte of CDB is the SCSI command
2335 		 */
2336 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2337 			ret = 0;
2338 		}
2339 	}
2340 
2341 	/*
2342 	 * Check inputs
2343 	 */
2344 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2345 		ret = EINVAL;
2346 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2347 		/* no larger than */
2348 		ret = EINVAL;
2349 	}
2350 
2351 
2352 	/*
2353 	 * Find FC port
2354 	 */
2355 	if (ret == 0) {
2356 		/*
2357 		 * Acquire global mutex
2358 		 */
2359 		mutex_enter(&fcp_global_mutex);
2360 
2361 		pptr = fcp_port_head;
2362 		while (pptr) {
2363 			if (pptr->port_instance ==
2364 			    (uint32_t)fscsi->scsi_fc_port_num) {
2365 				break;
2366 			} else {
2367 				pptr = pptr->port_next;
2368 			}
2369 		}
2370 
2371 		if (pptr == NULL) {
2372 			ret = ENXIO;
2373 		} else {
2374 			/*
2375 			 * fc_ulp_busy_port can raise power
2376 			 *  so, we must not hold any mutexes involved in PM
2377 			 */
2378 			mutex_exit(&fcp_global_mutex);
2379 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2380 		}
2381 
2382 		if (ret == 0) {
2383 
2384 			/* remember port is busy, so we will release later */
2385 			port_busy = TRUE;
2386 
2387 			/*
2388 			 * If there is a reconfiguration in progress, wait
2389 			 * for it to complete.
2390 			 */
2391 
2392 			fcp_reconfig_wait(pptr);
2393 
2394 			/* reacquire mutexes in order */
2395 			mutex_enter(&fcp_global_mutex);
2396 			mutex_enter(&pptr->port_mutex);
2397 
2398 			/*
2399 			 * Will port accept DMA?
2400 			 */
2401 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2402 			    ? 1 : 0;
2403 
2404 			/*
2405 			 * If init or offline, device not known
2406 			 *
2407 			 * If we are discovering (onlining), we obviously
2408 			 * cannot provide reliable data about devices
2409 			 * until discovery completes.
2410 			 */
2411 			if (pptr->port_state & (FCP_STATE_INIT |
2412 			    FCP_STATE_OFFLINE)) {
2413 				ret = ENXIO;
2414 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2415 				ret = EBUSY;
2416 			} else {
2417 				/*
2418 				 * Find target from pwwn
2419 				 *
2420 				 * The wwn must be put into a local
2421 				 * variable to ensure alignment.
2422 				 */
2423 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2424 				ptgt = fcp_lookup_target(pptr,
2425 				    (uchar_t *)wwn_ptr);
2426 
2427 				/*
2428 				 * If target not found,
2429 				 */
2430 				if (ptgt == NULL) {
2431 					/*
2432 					 * Note: Still have global &
2433 					 * port mutexes
2434 					 */
2435 					mutex_exit(&pptr->port_mutex);
2436 					ptgt = fcp_port_create_tgt(pptr,
2437 					    wwn_ptr, &ret, &fc_status,
2438 					    &pkt_state, &pkt_action,
2439 					    &pkt_reason);
2440 					mutex_enter(&pptr->port_mutex);
2441 
2442 					fscsi->scsi_fc_status  = fc_status;
2443 					fscsi->scsi_pkt_state  =
2444 					    (uchar_t)pkt_state;
2445 					fscsi->scsi_pkt_reason = pkt_reason;
2446 					fscsi->scsi_pkt_action =
2447 					    (uchar_t)pkt_action;
2448 
2449 					if (ptgt != NULL) {
2450 						target_created = TRUE;
2451 					} else if (ret == 0) {
2452 						ret = ENOMEM;
2453 					}
2454 				}
2455 
2456 				if (ret == 0) {
2457 					/*
2458 					 * Acquire target
2459 					 */
2460 					mutex_enter(&ptgt->tgt_mutex);
2461 
2462 					/*
2463 					 * If the target is marked or busy,
2464 					 * it cannot be used.
2465 					 */
2466 					if (ptgt->tgt_state &
2467 					    (FCP_TGT_MARK |
2468 					    FCP_TGT_BUSY)) {
2469 						ret = EBUSY;
2470 					} else {
2471 						/*
2472 						 * Mark target as busy
2473 						 */
2474 						ptgt->tgt_state |=
2475 						    FCP_TGT_BUSY;
2476 					}
2477 
2478 					/*
2479 					 * Release target
2480 					 */
2481 					lcount = pptr->port_link_cnt;
2482 					tcount = ptgt->tgt_change_cnt;
2483 					mutex_exit(&ptgt->tgt_mutex);
2484 				}
2485 			}
2486 
2487 			/*
2488 			 * Release port
2489 			 */
2490 			mutex_exit(&pptr->port_mutex);
2491 		}
2492 
2493 		/*
2494 		 * Release global mutex
2495 		 */
2496 		mutex_exit(&fcp_global_mutex);
2497 	}
2498 
2499 	if (ret == 0) {
2500 		uint64_t belun = BE_64(fscsi->scsi_lun);
2501 
2502 		/*
2503 		 * If it's a target device, find lun from pwwn
2504 		 * The wwn must be put into a local
2505 		 * variable to ensure alignment.
2506 		 */
2507 		mutex_enter(&pptr->port_mutex);
2508 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2509 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2510 			/* this is not a target */
2511 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2512 			ret = ENXIO;
2513 		} else if ((belun << 16) != 0) {
2514 			/*
2515 			 * Since fcp only supports the PD and LU addressing
2516 			 * methods so far, the last 6 bytes of a valid LUN are
2517 			 * expected to be filled with 00h.
2518 			 */
2519 			fscsi->scsi_fc_status = FC_INVALID_LUN;
2520 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2521 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2522 			    (uint8_t)(belun >> 62), belun);
2523 			ret = ENXIO;
2524 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2525 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2526 			/*
2527 			 * This is a SCSI target, but no LUN at this
2528 			 * address.
2529 			 *
2530 			 * In the future, we may want to send this to
2531 			 * the target, and let it respond
2532 			 * appropriately
2533 			 */
2534 			ret = ENXIO;
2535 		}
2536 		mutex_exit(&pptr->port_mutex);
2537 	}
2538 
2539 	/*
2540 	 * Finished grabbing external resources
2541 	 * Allocate internal packet (icmd)
2542 	 */
2543 	if (ret == 0) {
2544 		/*
2545 		 * Calc rsp len assuming rsp info included
2546 		 */
2547 		rsp_len = sizeof (struct fcp_rsp) +
2548 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2549 
2550 		icmd = fcp_icmd_alloc(pptr, ptgt,
2551 		    sizeof (struct fcp_cmd),
2552 		    rsp_len,
2553 		    fscsi->scsi_buflen,
2554 		    nodma,
2555 		    lcount,			/* ipkt_link_cnt */
2556 		    tcount,			/* ipkt_change_cnt */
2557 		    0,				/* cause */
2558 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2559 
2560 		if (icmd == NULL) {
2561 			ret = ENOMEM;
2562 		} else {
2563 			/*
2564 			 * Setup internal packet as sema sync
2565 			 */
2566 			fcp_ipkt_sema_init(icmd);
2567 		}
2568 	}
2569 
2570 	if (ret == 0) {
2571 		/*
2572 		 * Init fpkt pointer for use.
2573 		 */
2574 
2575 		fpkt = icmd->ipkt_fpkt;
2576 
2577 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2578 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2579 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2580 
2581 		/*
2582 		 * Init fcmd pointer for use by SCSI command
2583 		 */
2584 
2585 		if (nodma) {
2586 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2587 		} else {
2588 			fcmd = &fcp_cmd;
2589 		}
2590 		bzero(fcmd, sizeof (struct fcp_cmd));
2591 		ptgt = plun->lun_tgt;
2592 
2593 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2594 
2595 		fcmd->fcp_ent_addr.ent_addr_0 =
2596 		    BE_16(*(uint16_t *)&(lun_string[0]));
2597 		fcmd->fcp_ent_addr.ent_addr_1 =
2598 		    BE_16(*(uint16_t *)&(lun_string[2]));
2599 		fcmd->fcp_ent_addr.ent_addr_2 =
2600 		    BE_16(*(uint16_t *)&(lun_string[4]));
2601 		fcmd->fcp_ent_addr.ent_addr_3 =
2602 		    BE_16(*(uint16_t *)&(lun_string[6]));
2603 
2604 		/*
2605 		 * Setup internal packet(icmd)
2606 		 */
2607 		icmd->ipkt_lun		= plun;
2608 		icmd->ipkt_restart	= 0;
2609 		icmd->ipkt_retries	= 0;
2610 		icmd->ipkt_opcode	= 0;
2611 
2612 		/*
2613 		 * Init the frame HEADER Pointer for use
2614 		 */
2615 		hp = &fpkt->pkt_cmd_fhdr;
2616 
2617 		hp->s_id	= pptr->port_id;
2618 		hp->d_id	= ptgt->tgt_d_id;
2619 		hp->r_ctl	= R_CTL_COMMAND;
2620 		hp->type	= FC_TYPE_SCSI_FCP;
2621 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2622 		hp->rsvd	= 0;
2623 		hp->seq_id	= 0;
2624 		hp->seq_cnt	= 0;
2625 		hp->ox_id	= 0xffff;
2626 		hp->rx_id	= 0xffff;
2627 		hp->ro		= 0;
2628 
2629 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2630 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2631 		fcmd->fcp_cntl.cntl_write_data	= 0;
2632 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2633 
2634 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2635 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2636 		    fscsi->scsi_cdblen);
2637 
2638 		if (!nodma) {
2639 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2640 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2641 		}
2642 
2643 		/*
2644 		 * Send SCSI command to FC transport
2645 		 */
2646 
2647 		if (ret == 0) {
2648 			mutex_enter(&ptgt->tgt_mutex);
2649 
2650 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2651 				mutex_exit(&ptgt->tgt_mutex);
2652 				fscsi->scsi_fc_status = xport_retval =
2653 				    fc_ulp_transport(pptr->port_fp_handle,
2654 				    fpkt);
2655 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2656 					ret = EIO;
2657 				}
2658 			} else {
2659 				mutex_exit(&ptgt->tgt_mutex);
2660 				ret = EBUSY;
2661 			}
2662 		}
2663 	}
2664 
2665 	/*
2666 	 * Wait for completion only if fc_ulp_transport was called and it
2667 	 * returned success.  That is the only time the callback will happen.
2668 	 * Otherwise, there is no point in waiting.
2669 	 */
2670 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2671 		ret = fcp_ipkt_sema_wait(icmd);
2672 	}
2673 
2674 	/*
2675 	 * Copy data to IOCTL data structures
2676 	 */
2677 	rsp = NULL;
2678 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2679 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2680 
2681 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2682 			fcp_log(CE_WARN, pptr->port_dip,
2683 			    "!SCSI command to d_id=0x%x lun=0x%x"
2684 			    " failed, Bad FCP response values:"
2685 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2686 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2687 			    ptgt->tgt_d_id, plun->lun_num,
2688 			    rsp->reserved_0, rsp->reserved_1,
2689 			    rsp->fcp_u.fcp_status.reserved_0,
2690 			    rsp->fcp_u.fcp_status.reserved_1,
2691 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2692 
2693 			ret = EIO;
2694 		}
2695 	}
2696 
2697 	if ((ret == 0) && (rsp != NULL)) {
2698 		/*
2699 		 * Calc response lengths
2700 		 */
2701 		sense_len = 0;
2702 		info_len = 0;
2703 
2704 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2705 			info_len = rsp->fcp_response_len;
2706 		}
2707 
2708 		rsp_info   = (struct fcp_rsp_info *)
2709 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2710 
2711 		/*
2712 		 * Get SCSI status
2713 		 */
2714 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2715 		/*
2716 		 * If a lun was just added or removed and the next command
2717 		 * comes through this interface, we need to capture the check
2718 		 * condition so we can discover the new topology.
2719 		 */
2720 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2721 		    rsp->fcp_u.fcp_status.sense_len_set) {
2722 			sense_len = rsp->fcp_sense_len;
2723 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2724 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2725 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2726 			    (FCP_SENSE_NO_LUN(sense_to))) {
2727 				reconfig_lun = TRUE;
2728 			}
2729 		}
2730 
2731 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2732 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2733 			if (reconfig_lun == FALSE) {
2734 				reconfig_status =
2735 				    fcp_is_reconfig_needed(ptgt, fpkt);
2736 			}
2737 
2738 			if ((reconfig_lun == TRUE) ||
2739 			    (reconfig_status == TRUE)) {
2740 				mutex_enter(&ptgt->tgt_mutex);
2741 				if (ptgt->tgt_tid == NULL) {
2742 					/*
2743 					 * Either we've been notified the
2744 					 * REPORT_LUN data has changed, or
2745 					 * we've determined on our own that
2746 					 * we're out of date.  Kick off
2747 					 * rediscovery.
2748 					 */
2749 					tid = timeout(fcp_reconfigure_luns,
2750 					    (caddr_t)ptgt, drv_usectohz(1));
2751 
2752 					ptgt->tgt_tid = tid;
2753 					ptgt->tgt_state |= FCP_TGT_BUSY;
2754 					ret = EBUSY;
2755 					reconfig_pending = TRUE;
2756 				}
2757 				mutex_exit(&ptgt->tgt_mutex);
2758 			}
2759 		}
2760 
2761 		/*
2762 		 * Calc residuals and buffer lengths
2763 		 */
2764 
2765 		if (ret == 0) {
2766 			buf_len = fscsi->scsi_buflen;
2767 			fscsi->scsi_bufresid	= 0;
2768 			if (rsp->fcp_u.fcp_status.resid_under) {
2769 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2770 					fscsi->scsi_bufresid = rsp->fcp_resid;
2771 				} else {
2772 					cmn_err(CE_WARN, "fcp: bad residue %x "
2773 					    "for txfer len %x", rsp->fcp_resid,
2774 					    fscsi->scsi_buflen);
2775 					fscsi->scsi_bufresid =
2776 					    fscsi->scsi_buflen;
2777 				}
2778 				buf_len -= fscsi->scsi_bufresid;
2779 			}
2780 			if (rsp->fcp_u.fcp_status.resid_over) {
2781 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2782 			}
2783 
2784 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2785 			if (fscsi->scsi_rqlen < sense_len) {
2786 				sense_len = fscsi->scsi_rqlen;
2787 			}
2788 
2789 			fscsi->scsi_fc_rspcode	= 0;
2790 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2791 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2792 			}
2793 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2794 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2795 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2796 
2797 			/*
2798 			 * Copy data and request sense
2799 			 *
2800 			 * Data must be copied by using the FCP_CP_IN macro.
2801 			 * This will ensure the proper byte order since the data
2802 			 * is being copied directly from the memory mapped
2803 			 * device register.
2804 			 *
2805 			 * The response (and request sense) will be in the
2806 			 * correct byte order.	No special copy is necessary.
2807 			 */
2808 
2809 			if (buf_len) {
2810 				FCP_CP_IN(fpkt->pkt_data,
2811 				    fscsi->scsi_bufaddr,
2812 				    fpkt->pkt_data_acc,
2813 				    buf_len);
2814 			}
2815 			bcopy((void *)rsp_sense,
2816 			    (void *)fscsi->scsi_rqbufaddr,
2817 			    sense_len);
2818 		}
2819 	}
2820 
2821 	/*
2822 	 * Cleanup transport data structures if icmd was alloc-ed
2823 	 * Cleanup transport data structures if icmd was alloc-ed, so that
2824 	 * cleanup happens in the same thread that alloc-ed icmd.
2825 	if (icmd != NULL) {
2826 		fcp_ipkt_sema_cleanup(icmd);
2827 	}
2828 
2829 	/* restore pm busy/idle status */
2830 	if (port_busy) {
2831 		fc_ulp_idle_port(pptr->port_fp_handle);
2832 	}
2833 
2834 	/*
2835 	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
2836 	 * flag, it'll be cleared when the reconfig is complete.
2837 	 */
2838 	if ((ptgt != NULL) && !reconfig_pending) {
2839 		/*
2840 		 * If target was created,
2841 		 */
2842 		if (target_created) {
2843 			mutex_enter(&ptgt->tgt_mutex);
2844 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2845 			mutex_exit(&ptgt->tgt_mutex);
2846 		} else {
2847 			/*
2848 			 * De-mark target as busy
2849 			 */
2850 			mutex_enter(&ptgt->tgt_mutex);
2851 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2852 			mutex_exit(&ptgt->tgt_mutex);
2853 		}
2854 	}
2855 	return (ret);
2856 }
2857 
2858 
2859 static int
2860 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2861     fc_packet_t	*fpkt)
2862 {
2863 	uchar_t			*lun_string;
2864 	uint16_t		lun_num, i;
2865 	int			num_luns;
2866 	int			actual_luns;
2867 	int			num_masked_luns;
2868 	int			lun_buflen;
2869 	struct fcp_lun	*plun	= NULL;
2870 	struct fcp_reportlun_resp	*report_lun;
2871 	uint8_t			reconfig_needed = FALSE;
2872 	uint8_t			lun_exists = FALSE;
2873 	fcp_port_t			*pptr		 = ptgt->tgt_port;
2874 
2875 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2876 
2877 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2878 	    fpkt->pkt_datalen);
2879 
2880 	/* get number of luns (which is supplied as LUNS * 8) */
2881 	num_luns = BE_32(report_lun->num_lun) >> 3;
2882 
2883 	/*
2884 	 * Figure out exactly how many lun strings our response buffer
2885 	 * can hold.
2886 	 */
2887 	lun_buflen = (fpkt->pkt_datalen -
2888 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
2889 
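	/*
	 * Worked example (editor's illustration): with pkt_datalen = 2048,
	 * the 8-byte REPORT_LUNS header (the 4-byte LUN list length plus 4
	 * reserved bytes) leaves (2048 - 8) / 8 = 255 eight-byte LUN
	 * strings that the buffer can hold, regardless of what num_lun
	 * claims.
	 */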
2890 	/*
2891 	 * Is our response buffer full or not? We don't want to
2892 	 * potentially walk beyond the number of luns we have.
2893 	 */
2894 	if (num_luns <= lun_buflen) {
2895 		actual_luns = num_luns;
2896 	} else {
2897 		actual_luns = lun_buflen;
2898 	}
2899 
2900 	mutex_enter(&ptgt->tgt_mutex);
2901 
2902 	/* Scan each lun to see if we have masked it. */
2903 	num_masked_luns = 0;
2904 	if (fcp_lun_blacklist != NULL) {
2905 		for (i = 0; i < actual_luns; i++) {
2906 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2907 			switch (lun_string[0] & 0xC0) {
2908 			case FCP_LUN_ADDRESSING:
2909 			case FCP_PD_ADDRESSING:
2910 			case FCP_VOLUME_ADDRESSING:
2911 				lun_num = ((lun_string[0] & 0x3F) << 8)
2912 				    | lun_string[1];
2913 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2914 				    lun_num) == TRUE) {
2915 					num_masked_luns++;
2916 				}
2917 				break;
2918 			default:
2919 				break;
2920 			}
2921 		}
2922 	}
2923 
2924 	/*
2925 	 * The quick and easy check.  If the number of LUNs reported
2926 	 * doesn't match the number we currently know about, we need
2927 	 * to reconfigure.
2928 	 */
2929 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2930 		mutex_exit(&ptgt->tgt_mutex);
2931 		kmem_free(report_lun, fpkt->pkt_datalen);
2932 		return (TRUE);
2933 	}
2934 
2935 	/*
2936 	 * If the quick and easy check doesn't turn up anything, we walk
2937 	 * the list of luns from the REPORT_LUN response and look for
2938 	 * any luns we don't know about.  If we find one, we know we need
2939 	 * to reconfigure. We will skip LUNs that are masked because of the
2940 	 * blacklist.
2941 	 */
2942 	for (i = 0; i < actual_luns; i++) {
2943 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2944 		lun_exists = FALSE;
2945 		switch (lun_string[0] & 0xC0) {
2946 		case FCP_LUN_ADDRESSING:
2947 		case FCP_PD_ADDRESSING:
2948 		case FCP_VOLUME_ADDRESSING:
2949 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2950 
2951 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2952 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2953 				lun_exists = TRUE;
2954 				break;
2955 			}
2956 
2957 			for (plun = ptgt->tgt_lun; plun;
2958 			    plun = plun->lun_next) {
2959 				if (plun->lun_num == lun_num) {
2960 					lun_exists = TRUE;
2961 					break;
2962 				}
2963 			}
2964 			break;
2965 		default:
2966 			break;
2967 		}
2968 
2969 		if (lun_exists == FALSE) {
2970 			reconfig_needed = TRUE;
2971 			break;
2972 		}
2973 	}
2974 
2975 	mutex_exit(&ptgt->tgt_mutex);
2976 	kmem_free(report_lun, fpkt->pkt_datalen);
2977 
2978 	return (reconfig_needed);
2979 }
2980 
2981 /*
2982  * This function is called by fcp_handle_page83 and uses inquiry response data
2983  * stored in plun->lun_inq to determine whether or not a device is a member of
2984  * the table fcp_symmetric_disk_table.  We return 0 if it is in the table,
2985  * otherwise 1.
2986  */
2987 static int
2988 fcp_symmetric_device_probe(struct fcp_lun *plun)
2989 {
2990 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2991 	char			*devidptr;
2992 	int			i, len;
2993 
2994 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2995 		devidptr = fcp_symmetric_disk_table[i];
2996 		len = (int)strlen(devidptr);
2997 
2998 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
2999 			return (0);
3000 		}
3001 	}
3002 	return (1);
3003 }
3004 
3005 
3006 /*
3007  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3008  * It returns the current count of state change callbacks on the port,
3009  * i.e. the value of port_link_cnt.
3010  *
3011  * INPUT:
3012  *   fcp_ioctl.fp_minor -> The minor # of the fp port
3013  *   fcp_ioctl.listlen	-> 1
3014  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
3015  */
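/*
 * Editor's note: the fragment below is an illustrative user-land sketch and
 * is not part of the driver (fd and fp_minor as in the earlier sketches).
 * Per the inputs documented above, listlen must be 1 and list must point at
 * a single 32-bit integer that receives the count:
 *
 *	uint32_t		cnt;
 *	struct fcp_ioctl	fioctl;
 *
 *	fioctl.fp_minor = fp_minor;
 *	fioctl.listlen = 1;
 *	fioctl.list = (caddr_t)&cnt;
 *	if (ioctl(fd, FCP_STATE_COUNT, &fioctl) == 0)
 *		(void) printf("state change count %u\n", cnt);
 */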
3016 /*ARGSUSED2*/
3017 static int
3018 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3019 {
3020 	int			ret;
3021 	uint32_t		link_cnt;
3022 	struct fcp_ioctl	fioctl;
3023 	struct fcp_port	*pptr = NULL;
3024 
3025 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3026 	    &pptr)) != 0) {
3027 		return (ret);
3028 	}
3029 
3030 	ASSERT(pptr != NULL);
3031 
3032 	if (fioctl.listlen != 1) {
3033 		return (EINVAL);
3034 	}
3035 
3036 	mutex_enter(&pptr->port_mutex);
3037 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3038 		mutex_exit(&pptr->port_mutex);
3039 		return (ENXIO);
3040 	}
3041 
3042 	/*
3043 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3044 	 * When fcp initially attaches to the port and there is nothing
3045 	 * hanging off the port, or if there was a repeat offline state change
3046 	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3047 	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3048 	 * will differentiate the 2 cases.
3049 	 */
3050 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3051 		mutex_exit(&pptr->port_mutex);
3052 		return (ENXIO);
3053 	}
3054 
3055 	link_cnt = pptr->port_link_cnt;
3056 	mutex_exit(&pptr->port_mutex);
3057 
3058 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3059 		return (EFAULT);
3060 	}
3061 
3062 #ifdef	_MULTI_DATAMODEL
3063 	switch (ddi_model_convert_from(mode & FMODELS)) {
3064 	case DDI_MODEL_ILP32: {
3065 		struct fcp32_ioctl f32_ioctl;
3066 
3067 		f32_ioctl.fp_minor = fioctl.fp_minor;
3068 		f32_ioctl.listlen = fioctl.listlen;
3069 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3070 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3071 		    sizeof (struct fcp32_ioctl), mode)) {
3072 			return (EFAULT);
3073 		}
3074 		break;
3075 	}
3076 	case DDI_MODEL_NONE:
3077 		if (ddi_copyout((void *)&fioctl, (void *)data,
3078 		    sizeof (struct fcp_ioctl), mode)) {
3079 			return (EFAULT);
3080 		}
3081 		break;
3082 	}
3083 #else	/* _MULTI_DATAMODEL */
3084 
3085 	if (ddi_copyout((void *)&fioctl, (void *)data,
3086 	    sizeof (struct fcp_ioctl), mode)) {
3087 		return (EFAULT);
3088 	}
3089 #endif	/* _MULTI_DATAMODEL */
3090 
3091 	return (0);
3092 }
3093 
3094 /*
3095  * This function copies the fcp_ioctl structure passed in from user land
3096  * into kernel land. Handles 32 bit applications.
3097  */
3098 /*ARGSUSED*/
3099 static int
3100 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3101     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3102 {
3103 	struct fcp_port	*t_pptr;
3104 
3105 #ifdef	_MULTI_DATAMODEL
3106 	switch (ddi_model_convert_from(mode & FMODELS)) {
3107 	case DDI_MODEL_ILP32: {
3108 		struct fcp32_ioctl f32_ioctl;
3109 
3110 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3111 		    sizeof (struct fcp32_ioctl), mode)) {
3112 			return (EFAULT);
3113 		}
3114 		fioctl->fp_minor = f32_ioctl.fp_minor;
3115 		fioctl->listlen = f32_ioctl.listlen;
3116 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3117 		break;
3118 	}
3119 	case DDI_MODEL_NONE:
3120 		if (ddi_copyin((void *)data, (void *)fioctl,
3121 		    sizeof (struct fcp_ioctl), mode)) {
3122 			return (EFAULT);
3123 		}
3124 		break;
3125 	}
3126 
3127 #else	/* _MULTI_DATAMODEL */
3128 	if (ddi_copyin((void *)data, (void *)fioctl,
3129 	    sizeof (struct fcp_ioctl), mode)) {
3130 		return (EFAULT);
3131 	}
3132 #endif	/* _MULTI_DATAMODEL */
3133 
3134 	/*
3135 	 * Right now we can assume that the minor number matches with
3136 	 * this instance of fp. If this changes we will need to
3137 	 * revisit this logic.
3138 	 */
3139 	mutex_enter(&fcp_global_mutex);
3140 	t_pptr = fcp_port_head;
3141 	while (t_pptr) {
3142 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3143 			break;
3144 		} else {
3145 			t_pptr = t_pptr->port_next;
3146 		}
3147 	}
3148 	*pptr = t_pptr;
3149 	mutex_exit(&fcp_global_mutex);
3150 	if (t_pptr == NULL) {
3151 		return (ENXIO);
3152 	}
3153 
3154 	return (0);
3155 }
3156 
3157 /*
3158  *     Function: fcp_port_create_tgt
3159  *
3160  *  Description: As the name suggests, this function creates the target context
3161  *		 specified by the WWN provided by the caller.  If the
3162  *		 creation goes well and the target is known by fp/fctl a PLOGI
3163  *		 followed by a PRLI are issued.
3164  *
3165  *     Argument: pptr		fcp port structure
3166  *		 pwwn		WWN of the target
3167  *		 ret_val	Address of the return code.  It could be:
3168  *				EIO, ENOMEM or 0.
3169  *		 fc_status	PLOGI or PRLI status completion
3170  *		 fc_pkt_state	PLOGI or PRLI state completion
3171  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3172  *		 fc_pkt_action	PLOGI or PRLI action completion
3173  *
3174  * Return Value: NULL if it failed
3175  *		 Target structure address if it succeeds
3176  */
3177 static struct fcp_tgt *
3178 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3179     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3180 {
3181 	struct fcp_tgt	*ptgt = NULL;
3182 	fc_portmap_t		devlist;
3183 	int			lcount;
3184 	int			error;
3185 
3186 	*ret_val = 0;
3187 
3188 	/*
3189 	 * Check FC port device & get port map
3190 	 */
3191 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3192 	    &error, 1) == NULL) {
3193 		*ret_val = EIO;
3194 	} else {
3195 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3196 		    &devlist) != FC_SUCCESS) {
3197 			*ret_val = EIO;
3198 		}
3199 	}
3200 
3201 	/* Set port map flags */
3202 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3203 
3204 	/* Allocate target */
3205 	if (*ret_val == 0) {
3206 		lcount = pptr->port_link_cnt;
3207 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3208 		if (ptgt == NULL) {
3209 			fcp_log(CE_WARN, pptr->port_dip,
3210 			    "!FC target allocation failed");
3211 			*ret_val = ENOMEM;
3212 		} else {
3213 			/* Setup target */
3214 			mutex_enter(&ptgt->tgt_mutex);
3215 
3216 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3217 			ptgt->tgt_tmp_cnt	= 1;
3218 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3219 			ptgt->tgt_hard_addr	=
3220 			    devlist.map_hard_addr.hard_addr;
3221 			ptgt->tgt_pd_handle	= devlist.map_pd;
3222 			ptgt->tgt_fca_dev	= NULL;
3223 
3224 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3225 			    FC_WWN_SIZE);
3226 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3227 			    FC_WWN_SIZE);
3228 
3229 			mutex_exit(&ptgt->tgt_mutex);
3230 		}
3231 	}
3232 
3233 	/* Release global mutex for PLOGI and PRLI */
3234 	mutex_exit(&fcp_global_mutex);
3235 
3236 	/* Send PLOGI (If necessary) */
3237 	if (*ret_val == 0) {
3238 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3239 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3240 	}
3241 
3242 	/* Send PRLI (If necessary) */
3243 	if (*ret_val == 0) {
3244 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3245 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3246 	}
3247 
3248 	mutex_enter(&fcp_global_mutex);
3249 
3250 	return (ptgt);
3251 }
3252 
3253 /*
3254  *     Function: fcp_tgt_send_plogi
3255  *
3256  *  Description: This function sends a PLOGI to the target specified by the
3257  *		 caller and waits till it completes.
3258  *
3259  *     Argument: ptgt		Target to send the plogi to.
3260  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3261  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3262  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3263  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3264  *
3265  * Return Value: 0
3266  *		 ENOMEM
3267  *		 EIO
3268  *
3269  *	Context: User context.
3270  */
3271 static int
3272 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3273     int *fc_pkt_reason, int *fc_pkt_action)
3274 {
3275 	struct fcp_port	*pptr;
3276 	struct fcp_ipkt	*icmd;
3277 	struct fc_packet	*fpkt;
3278 	fc_frame_hdr_t		*hp;
3279 	struct la_els_logi	logi;
3280 	int			tcount;
3281 	int			lcount;
3282 	int			ret, login_retval = ~FC_SUCCESS;
3283 
3284 	ret = 0;
3285 
3286 	pptr = ptgt->tgt_port;
3287 
3288 	lcount = pptr->port_link_cnt;
3289 	tcount = ptgt->tgt_change_cnt;
3290 
3291 	/* Alloc internal packet */
3292 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3293 	    sizeof (la_els_logi_t), 0,
3294 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3295 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3296 
3297 	if (icmd == NULL) {
3298 		ret = ENOMEM;
3299 	} else {
3300 		/*
3301 		 * Setup internal packet as sema sync
3302 		 */
3303 		fcp_ipkt_sema_init(icmd);
3304 
3305 		/*
3306 		 * Setup internal packet (icmd)
3307 		 */
3308 		icmd->ipkt_lun		= NULL;
3309 		icmd->ipkt_restart	= 0;
3310 		icmd->ipkt_retries	= 0;
3311 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3312 
3313 		/*
3314 		 * Setup fc_packet
3315 		 */
3316 		fpkt = icmd->ipkt_fpkt;
3317 
3318 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3319 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3320 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3321 
3322 		/*
3323 		 * Setup FC frame header
3324 		 */
3325 		hp = &fpkt->pkt_cmd_fhdr;
3326 
3327 		hp->s_id	= pptr->port_id;	/* source ID */
3328 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3329 		hp->r_ctl	= R_CTL_ELS_REQ;
3330 		hp->type	= FC_TYPE_EXTENDED_LS;
3331 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3332 		hp->seq_id	= 0;
3333 		hp->rsvd	= 0;
3334 		hp->df_ctl	= 0;
3335 		hp->seq_cnt	= 0;
3336 		hp->ox_id	= 0xffff;		/* i.e. none */
3337 		hp->rx_id	= 0xffff;		/* i.e. none */
3338 		hp->ro		= 0;
3339 
3340 		/*
3341 		 * Setup PLOGI
3342 		 */
3343 		bzero(&logi, sizeof (struct la_els_logi));
3344 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3345 
3346 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3347 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3348 
3349 		/*
3350 		 * Send PLOGI
3351 		 */
3352 		*fc_status = login_retval =
3353 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3354 		if (*fc_status != FC_SUCCESS) {
3355 			ret = EIO;
3356 		}
3357 	}
3358 
3359 	/*
3360 	 * Wait for completion
3361 	 */
3362 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3363 		ret = fcp_ipkt_sema_wait(icmd);
3364 
3365 		*fc_pkt_state	= fpkt->pkt_state;
3366 		*fc_pkt_reason	= fpkt->pkt_reason;
3367 		*fc_pkt_action	= fpkt->pkt_action;
3368 	}
3369 
3370 	/*
3371 	 * Cleanup transport data structures if icmd was alloc-ed.  The
3372 	 * callback only posts the semaphore, so cleanup always happens
3373 	 * here, in the same thread that alloc-ed icmd.
3374 	 */
3375 	if (icmd != NULL) {
3376 		fcp_ipkt_sema_cleanup(icmd);
3377 	}
3378 
3379 	return (ret);
3380 }
3381 
3382 /*
3383  *     Function: fcp_tgt_send_prli
3384  *
3385  *  Description: Does nothing as of today.
3386  *
3387  *     Argument: ptgt		Target to send the prli to.
3388  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3389  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3390  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3391  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3392  *
3393  * Return Value: 0
3394  */
3395 /*ARGSUSED*/
3396 static int
3397 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3398     int *fc_pkt_reason, int *fc_pkt_action)
3399 {
3400 	return (0);
3401 }
3402 
3403 /*
3404  *     Function: fcp_ipkt_sema_init
3405  *
3406  *  Description: Initializes the semaphore contained in the internal packet.
3407  *
3408  *     Argument: icmd	Internal packet the semaphore of which must be
3409  *			initialized.
3410  *
3411  * Return Value: None
3412  *
3413  *	Context: User context only.
3414  */
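/*
 * Editor's note: the fragment below is an illustrative summary, distilled
 * from fcp_send_scsi_ioctl() and fcp_tgt_send_plogi() above, of how the
 * ipkt semaphore routines are used together; it is not additional driver
 * code.  fcp_ipkt_sema_init() sets pkt_comp to fcp_ipkt_sema_callback(),
 * which only posts the semaphore; fcp_ipkt_sema_wait() blocks until that
 * happens and maps pkt_state to an errno; fcp_ipkt_sema_cleanup() destroys
 * the semaphore and frees the icmd.
 *
 *	fcp_ipkt_sema_init(icmd);
 *	if (fc_ulp_transport(pptr->port_fp_handle, icmd->ipkt_fpkt) ==
 *	    FC_SUCCESS) {
 *		ret = fcp_ipkt_sema_wait(icmd);
 *	}
 *	fcp_ipkt_sema_cleanup(icmd);
 */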
3415 static void
3416 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3417 {
3418 	struct fc_packet	*fpkt;
3419 
3420 	fpkt = icmd->ipkt_fpkt;
3421 
3422 	/* Create semaphore for sync */
3423 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3424 
3425 	/* Setup the completion callback */
3426 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3427 }
3428 
3429 /*
3430  *     Function: fcp_ipkt_sema_wait
3431  *
3432  *  Description: Waits on the semaphore embedded in the internal packet.  The
3433  *		 semaphore is released in the callback.
3434  *
3435  *     Argument: icmd	Internal packet to wait on for completion.
3436  *
3437  * Return Value: 0
3438  *		 EIO
3439  *		 EBUSY
3440  *		 EAGAIN
3441  *
3442  *	Context: User context only.
3443  *
3444  * This function does a conversion between the field pkt_state of the fc_packet
3445  * embedded in the internal packet (icmd) and the code it returns.
3446  */
3447 static int
3448 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3449 {
3450 	struct fc_packet	*fpkt;
3451 	int	ret;
3452 
3453 	ret = EIO;
3454 	fpkt = icmd->ipkt_fpkt;
3455 
3456 	/*
3457 	 * Wait on semaphore
3458 	 */
3459 	sema_p(&(icmd->ipkt_sema));
3460 
3461 	/*
3462 	 * Check the status of the FC packet
3463 	 */
3464 	switch (fpkt->pkt_state) {
3465 	case FC_PKT_SUCCESS:
3466 		ret = 0;
3467 		break;
3468 	case FC_PKT_LOCAL_RJT:
3469 		switch (fpkt->pkt_reason) {
3470 		case FC_REASON_SEQ_TIMEOUT:
3471 		case FC_REASON_RX_BUF_TIMEOUT:
3472 			ret = EAGAIN;
3473 			break;
3474 		case FC_REASON_PKT_BUSY:
3475 			ret = EBUSY;
3476 			break;
3477 		}
3478 		break;
3479 	case FC_PKT_TIMEOUT:
3480 		ret = EAGAIN;
3481 		break;
3482 	case FC_PKT_LOCAL_BSY:
3483 	case FC_PKT_TRAN_BSY:
3484 	case FC_PKT_NPORT_BSY:
3485 	case FC_PKT_FABRIC_BSY:
3486 		ret = EBUSY;
3487 		break;
3488 	case FC_PKT_LS_RJT:
3489 	case FC_PKT_BA_RJT:
3490 		switch (fpkt->pkt_reason) {
3491 		case FC_REASON_LOGICAL_BSY:
3492 			ret = EBUSY;
3493 			break;
3494 		}
3495 		break;
3496 	case FC_PKT_FS_RJT:
3497 		switch (fpkt->pkt_reason) {
3498 		case FC_REASON_FS_LOGICAL_BUSY:
3499 			ret = EBUSY;
3500 			break;
3501 		}
3502 		break;
3503 	}
3504 
3505 	return (ret);
3506 }
3507 
3508 /*
3509  *     Function: fcp_ipkt_sema_callback
3510  *
3511  *  Description: Registered as the completion callback function for the FC
3512  *		 transport when the ipkt semaphore is used for sync. This will
3513  *		 cleanup the used data structures, if necessary, and wake up
3514  *		 the user thread to complete the transaction.
3515  *
3516  *     Argument: fpkt	FC packet (points to the icmd)
3517  *
3518  * Return Value: None
3519  *
3520  *	Context: User context only
3521  */
3522 static void
3523 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3524 {
3525 	struct fcp_ipkt	*icmd;
3526 
3527 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3528 
3529 	/*
3530 	 * Wake up user thread
3531 	 */
3532 	sema_v(&(icmd->ipkt_sema));
3533 }
3534 
3535 /*
3536  *     Function: fcp_ipkt_sema_cleanup
3537  *
3538  *  Description: Called to cleanup (if necessary) the data structures used
3539  *		 when ipkt sema is used for sync.  This function will detect
3540  *		 whether the caller is the last thread (via counter) and
3541  *		 cleanup only if necessary.
3542  *
3543  *     Argument: icmd	Internal command packet
3544  *
3545  * Return Value: None
3546  *
3547  *	Context: User context only
3548  */
3549 static void
3550 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3551 {
3552 	struct fcp_tgt	*ptgt;
3553 	struct fcp_port	*pptr;
3554 
3555 	ptgt = icmd->ipkt_tgt;
3556 	pptr = icmd->ipkt_port;
3557 
3558 	/*
3559 	 * Acquire data structure
3560 	 */
3561 	mutex_enter(&ptgt->tgt_mutex);
3562 
3563 	/*
3564 	 * Destroy semaphore
3565 	 */
3566 	sema_destroy(&(icmd->ipkt_sema));
3567 
3568 	/*
3569 	 * Cleanup internal packet
3570 	 */
3571 	mutex_exit(&ptgt->tgt_mutex);
3572 	fcp_icmd_free(pptr, icmd);
3573 }
3574 
3575 /*
3576  *     Function: fcp_port_attach
3577  *
3578  *  Description: Called by the transport framework to resume, suspend or
3579  *		 attach a new port.
3580  *
3581  *     Argument: ulph		Port handle
3582  *		 *pinfo		Port information
3583  *		 cmd		Command
3584  *		 s_id		Port ID
3585  *
3586  * Return Value: FC_FAILURE or FC_SUCCESS
3587  */
3588 /*ARGSUSED*/
3589 static int
3590 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3591     fc_attach_cmd_t cmd, uint32_t s_id)
3592 {
3593 	int	instance;
3594 	int	res = FC_FAILURE; /* default result */
3595 
3596 	ASSERT(pinfo != NULL);
3597 
3598 	instance = ddi_get_instance(pinfo->port_dip);
3599 
3600 	switch (cmd) {
3601 	case FC_CMD_ATTACH:
3602 		/*
3603 		 * this port instance attaching for the first time (or after
3604 		 * this port instance is attaching for the first time (or after
3605 		 * having been detached earlier)
3606 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3607 		    instance) == DDI_SUCCESS) {
3608 			res = FC_SUCCESS;
3609 		} else {
3610 			ASSERT(ddi_get_soft_state(fcp_softstate,
3611 			    instance) == NULL);
3612 		}
3613 		break;
3614 
3615 	case FC_CMD_RESUME:
3616 	case FC_CMD_POWER_UP:
3617 		/*
3618 		 * this port instance was attached and then suspended, and
3619 		 * will now be resumed
3620 		 */
3621 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3622 		    instance) == DDI_SUCCESS) {
3623 			res = FC_SUCCESS;
3624 		}
3625 		break;
3626 
3627 	default:
3628 		/* shouldn't happen */
3629 		FCP_TRACE(fcp_logq, "fcp",
3630 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3631 		    "port_attach: unknown command: %d", cmd);
3632 		break;
3633 	}
3634 
3635 	/* return result */
3636 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3637 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3638 
3639 	return (res);
3640 }
3641 
3642 
3643 /*
3644  * detach or suspend this port instance
3645  *
3646  * acquires and releases the global mutex
3647  *
3648  * acquires and releases the mutex for this port
3649  *
3650  * acquires and releases the hotplug mutex for this port
3651  */
3652 /*ARGSUSED*/
3653 static int
3654 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3655     fc_detach_cmd_t cmd)
3656 {
3657 	int			flag;
3658 	int			instance;
3659 	struct fcp_port		*pptr;
3660 
3661 	instance = ddi_get_instance(info->port_dip);
3662 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3663 
3664 	switch (cmd) {
3665 	case FC_CMD_SUSPEND:
3666 		FCP_DTRACE(fcp_logq, "fcp",
3667 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3668 		    "port suspend called for port %d", instance);
3669 		flag = FCP_STATE_SUSPENDED;
3670 		break;
3671 
3672 	case FC_CMD_POWER_DOWN:
3673 		FCP_DTRACE(fcp_logq, "fcp",
3674 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3675 		    "port power down called for port %d", instance);
3676 		flag = FCP_STATE_POWER_DOWN;
3677 		break;
3678 
3679 	case FC_CMD_DETACH:
3680 		FCP_DTRACE(fcp_logq, "fcp",
3681 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3682 		    "port detach called for port %d", instance);
3683 		flag = FCP_STATE_DETACHING;
3684 		break;
3685 
3686 	default:
3687 		/* shouldn't happen */
3688 		return (FC_FAILURE);
3689 	}
3690 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3691 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3692 
3693 	return (fcp_handle_port_detach(pptr, flag, instance));
3694 }
3695 
3696 
3697 /*
3698  * called for ioctls on the transport's devctl interface, and the transport
3699  * has passed it to us
3700  *
3701  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3702  *
3703  * return FC_SUCCESS if we decide to claim the ioctl,
3704  * else return FC_UNCLAIMED
3705  *
3706  * *rval is set iff we decide to claim the ioctl
3707  */
3708 /*ARGSUSED*/
3709 static int
3710 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3711     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3712 {
3713 	int			retval = FC_UNCLAIMED;	/* return value */
3714 	struct fcp_port		*pptr = NULL;		/* our soft state */
3715 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3716 	dev_info_t		*cdip;
3717 	mdi_pathinfo_t		*pip = NULL;
3718 	char			*ndi_nm;		/* NDI name */
3719 	char			*ndi_addr;		/* NDI addr */
3720 	int			is_mpxio, circ;
3721 	int			devi_entered = 0;
3722 	time_t			end_time;
3723 
3724 	ASSERT(rval != NULL);
3725 
3726 	FCP_DTRACE(fcp_logq, "fcp",
3727 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3728 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3729 
3730 	/* if already claimed then forget it */
3731 	if (claimed) {
3732 		/*
3733 		 * for now, if this ioctl has already been claimed, then
3734 		 * we just ignore it
3735 		 */
3736 		return (retval);
3737 	}
3738 
3739 	/* get our port info */
3740 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3741 		fcp_log(CE_WARN, NULL,
3742 		    "!fcp: Invalid port handle in ioctl");
3743 		*rval = ENXIO;
3744 		return (retval);
3745 	}
3746 	is_mpxio = pptr->port_mpxio;
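
	/*
	 * Two passes are made over cmd: this first switch only validates
	 * the request and resolves the devctl handle (and, for the
	 * device-level commands, the child dip or pathinfo); the second
	 * switch further down does the actual work.
	 */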
3747 
3748 	switch (cmd) {
3749 	case DEVCTL_BUS_GETSTATE:
3750 	case DEVCTL_BUS_QUIESCE:
3751 	case DEVCTL_BUS_UNQUIESCE:
3752 	case DEVCTL_BUS_RESET:
3753 	case DEVCTL_BUS_RESETALL:
3754 
3755 	case DEVCTL_BUS_DEV_CREATE:
3756 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3757 			return (retval);
3758 		}
3759 		break;
3760 
3761 	case DEVCTL_DEVICE_GETSTATE:
3762 	case DEVCTL_DEVICE_OFFLINE:
3763 	case DEVCTL_DEVICE_ONLINE:
3764 	case DEVCTL_DEVICE_REMOVE:
3765 	case DEVCTL_DEVICE_RESET:
3766 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3767 			return (retval);
3768 		}
3769 
3770 		ASSERT(dcp != NULL);
3771 
3772 		/* ensure we have a name and address */
3773 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3774 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3775 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3776 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3777 			    "ioctl: can't get name (%s) or addr (%s)",
3778 			    ndi_nm ? ndi_nm : "<null ptr>",
3779 			    ndi_addr ? ndi_addr : "<null ptr>");
3780 			ndi_dc_freehdl(dcp);
3781 			return (retval);
3782 		}
3783 
3784 
3785 		/* get our child's DIP */
3786 		ASSERT(pptr != NULL);
3787 		if (is_mpxio) {
3788 			mdi_devi_enter(pptr->port_dip, &circ);
3789 		} else {
3790 			ndi_devi_enter(pptr->port_dip, &circ);
3791 		}
3792 		devi_entered = 1;
3793 
3794 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3795 		    ndi_addr)) == NULL) {
3796 			/* Look for virtually enumerated devices. */
3797 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3798 			if (pip == NULL ||
3799 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3800 				*rval = ENXIO;
3801 				goto out;
3802 			}
3803 		}
3804 		break;
3805 
3806 	default:
3807 		*rval = ENOTTY;
3808 		return (retval);
3809 	}
3810 
3811 	/* this ioctl is ours -- process it */
3812 
3813 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3814 
3815 	/* we assume it will be a success; else we'll set error value */
3816 	*rval = 0;
3817 
3818 
3819 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3820 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3821 	    "ioctl: claiming this one");
3822 
3823 	/* handle ioctls now */
3824 	switch (cmd) {
3825 	case DEVCTL_DEVICE_GETSTATE:
3826 		ASSERT(cdip != NULL);
3827 		ASSERT(dcp != NULL);
3828 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3829 			*rval = EFAULT;
3830 		}
3831 		break;
3832 
3833 	case DEVCTL_DEVICE_REMOVE:
3834 	case DEVCTL_DEVICE_OFFLINE: {
3835 		int			flag = 0;
3836 		int			lcount;
3837 		int			tcount;
3838 		struct fcp_pkt	*head = NULL;
3839 		struct fcp_lun	*plun;
3840 		child_info_t		*cip = CIP(cdip);
3841 		int			all = 1;
3842 		struct fcp_lun	*tplun;
3843 		struct fcp_tgt	*ptgt;
3844 
3845 		ASSERT(pptr != NULL);
3846 		ASSERT(cdip != NULL);
3847 
3848 		mutex_enter(&pptr->port_mutex);
3849 		if (pip != NULL) {
3850 			cip = CIP(pip);
3851 		}
3852 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3853 			mutex_exit(&pptr->port_mutex);
3854 			*rval = ENXIO;
3855 			break;
3856 		}
3857 
3858 		head = fcp_scan_commands(plun);
3859 		if (head != NULL) {
3860 			fcp_abort_commands(head, LUN_PORT);
3861 		}
3862 		lcount = pptr->port_link_cnt;
3863 		tcount = plun->lun_tgt->tgt_change_cnt;
3864 		mutex_exit(&pptr->port_mutex);
3865 
3866 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3867 			flag = NDI_DEVI_REMOVE;
3868 		}
3869 
3870 		if (is_mpxio) {
3871 			mdi_devi_exit(pptr->port_dip, circ);
3872 		} else {
3873 			ndi_devi_exit(pptr->port_dip, circ);
3874 		}
3875 		devi_entered = 0;
3876 
3877 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3878 		    FCP_OFFLINE, lcount, tcount, flag);
3879 
3880 		if (*rval != NDI_SUCCESS) {
3881 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3882 			break;
3883 		}
3884 
3885 		fcp_update_offline_flags(plun);
3886 
3887 		ptgt = plun->lun_tgt;
3888 		mutex_enter(&ptgt->tgt_mutex);
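		/*
		 * Determine whether every LUN on this target is now offline.
		 */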
3889 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3890 		    tplun->lun_next) {
3891 			mutex_enter(&tplun->lun_mutex);
3892 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3893 				all = 0;
3894 			}
3895 			mutex_exit(&tplun->lun_mutex);
3896 		}
3897 
3898 		if (all) {
3899 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3900 			/*
3901 			 * The user is unconfiguring/offlining the device.
3902 			 * If fabric and the auto configuration is set
3903 			 * then make sure the user is the only one who
3904 			 * can reconfigure the device.
3905 			 */
3906 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3907 			    fcp_enable_auto_configuration) {
3908 				ptgt->tgt_manual_config_only = 1;
3909 			}
3910 		}
3911 		mutex_exit(&ptgt->tgt_mutex);
3912 		break;
3913 	}
3914 
3915 	case DEVCTL_DEVICE_ONLINE: {
3916 		int			lcount;
3917 		int			tcount;
3918 		struct fcp_lun	*plun;
3919 		child_info_t		*cip = CIP(cdip);
3920 
3921 		ASSERT(cdip != NULL);
3922 		ASSERT(pptr != NULL);
3923 
3924 		mutex_enter(&pptr->port_mutex);
3925 		if (pip != NULL) {
3926 			cip = CIP(pip);
3927 		}
3928 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3929 			mutex_exit(&pptr->port_mutex);
3930 			*rval = ENXIO;
3931 			break;
3932 		}
3933 		lcount = pptr->port_link_cnt;
3934 		tcount = plun->lun_tgt->tgt_change_cnt;
3935 		mutex_exit(&pptr->port_mutex);
3936 
3937 		/*
3938 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3939 		 * to allow the device attach to occur when the device is
3940 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3941 		 * from the scsi_probe()).
3942 		 */
3943 		mutex_enter(&LUN_TGT->tgt_mutex);
3944 		plun->lun_state |= FCP_LUN_ONLINING;
3945 		mutex_exit(&LUN_TGT->tgt_mutex);
3946 
3947 		if (is_mpxio) {
3948 			mdi_devi_exit(pptr->port_dip, circ);
3949 		} else {
3950 			ndi_devi_exit(pptr->port_dip, circ);
3951 		}
3952 		devi_entered = 0;
3953 
3954 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3955 		    FCP_ONLINE, lcount, tcount, 0);
3956 
3957 		if (*rval != NDI_SUCCESS) {
3958 			/* Reset the FCP_LUN_ONLINING bit */
3959 			mutex_enter(&LUN_TGT->tgt_mutex);
3960 			plun->lun_state &= ~FCP_LUN_ONLINING;
3961 			mutex_exit(&LUN_TGT->tgt_mutex);
3962 			*rval = EIO;
3963 			break;
3964 		}
3965 		mutex_enter(&LUN_TGT->tgt_mutex);
3966 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3967 		    FCP_LUN_ONLINING);
3968 		mutex_exit(&LUN_TGT->tgt_mutex);
3969 		break;
3970 	}
3971 
3972 	case DEVCTL_BUS_DEV_CREATE: {
3973 		uchar_t			*bytes = NULL;
3974 		uint_t			nbytes;
3975 		struct fcp_tgt		*ptgt = NULL;
3976 		struct fcp_lun		*plun = NULL;
3977 		dev_info_t		*useless_dip = NULL;
3978 
3979 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3980 		    DEVCTL_CONSTRUCT, &useless_dip);
3981 		if (*rval != 0 || useless_dip == NULL) {
3982 			break;
3983 		}
3984 
3985 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3986 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3987 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3988 			*rval = EINVAL;
3989 			(void) ndi_devi_free(useless_dip);
3990 			if (bytes != NULL) {
3991 				ddi_prop_free(bytes);
3992 			}
3993 			break;
3994 		}
3995 
3996 		*rval = fcp_create_on_demand(pptr, bytes);
3997 		if (*rval == 0) {
3998 			mutex_enter(&pptr->port_mutex);
3999 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4000 			if (ptgt) {
4001 				/*
4002 				 * We now have a pointer to the target that
4003 				 * was created. Let's point to the first LUN on
4004 				 * this new target.
4005 				 */
4006 				mutex_enter(&ptgt->tgt_mutex);
4007 
4008 				plun = ptgt->tgt_lun;
4009 				/*
4010 				 * There may be stale/offline LUN entries on
4011 				 * this list (this is by design) and so we have
4012 				 * to make sure we point to the first online
4013 				 * LUN
4014 				 */
4015 				while (plun &&
4016 				    plun->lun_state & FCP_LUN_OFFLINE) {
4017 					plun = plun->lun_next;
4018 				}
4019 
4020 				mutex_exit(&ptgt->tgt_mutex);
4021 			}
4022 			mutex_exit(&pptr->port_mutex);
4023 		}
4024 
4025 		if (*rval == 0 && ptgt && plun) {
4026 			mutex_enter(&plun->lun_mutex);
4027 			/*
4028 			 * Allow up to fcp_lun_ready_retry seconds to
4029 			 * configure all the luns behind the target.
4030 			 *
4031 			 * The intent here is to allow targets with long
4032 			 * reboot/reset-recovery times to become available
4033 			 * while limiting the maximum wait time for an
4034 			 * unresponsive target.
4035 			 */
4036 			end_time = ddi_get_lbolt() +
4037 			    SEC_TO_TICK(fcp_lun_ready_retry);
4038 
4039 			while (ddi_get_lbolt() < end_time) {
4040 				retval = FC_SUCCESS;
4041 
4042 				/*
4043 				 * The new ndi interfaces for on-demand creation
4044 				 * are inflexible.  Do some more work to pass on
4045 				 * a path name of some LUN (design is broken!)
4046 				 */
4047 				if (plun->lun_cip) {
4048 					if (plun->lun_mpxio == 0) {
4049 						cdip = DIP(plun->lun_cip);
4050 					} else {
4051 						cdip = mdi_pi_get_client(
4052 						    PIP(plun->lun_cip));
4053 					}
4054 					if (cdip == NULL) {
4055 						*rval = ENXIO;
4056 						break;
4057 					}
4058 
4059 					if (!i_ddi_devi_attached(cdip)) {
4060 						mutex_exit(&plun->lun_mutex);
4061 						delay(drv_usectohz(1000000));
4062 						mutex_enter(&plun->lun_mutex);
4063 					} else {
4064 						/*
4065 						 * This Lun is ready, lets
4066 						 * This LUN is ready; let's
4067 						 */
4068 						mutex_exit(&plun->lun_mutex);
4069 						plun = plun->lun_next;
4070 						while (plun && (plun->lun_state
4071 						    & FCP_LUN_OFFLINE)) {
4072 							plun = plun->lun_next;
4073 						}
4074 						if (!plun) {
4075 							break;
4076 						}
4077 						mutex_enter(&plun->lun_mutex);
4078 					}
4079 				} else {
4080 					/*
4081 					 * lun_cip field for a valid lun
4082 					 * should never be NULL. Fail the
4083 					 * command.
4084 					 */
4085 					*rval = ENXIO;
4086 					break;
4087 				}
4088 			}
4089 			if (plun) {
4090 				mutex_exit(&plun->lun_mutex);
4091 			} else {
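				/*
				 * All online LUNs behind the new target
				 * attached in time; hand the node@addr name
				 * of the last LUN examined back to the
				 * caller through the devctl handle.
				 */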
4092 				char devnm[MAXNAMELEN];
4093 				int nmlen;
4094 
4095 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4096 				    ddi_node_name(cdip),
4097 				    ddi_get_name_addr(cdip));
4098 
4099 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4100 				    0) {
4101 					*rval = EFAULT;
4102 				}
4103 			}
4104 		} else {
4105 			int	i;
4106 			char	buf[25];
4107 
4108 			for (i = 0; i < FC_WWN_SIZE; i++) {
4109 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4110 			}
4111 
4112 			fcp_log(CE_WARN, pptr->port_dip,
4113 			    "!Failed to create nodes for pwwn=%s; error=%x",
4114 			    buf, *rval);
4115 		}
4116 
4117 		(void) ndi_devi_free(useless_dip);
4118 		ddi_prop_free(bytes);
4119 		break;
4120 	}
4121 
4122 	case DEVCTL_DEVICE_RESET: {
4123 		struct fcp_lun		*plun;
4124 		child_info_t		*cip = CIP(cdip);
4125 
4126 		ASSERT(cdip != NULL);
4127 		ASSERT(pptr != NULL);
4128 		mutex_enter(&pptr->port_mutex);
4129 		if (pip != NULL) {
4130 			cip = CIP(pip);
4131 		}
4132 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4133 			mutex_exit(&pptr->port_mutex);
4134 			*rval = ENXIO;
4135 			break;
4136 		}
4137 		mutex_exit(&pptr->port_mutex);
4138 
4139 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4140 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4141 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4142 
4143 			*rval = ENXIO;
4144 			break;
4145 		}
4146 
4147 		if (plun->lun_sd == NULL) {
4148 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4149 
4150 			*rval = ENXIO;
4151 			break;
4152 		}
4153 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4154 
4155 		/*
4156 		 * set up ap so that fcp_reset can figure out
4157 		 * which target to reset
4158 		 */
4159 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4160 		    RESET_TARGET) == FALSE) {
4161 			*rval = EIO;
4162 		}
4163 		break;
4164 	}
4165 
4166 	case DEVCTL_BUS_GETSTATE:
4167 		ASSERT(dcp != NULL);
4168 		ASSERT(pptr != NULL);
4169 		ASSERT(pptr->port_dip != NULL);
4170 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4171 		    NDI_SUCCESS) {
4172 			*rval = EFAULT;
4173 		}
4174 		break;
4175 
4176 	case DEVCTL_BUS_QUIESCE:
4177 	case DEVCTL_BUS_UNQUIESCE:
4178 		*rval = ENOTSUP;
4179 		break;
4180 
4181 	case DEVCTL_BUS_RESET:
4182 	case DEVCTL_BUS_RESETALL:
4183 		ASSERT(pptr != NULL);
4184 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4185 		break;
4186 
4187 	default:
4188 		ASSERT(dcp != NULL);
4189 		*rval = ENOTTY;
4190 		break;
4191 	}
4192 
4193 	/* all done -- clean up and return */
4194 out:	if (devi_entered) {
4195 		if (is_mpxio) {
4196 			mdi_devi_exit(pptr->port_dip, circ);
4197 		} else {
4198 			ndi_devi_exit(pptr->port_dip, circ);
4199 		}
4200 	}
4201 
4202 	if (dcp != NULL) {
4203 		ndi_dc_freehdl(dcp);
4204 	}
4205 
4206 	return (retval);
4207 }
4208 
4209 
4210 /*ARGSUSED*/
4211 static int
4212 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4213     uint32_t claimed)
4214 {
4215 	uchar_t			r_ctl;
4216 	uchar_t			ls_code;
4217 	struct fcp_port	*pptr;
4218 
4219 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4220 		return (FC_UNCLAIMED);
4221 	}
4222 
4223 	mutex_enter(&pptr->port_mutex);
4224 	if (pptr->port_state & (FCP_STATE_DETACHING |
4225 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4226 		mutex_exit(&pptr->port_mutex);
4227 		return (FC_UNCLAIMED);
4228 	}
4229 	mutex_exit(&pptr->port_mutex);
4230 
4231 	r_ctl = buf->ub_frame.r_ctl;
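
	/*
	 * Only unsolicited extended link service requests are of interest
	 * here, and of those only PRLI is handled; everything else is left
	 * unclaimed.
	 */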
4232 
4233 	switch (r_ctl & R_CTL_ROUTING) {
4234 	case R_CTL_EXTENDED_SVC:
4235 		if (r_ctl == R_CTL_ELS_REQ) {
4236 			ls_code = buf->ub_buffer[0];
4237 
4238 			switch (ls_code) {
4239 			case LA_ELS_PRLI:
4240 				/*
4241 				 * We really don't care if something fails.
4242 				 * If the PRLI was not sent out, then the
4243 				 * other end will time it out.
4244 				 */
4245 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4246 					return (FC_SUCCESS);
4247 				}
4248 				return (FC_UNCLAIMED);
4249 				/* NOTREACHED */
4250 
4251 			default:
4252 				break;
4253 			}
4254 		}
4255 		/* FALLTHROUGH */
4256 
4257 	default:
4258 		return (FC_UNCLAIMED);
4259 	}
4260 }
4261 
4262 
4263 /*ARGSUSED*/
4264 static int
4265 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4266     uint32_t claimed)
4267 {
4268 	return (FC_UNCLAIMED);
4269 }
4270 
4271 /*
4272  *     Function: fcp_statec_callback
4273  *
4274  *  Description: The purpose of this function is to handle a port state change.
4275  *		 It is called from fp/fctl and, in a few instances, internally.
4276  *
4277  *     Argument: ulph		fp/fctl port handle
4278  *		 port_handle	fcp_port structure
4279  *		 port_state	Physical state of the port
4280  *		 port_top	Topology
4281  *		 *devlist	Pointer to the first entry of a table
4282  *				containing the remote ports that can be
4283  *				reached.
4284  *		 dev_cnt	Number of entries pointed by devlist.
4285  *		 port_sid	Port ID of the local port.
4286  *
4287  * Return Value: None
4288  */
4289 /*ARGSUSED*/
4290 static void
4291 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4292     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4293     uint32_t dev_cnt, uint32_t port_sid)
4294 {
4295 	uint32_t		link_count;
4296 	int			map_len = 0;
4297 	struct fcp_port	*pptr;
4298 	fcp_map_tag_t		*map_tag = NULL;
4299 
4300 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4301 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4302 		return;			/* nothing to work with! */
4303 	}
4304 
4305 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4306 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4307 	    "fcp_statec_callback: port state/dev_cnt/top ="
4308 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4309 	    dev_cnt, port_top);
4310 
4311 	mutex_enter(&pptr->port_mutex);
4312 
4313 	/*
4314 	 * If a thread is in detach, don't do anything.
4315 	 */
4316 	if (pptr->port_state & (FCP_STATE_DETACHING |
4317 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4318 		mutex_exit(&pptr->port_mutex);
4319 		return;
4320 	}
4321 
4322 	/*
4323 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4324 	 * init_pkt is called, it knows whether or not the target's status
4325 	 * (or pd) might be changing.
4326 	 */
4327 
4328 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4329 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4330 	}
4331 
4332 	/*
4333 	 * the transport doesn't allocate or probe unless asked
4334 	 * to by either the applications or ULPs
4335 	 *
4336 	 * in cases where the port is OFFLINE at the time of the port
4337 	 * attach callback and the link comes ONLINE later, for
4338 	 * easier automatic node creation (i.e. without having to
4339 	 * go out and run the utility to perform LOGINs) the
4340 	 * following is helpful
4341 	 */
4342 	pptr->port_phys_state = port_state;
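
	/*
	 * map_tag holds one change-counter snapshot per devlist entry;
	 * fcp_handle_devices() fills it in and uses the snapshots when
	 * finishing initialization or offlining targets.
	 */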
4343 
4344 	if (dev_cnt) {
4345 		mutex_exit(&pptr->port_mutex);
4346 
4347 		map_len = sizeof (*map_tag) * dev_cnt;
4348 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4349 		if (map_tag == NULL) {
4350 			fcp_log(CE_WARN, pptr->port_dip,
4351 			    "!fcp%d: failed to allocate map tags; "
4352 			    "state change will not be processed",
4353 			    pptr->port_instance);
4354 
4355 			mutex_enter(&pptr->port_mutex);
4356 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4357 			mutex_exit(&pptr->port_mutex);
4358 
4359 			return;
4360 		}
4361 
4362 		mutex_enter(&pptr->port_mutex);
4363 	}
4364 
4365 	if (pptr->port_id != port_sid) {
4366 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4367 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4368 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4369 		    port_sid);
4370 		/*
4371 		 * The local port changed ID. It is the first time a port ID
4372 		 * is assigned or something drastic happened.  We might have
4373 		 * been unplugged and replugged on another loop or fabric port
4374 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4375 		 * the fabric we were plugged into.
4376 		 */
4377 		pptr->port_id = port_sid;
4378 	}
4379 
4380 	switch (FC_PORT_STATE_MASK(port_state)) {
4381 	case FC_STATE_OFFLINE:
4382 	case FC_STATE_RESET_REQUESTED:
4383 		/*
4384 		 * link has gone from online to offline -- just update the
4385 		 * state of this port to BUSY and MARKed to go offline
4386 		 */
4387 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4388 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4389 		    "link went offline");
4390 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4391 			/*
4392 			 * We were offline a while ago and this one
4393 			 * seems to indicate that the loop has gone
4394 			 * dead forever.
4395 			 */
4396 			pptr->port_tmp_cnt += dev_cnt;
4397 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4398 			pptr->port_state |= FCP_STATE_INIT;
4399 			link_count = pptr->port_link_cnt;
4400 			fcp_handle_devices(pptr, devlist, dev_cnt,
4401 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4402 		} else {
4403 			pptr->port_link_cnt++;
4404 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4405 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4406 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4407 			if (pptr->port_mpxio) {
4408 				fcp_update_mpxio_path_verifybusy(pptr);
4409 			}
4410 			pptr->port_state |= FCP_STATE_OFFLINE;
4411 			pptr->port_state &=
4412 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4413 			pptr->port_tmp_cnt = 0;
4414 		}
4415 		mutex_exit(&pptr->port_mutex);
4416 		break;
4417 
4418 	case FC_STATE_ONLINE:
4419 	case FC_STATE_LIP:
4420 	case FC_STATE_LIP_LBIT_SET:
4421 		/*
4422 		 * link has gone from offline to online
4423 		 */
4424 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4425 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4426 		    "link went online");
4427 
4428 		pptr->port_link_cnt++;
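
		/*
		 * Wait for internal packets still outstanding from the
		 * previous link state to drain before reworking the
		 * target list.
		 */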
4429 
4430 		while (pptr->port_ipkt_cnt) {
4431 			mutex_exit(&pptr->port_mutex);
4432 			delay(drv_usectohz(1000000));
4433 			mutex_enter(&pptr->port_mutex);
4434 		}
4435 
4436 		pptr->port_topology = port_top;
4437 
4438 		/*
4439 		 * The state of the targets and luns accessible through this
4440 		 * port is updated.
4441 		 */
4442 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4443 		    FCP_CAUSE_LINK_CHANGE);
4444 
4445 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4446 		pptr->port_state |= FCP_STATE_ONLINING;
4447 		pptr->port_tmp_cnt = dev_cnt;
4448 		link_count = pptr->port_link_cnt;
4449 
4450 		pptr->port_deadline = fcp_watchdog_time +
4451 		    FCP_ICMD_DEADLINE;
4452 
4453 		if (!dev_cnt) {
4454 			/*
4455 			 * We go directly to the online state if no remote
4456 			 * ports were discovered.
4457 			 */
4458 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4459 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4460 			    "No remote ports discovered");
4461 
4462 			pptr->port_state &= ~FCP_STATE_ONLINING;
4463 			pptr->port_state |= FCP_STATE_ONLINE;
4464 		}
4465 
4466 		switch (port_top) {
4467 		case FC_TOP_FABRIC:
4468 		case FC_TOP_PUBLIC_LOOP:
4469 		case FC_TOP_PRIVATE_LOOP:
4470 		case FC_TOP_PT_PT:
4471 
4472 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4473 				fcp_retry_ns_registry(pptr, port_sid);
4474 			}
4475 
4476 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4477 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4478 			break;
4479 
4480 		default:
4481 			/*
4482 			 * We got here because we were provided with an unknown
4483 			 * topology.
4484 			 */
4485 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4486 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4487 			}
4488 
4489 			pptr->port_tmp_cnt -= dev_cnt;
4490 			fcp_log(CE_WARN, pptr->port_dip,
4491 			    "!unknown/unsupported topology (0x%x)", port_top);
4492 			break;
4493 		}
4494 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4495 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4496 		    "Notify ssd of the reset to reinstate the reservations");
4497 
4498 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4499 		    &pptr->port_reset_notify_listf);
4500 
4501 		mutex_exit(&pptr->port_mutex);
4502 
4503 		break;
4504 
4505 	case FC_STATE_RESET:
4506 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4507 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4508 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4509 		    "RESET state, waiting for Offline/Online state_cb");
4510 		mutex_exit(&pptr->port_mutex);
4511 		break;
4512 
4513 	case FC_STATE_DEVICE_CHANGE:
4514 		/*
4515 		 * We come here when an application has requested
4516 		 * Dynamic node creation/deletion in Fabric connectivity.
4517 		 */
4518 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4519 		    FCP_STATE_INIT)) {
4520 			/*
4521 			 * This case can happen when the FCTL is in the
4522 			 * process of giving us an online state change and the
4523 			 * host on the other side issues a PLOGI/PLOGO. Ideally
4524 			 * the state changes should be serialized unless
4525 			 * they are opposite (online-offline).
4526 			 * The transport will give us a final state change
4527 			 * so we can ignore this for the time being.
4528 			 */
4529 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4530 			mutex_exit(&pptr->port_mutex);
4531 			break;
4532 		}
4533 
4534 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4535 			fcp_retry_ns_registry(pptr, port_sid);
4536 		}
4537 
4538 		/*
4539 		 * Extend the deadline under steady state conditions
4540 		 * to provide more time for the device-change-commands
4541 		 */
4542 		if (!pptr->port_ipkt_cnt) {
4543 			pptr->port_deadline = fcp_watchdog_time +
4544 			    FCP_ICMD_DEADLINE;
4545 		}
4546 
4547 		/*
4548 		 * There is another race condition here: if we were in the
4549 		 * ONLINING state and a device in the map logs out,
4550 		 * fp will give another state change as DEVICE_CHANGE
4551 		 * and OLD. This will result in that target being offlined
4552 		 * and its pd_handle freed. If from the first statec callback
4553 		 * we were going to fire a PLOGI/PRLI, the system will
4554 		 * panic in fc_ulp_transport with an invalid pd_handle.
4555 		 * The fix is to check the link_cnt before issuing
4556 		 * any command down.
4557 		 */
4558 		fcp_update_targets(pptr, devlist, dev_cnt,
4559 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4560 
4561 		link_count = pptr->port_link_cnt;
4562 
4563 		fcp_handle_devices(pptr, devlist, dev_cnt,
4564 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4565 
4566 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4567 
4568 		mutex_exit(&pptr->port_mutex);
4569 		break;
4570 
4571 	case FC_STATE_TARGET_PORT_RESET:
4572 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4573 			fcp_retry_ns_registry(pptr, port_sid);
4574 		}
4575 
4576 		/* Do nothing else */
4577 		mutex_exit(&pptr->port_mutex);
4578 		break;
4579 
4580 	default:
4581 		fcp_log(CE_WARN, pptr->port_dip,
4582 		    "!Invalid state change=0x%x", port_state);
4583 		mutex_exit(&pptr->port_mutex);
4584 		break;
4585 	}
4586 
4587 	if (map_tag) {
4588 		kmem_free(map_tag, map_len);
4589 	}
4590 }
4591 
4592 /*
4593  *     Function: fcp_handle_devices
4594  *
4595  *  Description: This function updates the devices currently known by
4596  *		 walking the list provided by the caller.  The list passed
4597  *		 by the caller is supposed to be the list of reachable
4598  *		 devices.
4599  *
4600  *     Argument: *pptr		Fcp port structure.
4601  *		 *devlist	Pointer to the first entry of a table
4602  *				containing the remote ports that can be
4603  *				reached.
4604  *		 dev_cnt	Number of entries pointed by devlist.
4605  *		 link_cnt	Link state count.
4606  *		 *map_tag	Array of fcp_map_tag_t structures.
4607  *		 cause		What caused this function to be called.
4608  *
4609  * Return Value: None
4610  *
4611  *	  Notes: The pptr->port_mutex must be held.
4612  */
4613 static void
4614 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4615     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4616 {
4617 	int			i;
4618 	int			check_finish_init = 0;
4619 	fc_portmap_t		*map_entry;
4620 	struct fcp_tgt	*ptgt = NULL;
4621 
4622 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4623 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4624 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4625 
4626 	if (dev_cnt) {
4627 		ASSERT(map_tag != NULL);
4628 	}
4629 
4630 	/*
4631 	 * The following code goes through the list of remote ports that are
4632 	 * accessible through this (pptr) local port (The list walked is the
4633 	 * one provided by the caller which is the list of the remote ports
4634 	 * currently reachable).  It checks if any of them was already
4635 	 * known by looking for the corresponding target structure based on
4636 	 * the world wide name.	 If a target is part of the list it is tagged
4637 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4638 	 *
4639 	 * Old comment
4640 	 * -----------
4641 	 * Before we drop port mutex; we MUST get the tags updated; This
4642 	 * two step process is somewhat slow, but more reliable.
4643 	 */
4644 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4645 		map_entry = &(devlist[i]);
4646 
4647 		/*
4648 		 * get ptr to this map entry in our port's
4649 		 * list (if any)
4650 		 */
4651 		ptgt = fcp_lookup_target(pptr,
4652 		    (uchar_t *)&(map_entry->map_pwwn));
4653 
4654 		if (ptgt) {
4655 			map_tag[i] = ptgt->tgt_change_cnt;
4656 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4657 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4658 			}
4659 		}
4660 	}
4661 
4662 	/*
4663 	 * At this point we know which devices of the new list were already
4664 	 * known (The field tgt_aux_state of the target structure has been
4665 	 * set to FCP_TGT_TAGGED).
4666 	 *
4667 	 * The following code goes through the list of targets currently known
4668 	 * by the local port (the list is actually a hashing table).  If a
4669 	 * target is found and is not tagged, it means the target cannot
4670 	 * be reached anymore through the local port (pptr).  It is offlined.
4671 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4672 	 */
4673 	for (i = 0; i < FCP_NUM_HASH; i++) {
4674 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4675 		    ptgt = ptgt->tgt_next) {
4676 			mutex_enter(&ptgt->tgt_mutex);
4677 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4678 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4679 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4680 				fcp_offline_target_now(pptr, ptgt,
4681 				    link_cnt, ptgt->tgt_change_cnt, 0);
4682 			}
4683 			mutex_exit(&ptgt->tgt_mutex);
4684 		}
4685 	}
4686 
4687 	/*
4688 	 * At this point, the devices that were known but cannot be reached
4689 	 * anymore, have most likely been offlined.
4690 	 *
4691 	 * The following section of code goes through the list of
4692 	 * remote ports that can now be reached.  For every single one it
4693 	 * checks if it is already known or if it is a new port.
4694 	 */
4695 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
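
		/*
		 * check_finish_init indicates that the previous entry did
		 * not kick off asynchronous discovery (it was skipped,
		 * offlined or failed); do its finish-init bookkeeping
		 * before handling the current entry.
		 */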
4696 
4697 		if (check_finish_init) {
4698 			ASSERT(i > 0);
4699 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4700 			    map_tag[i - 1], cause);
4701 			check_finish_init = 0;
4702 		}
4703 
4704 		/* get a pointer to this map entry */
4705 		map_entry = &(devlist[i]);
4706 
4707 		/*
4708 		 * Check for the duplicate map entry flag. If we have marked
4709 		 * this entry as a duplicate we skip it since the correct
4710 		 * (perhaps even same) state change will be encountered
4711 		 * later in the list.
4712 		 */
4713 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4714 			continue;
4715 		}
4716 
4717 		/* get ptr to this map entry in our port's list (if any) */
4718 		ptgt = fcp_lookup_target(pptr,
4719 		    (uchar_t *)&(map_entry->map_pwwn));
4720 
4721 		if (ptgt) {
4722 			/*
4723 			 * This device was already known.  The field
4724 			 * tgt_aux_state is reset (was probably set to
4725 			 * FCP_TGT_TAGGED previously in this routine).
4726 			 */
4727 			ptgt->tgt_aux_state = 0;
4728 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4729 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4730 			    "handle_devices: map did/state/type/flags = "
4731 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4732 			    "tgt_state=%d",
4733 			    map_entry->map_did.port_id, map_entry->map_state,
4734 			    map_entry->map_type, map_entry->map_flags,
4735 			    ptgt->tgt_d_id, ptgt->tgt_state);
4736 		}
4737 
4738 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4739 		    map_entry->map_type == PORT_DEVICE_NEW ||
4740 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4741 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4742 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4743 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4744 			    "map_type=%x, did = %x",
4745 			    map_entry->map_type,
4746 			    map_entry->map_did.port_id);
4747 		}
4748 
4749 		switch (map_entry->map_type) {
4750 		case PORT_DEVICE_NOCHANGE:
4751 		case PORT_DEVICE_USER_CREATE:
4752 		case PORT_DEVICE_USER_LOGIN:
4753 		case PORT_DEVICE_NEW:
4754 		case PORT_DEVICE_REPORTLUN_CHANGED:
4755 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4756 
4757 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4758 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4759 			    cause) == TRUE) {
4760 
4761 				FCP_TGT_TRACE(ptgt, map_tag[i],
4762 				    FCP_TGT_TRACE_2);
4763 				check_finish_init++;
4764 			}
4765 			break;
4766 
4767 		case PORT_DEVICE_OLD:
4768 			if (ptgt != NULL) {
4769 				FCP_TGT_TRACE(ptgt, map_tag[i],
4770 				    FCP_TGT_TRACE_3);
4771 
4772 				mutex_enter(&ptgt->tgt_mutex);
4773 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4774 					/*
4775 					 * Must do an in-line wait for I/Os
4776 					 * to get drained
4777 					 */
4778 					mutex_exit(&ptgt->tgt_mutex);
4779 					mutex_exit(&pptr->port_mutex);
4780 
4781 					mutex_enter(&ptgt->tgt_mutex);
4782 					while (ptgt->tgt_ipkt_cnt ||
4783 					    fcp_outstanding_lun_cmds(ptgt)
4784 					    == FC_SUCCESS) {
4785 						mutex_exit(&ptgt->tgt_mutex);
4786 						delay(drv_usectohz(1000000));
4787 						mutex_enter(&ptgt->tgt_mutex);
4788 					}
4789 					mutex_exit(&ptgt->tgt_mutex);
4790 
4791 					mutex_enter(&pptr->port_mutex);
4792 					mutex_enter(&ptgt->tgt_mutex);
4793 
4794 					(void) fcp_offline_target(pptr, ptgt,
4795 					    link_cnt, map_tag[i], 0, 0);
4796 				}
4797 				mutex_exit(&ptgt->tgt_mutex);
4798 			}
4799 			check_finish_init++;
4800 			break;
4801 
4802 		case PORT_DEVICE_USER_DELETE:
4803 		case PORT_DEVICE_USER_LOGOUT:
4804 			if (ptgt != NULL) {
4805 				FCP_TGT_TRACE(ptgt, map_tag[i],
4806 				    FCP_TGT_TRACE_4);
4807 
4808 				mutex_enter(&ptgt->tgt_mutex);
4809 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4810 					(void) fcp_offline_target(pptr, ptgt,
4811 					    link_cnt, map_tag[i], 1, 0);
4812 				}
4813 				mutex_exit(&ptgt->tgt_mutex);
4814 			}
4815 			check_finish_init++;
4816 			break;
4817 
4818 		case PORT_DEVICE_CHANGED:
4819 			if (ptgt != NULL) {
4820 				FCP_TGT_TRACE(ptgt, map_tag[i],
4821 				    FCP_TGT_TRACE_5);
4822 
4823 				if (fcp_device_changed(pptr, ptgt,
4824 				    map_entry, link_cnt, map_tag[i],
4825 				    cause) == TRUE) {
4826 					check_finish_init++;
4827 				}
4828 			} else {
4829 				if (fcp_handle_mapflags(pptr, ptgt,
4830 				    map_entry, link_cnt, 0, cause) == TRUE) {
4831 					check_finish_init++;
4832 				}
4833 			}
4834 			break;
4835 
4836 		default:
4837 			fcp_log(CE_WARN, pptr->port_dip,
4838 			    "!Invalid map_type=0x%x", map_entry->map_type);
4839 			check_finish_init++;
4840 			break;
4841 		}
4842 	}
4843 
4844 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4845 		ASSERT(i > 0);
4846 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4847 		    map_tag[i-1], cause);
4848 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4849 		fcp_offline_all(pptr, link_cnt, cause);
4850 	}
4851 }
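
/*
 *     Function: fcp_handle_reportlun_changed
 *
 *  Description: Makes sure a LUN 0 structure exists for the target, marks
 *		 it busy and sends a REPORT LUN command to rediscover the
 *		 target's LUN inventory.
 *
 *     Argument: *ptgt		Target whose LUN inventory changed.
 *		 cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed (the REPORT LUN command was not sent)
 *		 FALSE	Succeeded
 */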
4852 
4853 static int
4854 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4855 {
4856 	struct fcp_lun	*plun;
4857 	struct fcp_port *pptr;
4858 	int		 rscn_count;
4859 	int		 lun0_newalloc;
4860 	int		 ret  = TRUE;
4861 
4862 	ASSERT(ptgt);
4863 	pptr = ptgt->tgt_port;
4864 	lun0_newalloc = 0;
4865 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4866 		/*
4867 		 * no LUN struct for LUN 0 yet exists,
4868 		 * so create one
4869 		 */
4870 		plun = fcp_alloc_lun(ptgt);
4871 		if (plun == NULL) {
4872 			fcp_log(CE_WARN, pptr->port_dip,
4873 			    "!Failed to allocate lun 0 for"
4874 			    " D_ID=%x", ptgt->tgt_d_id);
4875 			return (ret);
4876 		}
4877 		lun0_newalloc = 1;
4878 	}
4879 
4880 	mutex_enter(&ptgt->tgt_mutex);
4881 	/*
4882 	 * consider LUN 0 as a device that is not connected if it is
4883 	 * offline or newly allocated
4884 	 */
4885 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4886 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4887 	}
4888 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4889 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4890 	ptgt->tgt_lun_cnt = 1;
4891 	ptgt->tgt_report_lun_cnt = 0;
4892 	mutex_exit(&ptgt->tgt_mutex);
4893 
4894 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4895 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4896 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4897 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4898 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4899 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4900 		    "to D_ID=%x", ptgt->tgt_d_id);
4901 	} else {
4902 		ret = FALSE;
4903 	}
4904 
4905 	return (ret);
4906 }
4907 
4908 /*
4909  *     Function: fcp_handle_mapflags
4910  *
4911  *  Description: This function creates a target structure if the ptgt passed
4912  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4913  *		 into the target yet or the PRLI if we are logged into the
4914  *		 target already.  The rest of the treatment is done in the
4915  *		 callbacks of the PLOGI or PRLI.
4916  *
4917  *     Argument: *pptr		FCP Port structure.
4918  *		 *ptgt		Target structure.
4919  *		 *map_entry	Array of fc_portmap_t structures.
4920  *		 link_cnt	Link state count.
4921  *		 tgt_cnt	Target state count.
4922  *		 cause		What caused this function to be called.
4923  *
4924  * Return Value: TRUE	Failed
4925  *		 FALSE	Succeeded
4926  *
4927  *	  Notes: pptr->port_mutex must be owned.
4928  */
4929 static int
4930 fcp_handle_mapflags(struct fcp_port	*pptr, struct fcp_tgt	*ptgt,
4931     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4932 {
4933 	int			lcount;
4934 	int			tcount;
4935 	int			ret = TRUE;
4936 	int			alloc;
4937 	struct fcp_ipkt	*icmd;
4938 	struct fcp_lun	*pseq_lun = NULL;
4939 	uchar_t			opcode;
4940 	int			valid_ptgt_was_passed = FALSE;
4941 
4942 	ASSERT(mutex_owned(&pptr->port_mutex));
4943 
4944 	 * This case is possible when the FCTL has come up and done discovery
4945 	 * before FCP was loaded and attached. FCTL would have discovered the
4946 	 * devices and later the ULP came online. In this case ULPs would get
4947 	 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4948 	 * PORT_DEVICE_NOCHANGE but target would be NULL.
4949 	 */
4950 	if (ptgt == NULL) {
4951 		/* don't already have a target */
4952 		mutex_exit(&pptr->port_mutex);
4953 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4954 		mutex_enter(&pptr->port_mutex);
4955 
4956 		if (ptgt == NULL) {
4957 			fcp_log(CE_WARN, pptr->port_dip,
4958 			    "!FC target allocation failed");
4959 			return (ret);
4960 		}
4961 		mutex_enter(&ptgt->tgt_mutex);
4962 		ptgt->tgt_statec_cause = cause;
4963 		ptgt->tgt_tmp_cnt = 1;
4964 		mutex_exit(&ptgt->tgt_mutex);
4965 	} else {
4966 		valid_ptgt_was_passed = TRUE;
4967 	}
4968 
4969 	/*
4970 	 * Copy in the target parameters
4971 	 */
4972 	mutex_enter(&ptgt->tgt_mutex);
4973 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4974 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4975 	ptgt->tgt_pd_handle = map_entry->map_pd;
4976 	ptgt->tgt_fca_dev = NULL;
4977 
4978 	/* Copy port and node WWNs */
4979 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4980 	    FC_WWN_SIZE);
4981 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4982 	    FC_WWN_SIZE);
4983 
4984 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4985 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4986 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4987 	    valid_ptgt_was_passed) {
4988 		/*
4989 		 * determine if there are any tape LUNs on this target
4990 		 */
4991 		for (pseq_lun = ptgt->tgt_lun;
4992 		    pseq_lun != NULL;
4993 		    pseq_lun = pseq_lun->lun_next) {
4994 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4995 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4996 				fcp_update_tgt_state(ptgt, FCP_RESET,
4997 				    FCP_LUN_MARK);
4998 				mutex_exit(&ptgt->tgt_mutex);
4999 				return (ret);
5000 			}
5001 		}
5002 	}
5003 
5004 	/*
5005 	 * if a REPORTLUN_CHANGED notification (unit attention) was received,
5006 	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process
5007 	 */
5008 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5009 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5010 		mutex_exit(&ptgt->tgt_mutex);
5011 		mutex_exit(&pptr->port_mutex);
5012 
5013 		ret = fcp_handle_reportlun_changed(ptgt, cause);
5014 
5015 		mutex_enter(&pptr->port_mutex);
5016 		return (ret);
5017 	}
5018 
5019 	/*
5020 	 * If ptgt was NULL when this function was entered, then tgt_node_state
5021 	 * was never specifically initialized but zeroed out which means
5022 	 * FCP_TGT_NODE_NONE.
5023 	 */
5024 	switch (ptgt->tgt_node_state) {
5025 	case FCP_TGT_NODE_NONE:
5026 	case FCP_TGT_NODE_ON_DEMAND:
5027 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5028 		    !fcp_enable_auto_configuration &&
5029 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5030 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5031 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5032 		    fcp_enable_auto_configuration &&
5033 		    (ptgt->tgt_manual_config_only == 1) &&
5034 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5035 			/*
5036 			 * If auto configuration is set and
5037 			 * the tgt_manual_config_only flag is set then
5038 			 * we only want the user to be able to change
5039 			 * the state through create_on_demand.
5040 			 */
5041 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5042 		} else {
5043 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5044 		}
5045 		break;
5046 
5047 	case FCP_TGT_NODE_PRESENT:
5048 		break;
5049 	}
5050 	/*
5051 	 * If we are booting from a fabric device, make sure we
5052 	 * mark the node state appropriately for this target to be
5053 	 * enumerated
5054 	 */
5055 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5056 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5057 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5058 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5059 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5060 		}
5061 	}
5062 	mutex_exit(&ptgt->tgt_mutex);
5063 
5064 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5065 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5066 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5067 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5068 	    map_entry->map_rscn_info.ulp_rscn_count);
5069 
5070 	mutex_enter(&ptgt->tgt_mutex);
5071 
5072 	/*
5073 	 * Reset target OFFLINE state and mark the target BUSY
5074 	 */
5075 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5076 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5077 
5078 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5079 	lcount = link_cnt;
5080 
5081 	mutex_exit(&ptgt->tgt_mutex);
5082 	mutex_exit(&pptr->port_mutex);
5083 
5084 	/*
5085 	 * if we are already logged in, then we do a PRLI, else
5086 	 * we do a PLOGI first (to get logged in)
5087 	 *
5088 	 * We will not check if we are the PLOGI initiator
5089 	 */
5090 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5091 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
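
	/*
	 * Size the payload for the larger of the two ELS frames so the
	 * same internal packet can carry either a PLOGI or a PRLI.
	 */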
5092 
5093 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5094 
5095 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5096 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5097 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5098 
5099 	if (icmd == NULL) {
5100 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5101 		/*
5102 		 * We've exited port_mutex before calling fcp_icmd_alloc, so
5103 		 * we need to make sure we reacquire it before returning.
5104 		 */
5105 		mutex_enter(&pptr->port_mutex);
5106 		return (FALSE);
5107 	}
5108 
5109 	/* TRUE is only returned when the target is intentionally skipped */
5110 	ret = FALSE;
5111 	/* discover info about this target */
5112 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5113 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5114 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5115 	} else {
5116 		fcp_icmd_free(pptr, icmd);
5117 		ret = TRUE;
5118 	}
5119 	mutex_enter(&pptr->port_mutex);
5120 
5121 	return (ret);
5122 }
5123 
5124 /*
5125  *     Function: fcp_send_els
5126  *
5127  *  Description: Sends an ELS to the target specified by the caller.  Supports
5128  *		 PLOGI and PRLI.
5129  *
5130  *     Argument: *pptr		Fcp port.
5131  *		 *ptgt		Target to send the ELS to.
5132  *		 *icmd		Internal packet
5133  *		 opcode		ELS opcode
5134  *		 lcount		Link state change counter
5135  *		 tcount		Target state change counter
5136  *		 cause		What caused the call
5137  *
5138  * Return Value: DDI_SUCCESS
5139  *		 Others
5140  */
5141 static int
5142 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5143     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5144 {
5145 	fc_packet_t		*fpkt;
5146 	fc_frame_hdr_t		*hp;
5147 	int			internal = 0;
5148 	int			alloc;
5149 	int			cmd_len;
5150 	int			resp_len;
5151 	int			res = DDI_FAILURE; /* default result */
5152 	int			rval = DDI_FAILURE;
5153 
5154 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5155 	ASSERT(ptgt->tgt_port == pptr);
5156 
5157 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5158 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5159 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5160 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5161 
5162 	if (opcode == LA_ELS_PLOGI) {
5163 		cmd_len = sizeof (la_els_logi_t);
5164 		resp_len = sizeof (la_els_logi_t);
5165 	} else {
5166 		ASSERT(opcode == LA_ELS_PRLI);
5167 		cmd_len = sizeof (la_els_prli_t);
5168 		resp_len = sizeof (la_els_prli_t);
5169 	}
5170 
5171 	if (icmd == NULL) {
5172 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5173 		    sizeof (la_els_prli_t));
5174 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5175 		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5176 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5177 		if (icmd == NULL) {
5178 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5179 			return (res);
5180 		}
5181 		internal++;
5182 	}
5183 	fpkt = icmd->ipkt_fpkt;
5184 
5185 	fpkt->pkt_cmdlen = cmd_len;
5186 	fpkt->pkt_rsplen = resp_len;
5187 	fpkt->pkt_datalen = 0;
5188 	icmd->ipkt_retries = 0;
5189 
5190 	/* fill in fpkt info */
5191 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5192 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5193 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5194 
5195 	/* get ptr to frame hdr in fpkt */
5196 	hp = &fpkt->pkt_cmd_fhdr;
5197 
5198 	/*
5199 	 * fill in frame hdr
5200 	 */
5201 	hp->r_ctl = R_CTL_ELS_REQ;
5202 	hp->s_id = pptr->port_id;	/* source ID */
5203 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5204 	hp->type = FC_TYPE_EXTENDED_LS;
5205 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5206 	hp->seq_id = 0;
5207 	hp->rsvd = 0;
5208 	hp->df_ctl  = 0;
5209 	hp->seq_cnt = 0;
5210 	hp->ox_id = 0xffff;		/* i.e. none */
5211 	hp->rx_id = 0xffff;		/* i.e. none */
5212 	hp->ro = 0;
5213 
5214 	/*
5215 	 * at this point we have a filled in cmd pkt
5216 	 *
5217 	 * fill in the respective info, then use the transport to send
5218 	 * the packet
5219 	 *
5220 	 * for a PLOGI call fc_ulp_login(), and
5221 	 * for a PRLI call fc_ulp_issue_els()
5222 	 */
5223 	switch (opcode) {
5224 	case LA_ELS_PLOGI: {
5225 		struct la_els_logi logi;
5226 
5227 		bzero(&logi, sizeof (struct la_els_logi));
5228 
5229 		hp = &fpkt->pkt_cmd_fhdr;
5230 		hp->r_ctl = R_CTL_ELS_REQ;
5231 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5232 		logi.ls_code.mbz = 0;
5233 
5234 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5235 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5236 
5237 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5238 
5239 		mutex_enter(&pptr->port_mutex);
5240 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5241 
5242 			mutex_exit(&pptr->port_mutex);
5243 
5244 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5245 			if (rval == FC_SUCCESS) {
5246 				res = DDI_SUCCESS;
5247 				break;
5248 			}
5249 
5250 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5251 
5252 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5253 			    rval, "PLOGI");
5254 		} else {
5255 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5256 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5257 			    "fcp_send_els1: state change occurred"
5258 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5259 			mutex_exit(&pptr->port_mutex);
5260 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5261 		}
5262 		break;
5263 	}
5264 
5265 	case LA_ELS_PRLI: {
5266 		struct la_els_prli	prli;
5267 		struct fcp_prli		*fprli;
5268 
5269 		bzero(&prli, sizeof (struct la_els_prli));
5270 
5271 		hp = &fpkt->pkt_cmd_fhdr;
5272 		hp->r_ctl = R_CTL_ELS_REQ;
5273 
5274 		/* fill in PRLI cmd ELS fields */
5275 		prli.ls_code = LA_ELS_PRLI;
5276 		prli.page_length = 0x10;	/* PRLI service parameter page is 16 bytes */
5277 		prli.payload_length = sizeof (struct la_els_prli);
5278 
5279 		icmd->ipkt_opcode = LA_ELS_PRLI;
5280 
5281 		/* get ptr to PRLI service params */
5282 		fprli = (struct fcp_prli *)prli.service_params;
5283 
5284 		/* fill in service params */
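		/* FC-4 type code 0x08 = SCSI FCP */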
5285 		fprli->type = 0x08;
5286 		fprli->resvd1 = 0;
5287 		fprli->orig_process_assoc_valid = 0;
5288 		fprli->resp_process_assoc_valid = 0;
5289 		fprli->establish_image_pair = 1;
5290 		fprli->resvd2 = 0;
5291 		fprli->resvd3 = 0;
5292 		fprli->obsolete_1 = 0;
5293 		fprli->obsolete_2 = 0;
5294 		fprli->data_overlay_allowed = 0;
5295 		fprli->initiator_fn = 1;
5296 		fprli->confirmed_compl_allowed = 1;
5297 
5298 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5299 			fprli->target_fn = 1;
5300 		} else {
5301 			fprli->target_fn = 0;
5302 		}
5303 
5304 		fprli->retry = 1;
5305 		fprli->read_xfer_rdy_disabled = 1;
5306 		fprli->write_xfer_rdy_disabled = 0;
5307 
5308 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5309 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5310 
5311 		/* issue the PRLI request */
5312 
5313 		mutex_enter(&pptr->port_mutex);
5314 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5315 
5316 			mutex_exit(&pptr->port_mutex);
5317 
5318 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5319 			if (rval == FC_SUCCESS) {
5320 				res = DDI_SUCCESS;
5321 				break;
5322 			}
5323 
5324 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5325 
5326 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5327 			    rval, "PRLI");
5328 		} else {
5329 			mutex_exit(&pptr->port_mutex);
5330 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5331 		}
5332 		break;
5333 	}
5334 
5335 	default:
5336 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5337 		break;
5338 	}
5339 
5340 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5341 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5342 	    "fcp_send_els: returning %d", res);
5343 
5344 	if (res != DDI_SUCCESS) {
5345 		if (internal) {
5346 			fcp_icmd_free(pptr, icmd);
5347 		}
5348 	}
5349 
5350 	return (res);
5351 }
5352 
5353 
5354 /*
5355  * called internally to update the state of all of the tgts and each LUN
5356  * for this port (i.e. each target known to be attached to this port)
5357  * if they are not already offline
5358  *
5359  * must be called with the port mutex owned
5360  *
5361  * acquires and releases the target mutexes for each target attached
5362  * to this port
5363  */
5364 void
5365 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5366 {
5367 	int i;
5368 	struct fcp_tgt *ptgt;
5369 
5370 	ASSERT(mutex_owned(&pptr->port_mutex));
5371 
5372 	for (i = 0; i < FCP_NUM_HASH; i++) {
5373 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5374 		    ptgt = ptgt->tgt_next) {
5375 			mutex_enter(&ptgt->tgt_mutex);
5376 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5377 			ptgt->tgt_change_cnt++;
5378 			ptgt->tgt_statec_cause = cause;
5379 			ptgt->tgt_tmp_cnt = 1;
5380 			ptgt->tgt_done = 0;
5381 			mutex_exit(&ptgt->tgt_mutex);
5382 		}
5383 	}
5384 }
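
/*
 * Usage sketch for fcp_update_state() (hypothetical caller; the state bits
 * and the FCP_CAUSE_LINK_DOWN cause code are assumed to match the values
 * this driver uses elsewhere in its state-change paths):
 *
 *	mutex_enter(&pptr->port_mutex);
 *	fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
 *	    FCP_CAUSE_LINK_DOWN);
 *	mutex_exit(&pptr->port_mutex);
 *
 * The state bits are applied to every target in the port hash table and,
 * via fcp_update_tgt_state(), to each of their LUNs.
 */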
5385 
5386 
5387 static void
5388 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5389 {
5390 	int i;
5391 	int ndevs;
5392 	struct fcp_tgt *ptgt;
5393 
5394 	ASSERT(mutex_owned(&pptr->port_mutex));
5395 
5396 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5397 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5398 		    ptgt = ptgt->tgt_next) {
5399 			ndevs++;
5400 		}
5401 	}
5402 
5403 	if (ndevs == 0) {
5404 		return;
5405 	}
5406 	pptr->port_tmp_cnt = ndevs;
5407 
5408 	for (i = 0; i < FCP_NUM_HASH; i++) {
5409 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5410 		    ptgt = ptgt->tgt_next) {
5411 			(void) fcp_call_finish_init_held(pptr, ptgt,
5412 			    lcount, ptgt->tgt_change_cnt, cause);
5413 		}
5414 	}
5415 }
5416 
5417 /*
5418  *     Function: fcp_update_tgt_state
5419  *
5420  *  Description: This function updates the tgt_state field of a target.  That
5421  *		 field is a bitmap whose bits can be set or reset
5422  *		 individually.	The action applied to the target state is also
5423  *		 applied to all the LUNs belonging to the target (provided the
5424  *		 LUN is not offline).  A side effect of applying the state
5425  *		 modification is that the tgt_trace field of the target and the
5426  *		 lun_trace field of the LUNs are reset to zero.
5427  *
5428  *
5429  *     Argument: *ptgt	Target structure.
5430  *		 flag	Flag indicating what action to apply (set/reset).
5431  *		 state	State bits to update.
5432  *
5433  * Return Value: None
5434  *
5435  *	Context: Interrupt, Kernel or User context.
5436  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5437  *		 calling this function.
5438  */
5439 void
5440 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5441 {
5442 	struct fcp_lun *plun;
5443 
5444 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5445 
5446 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5447 		/* The target is not offline. */
5448 		if (flag == FCP_SET) {
5449 			ptgt->tgt_state |= state;
5450 			ptgt->tgt_trace = 0;
5451 		} else {
5452 			ptgt->tgt_state &= ~state;
5453 		}
5454 
5455 		for (plun = ptgt->tgt_lun; plun != NULL;
5456 		    plun = plun->lun_next) {
5457 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5458 				/* The LUN is not offline. */
5459 				if (flag == FCP_SET) {
5460 					plun->lun_state |= state;
5461 					plun->lun_trace = 0;
5462 				} else {
5463 					plun->lun_state &= ~state;
5464 				}
5465 			}
5466 		}
5467 	}
5468 }
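
/*
 * Set/reset sketch for fcp_update_tgt_state() (hypothetical caller;
 * FCP_RESET is assumed to be the clearing counterpart of FCP_SET --
 * any flag value other than FCP_SET clears the bits):
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
 *	...
 *	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
 *	mutex_exit(&ptgt->tgt_mutex);
 */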
5469 
5470 /*
5471  *     Function: fcp_update_lun_state
5472  *
5473  *  Description: This function updates the lun_state field of a LUN.  That
5474  *		 field is a bitmap whose bits can be set or reset
5475  *		 individually.
5476  *
5477  *     Argument: *plun	LUN structure.
5478  *		 flag	Flag indicating what action to apply (set/reset).
5479  *		 state	State bits to update.
5480  *
5481  * Return Value: None
5482  *
5483  *	Context: Interrupt, Kernel or User context.
5484  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5485  *		 calling this function.
5486  */
5487 void
5488 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5489 {
5490 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5491 
5492 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5493 
5494 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5495 		if (flag == FCP_SET) {
5496 			plun->lun_state |= state;
5497 		} else {
5498 			plun->lun_state &= ~state;
5499 		}
5500 	}
5501 }
5502 
5503 /*
5504  *     Function: fcp_get_port
5505  *
5506  *  Description: This function returns the fcp_port structure from the opaque
5507  *		 handle passed by the caller.  That opaque handle is the handle
5508  *		 used by fp/fctl to identify a particular local port.  That
5509  *		 handle has been stored in the corresponding fcp_port
5510  *		 structure.  This function walks the global list of fcp_port
5511  *		 structures until it finds one whose port_fp_handle matches
5512  *		 the handle passed by the caller.  It holds the mutex
5513  *		 fcp_global_mutex while walking the global list and releases
5514  *		 it before returning.
5515  *
5516  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5517  *				particular port.
5518  *
5519  * Return Value: NULL		Not found.
5520  *		 Not NULL	Pointer to the fcp_port structure.
5521  *
5522  *	Context: Interrupt, Kernel or User context.
5523  */
5524 static struct fcp_port *
5525 fcp_get_port(opaque_t port_handle)
5526 {
5527 	struct fcp_port *pptr;
5528 
5529 	ASSERT(port_handle != NULL);
5530 
5531 	mutex_enter(&fcp_global_mutex);
5532 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5533 		if (pptr->port_fp_handle == port_handle) {
5534 			break;
5535 		}
5536 	}
5537 	mutex_exit(&fcp_global_mutex);
5538 
5539 	return (pptr);
5540 }
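
/*
 * Usage sketch for fcp_get_port() (hypothetical caller; port_handle is
 * whatever opaque handle fp/fctl hands to an FCP entry point):
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		return;
 *	}
 *
 * Note that only the list walk is protected by fcp_global_mutex; the
 * returned pointer is used without that lock held.
 */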
5541 
5542 
5543 static void
5544 fcp_unsol_callback(fc_packet_t *fpkt)
5545 {
5546 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5547 	struct fcp_port *pptr = icmd->ipkt_port;
5548 
5549 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5550 		caddr_t state, reason, action, expln;
5551 
5552 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5553 		    &action, &expln);
5554 
5555 		fcp_log(CE_WARN, pptr->port_dip,
5556 		    "!couldn't post response to unsolicited request: "
5557 		    "state=%s reason=%s ox_id=%x rx_id=%x",
5558 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5559 		    fpkt->pkt_cmd_fhdr.rx_id);
5560 	}
5561 	fcp_icmd_free(pptr, icmd);
5562 }
5563 
5564 
5565 /*
5566  * Perform general purpose preparation of a response to an unsolicited request
5567  */
5568 static void
5569 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5570     uchar_t r_ctl, uchar_t type)
5571 {
5572 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5573 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5574 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5575 	pkt->pkt_cmd_fhdr.type = type;
5576 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5577 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5578 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5579 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5580 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5581 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5582 	pkt->pkt_cmd_fhdr.ro = 0;
5583 	pkt->pkt_cmd_fhdr.rsvd = 0;
5584 	pkt->pkt_comp = fcp_unsol_callback;
5585 	pkt->pkt_pd = NULL;
5586 	pkt->pkt_ub_resp_token = (opaque_t)buf;
5587 }
5588 
5589 
5590 /*ARGSUSED*/
5591 static int
5592 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5593 {
5594 	fc_packet_t		*fpkt;
5595 	struct la_els_prli	prli;
5596 	struct fcp_prli		*fprli;
5597 	struct fcp_ipkt	*icmd;
5598 	struct la_els_prli	*from;
5599 	struct fcp_prli		*orig;
5600 	struct fcp_tgt	*ptgt;
5601 	int			tcount = 0;
5602 	int			lcount;
5603 
5604 	from = (struct la_els_prli *)buf->ub_buffer;
5605 	orig = (struct fcp_prli *)from->service_params;
5606 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5607 	    NULL) {
5608 		mutex_enter(&ptgt->tgt_mutex);
5609 		tcount = ptgt->tgt_change_cnt;
5610 		mutex_exit(&ptgt->tgt_mutex);
5611 	}
5612 
5613 	mutex_enter(&pptr->port_mutex);
5614 	lcount = pptr->port_link_cnt;
5615 	mutex_exit(&pptr->port_mutex);
5616 
5617 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5618 	    sizeof (la_els_prli_t), 0,
5619 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5620 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5621 		return (FC_FAILURE);
5622 	}
5623 
5624 	fpkt = icmd->ipkt_fpkt;
5625 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5626 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5627 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5628 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5629 	fpkt->pkt_rsplen = 0;
5630 	fpkt->pkt_datalen = 0;
5631 
5632 	icmd->ipkt_opcode = LA_ELS_PRLI;
5633 
5634 	bzero(&prli, sizeof (struct la_els_prli));
5635 	fprli = (struct fcp_prli *)prli.service_params;
5636 	prli.ls_code = LA_ELS_ACC;
5637 	prli.page_length = 0x10;
5638 	prli.payload_length = sizeof (struct la_els_prli);
5639 
5640 	/* fill in service params */
5641 	fprli->type = 0x08;
5642 	fprli->resvd1 = 0;
5643 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5644 	fprli->orig_process_associator = orig->orig_process_associator;
5645 	fprli->resp_process_assoc_valid = 0;
5646 	fprli->establish_image_pair = 1;
5647 	fprli->resvd2 = 0;
5648 	fprli->resvd3 = 0;
5649 	fprli->obsolete_1 = 0;
5650 	fprli->obsolete_2 = 0;
5651 	fprli->data_overlay_allowed = 0;
5652 	fprli->initiator_fn = 1;
5653 	fprli->confirmed_compl_allowed = 1;
5654 
5655 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5656 		fprli->target_fn = 1;
5657 	} else {
5658 		fprli->target_fn = 0;
5659 	}
5660 
5661 	fprli->retry = 1;
5662 	fprli->read_xfer_rdy_disabled = 1;
5663 	fprli->write_xfer_rdy_disabled = 0;
5664 
5665 	/* save the unsol prli payload first */
5666 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5667 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5668 
5669 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5670 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5671 
5672 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5673 
5674 	mutex_enter(&pptr->port_mutex);
5675 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5676 		int rval;
5677 		mutex_exit(&pptr->port_mutex);
5678 
5679 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5680 		    FC_SUCCESS) {
5681 			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5682 			    ptgt != NULL) {
5683 				fcp_queue_ipkt(pptr, fpkt);
5684 				return (FC_SUCCESS);
5685 			}
5686 			/* Let it timeout */
5687 			fcp_icmd_free(pptr, icmd);
5688 			return (FC_FAILURE);
5689 		}
5690 	} else {
5691 		mutex_exit(&pptr->port_mutex);
5692 		fcp_icmd_free(pptr, icmd);
5693 		return (FC_FAILURE);
5694 	}
5695 
5696 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5697 
5698 	return (FC_SUCCESS);
5699 }
5700 
5701 /*
5702  *     Function: fcp_icmd_alloc
5703  *
5704  *  Description: This function allocates a fcp_ipkt structure.  The pkt_comp
5705  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5706  *		 modified by the caller (such as fcp_send_scsi).  The
5707  *		 structure is also tied to the state of the line and of the
5708  *		 target at a particular time.  That link is established by
5709  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5710  *		 and tcount, which come respectively from pptr->port_link_cnt and
5711  *		 ptgt->tgt_change_cnt.
5712  *
5713  *     Argument: *pptr		Fcp port.
5714  *		 *ptgt		Target (destination of the command).
5715  *		 cmd_len	Length of the command.
5716  *		 resp_len	Length of the expected response.
5717  *		 data_len	Length of the data.
5718  *		 nodma		Indicates whether the command and response
5719  *				will be transferred through DMA or not.
5720  *		 lcount		Link state change counter.
5721  *		 tcount		Target state change counter.
5722  *		 cause		Reason that led to this call.
5723  *
5724  * Return Value: NULL		Failed.
5725  *		 Not NULL	Internal packet address.
5726  */
5727 static struct fcp_ipkt *
5728 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5729     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5730     uint32_t rscn_count)
5731 {
5732 	int			dma_setup = 0;
5733 	fc_packet_t		*fpkt;
5734 	struct fcp_ipkt	*icmd = NULL;
5735 
5736 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5737 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5738 	    KM_NOSLEEP);
5739 	if (icmd == NULL) {
5740 		fcp_log(CE_WARN, pptr->port_dip,
5741 		    "!internal packet allocation failed");
5742 		return (NULL);
5743 	}
5744 
5745 	/*
5746 	 * initialize the allocated packet
5747 	 */
5748 	icmd->ipkt_nodma = nodma;
5749 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5750 	icmd->ipkt_lun = NULL;
5751 
5752 	icmd->ipkt_link_cnt = lcount;
5753 	icmd->ipkt_change_cnt = tcount;
5754 	icmd->ipkt_cause = cause;
5755 
5756 	mutex_enter(&pptr->port_mutex);
5757 	icmd->ipkt_port = pptr;
5758 	mutex_exit(&pptr->port_mutex);
5759 
5760 	/* keep track of amt of data to be sent in pkt */
5761 	icmd->ipkt_cmdlen = cmd_len;
5762 	icmd->ipkt_resplen = resp_len;
5763 	icmd->ipkt_datalen = data_len;
5764 
5765 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5766 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5767 
5768 	/* set pkt's private ptr to point to cmd pkt */
5769 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5770 
5771 	/* set FCA private ptr to memory just beyond */
5772 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5773 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5774 	    pptr->port_dmacookie_sz);
5775 
5776 	/* get ptr to fpkt substruct and fill it in */
5777 	fpkt = icmd->ipkt_fpkt;
5778 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5779 	    sizeof (struct fcp_ipkt));
5780 
5781 	if (ptgt != NULL) {
5782 		icmd->ipkt_tgt = ptgt;
5783 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5784 	}
5785 
5786 	fpkt->pkt_comp = fcp_icmd_callback;
5787 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5788 	fpkt->pkt_cmdlen = cmd_len;
5789 	fpkt->pkt_rsplen = resp_len;
5790 	fpkt->pkt_datalen = data_len;
5791 
5792 	/*
5793 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5794 	 * rscn_count as fcp knows it down to the transport.  If a valid count was
5795 	 * passed into this function, we allocate memory to actually pass down
5796 	 * this info.
5797 	 *
5798 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5799 	 * basically mean that fcp will not be able to help transport
5800 	 * distinguish if a new RSCN has come after fcp was last informed about
5801 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5802 	 * 5068068 where the device might end up going offline in case of RSCN
5803 	 * storms.
5804 	 */
5805 	fpkt->pkt_ulp_rscn_infop = NULL;
5806 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5807 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5808 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5809 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5810 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5811 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5812 			    "Failed to alloc memory to pass rscn info");
5813 		}
5814 	}
5815 
5816 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5817 		fc_ulp_rscn_info_t	*rscnp;
5818 
5819 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5820 		rscnp->ulp_rscn_count = rscn_count;
5821 	}
5822 
5823 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5824 		goto fail;
5825 	}
5826 	dma_setup++;
5827 
5828 	/*
5829 	 * Must hold target mutex across setting of pkt_pd and call to
5830 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5831 	 * away while we're not looking.
5832 	 */
5833 	if (ptgt != NULL) {
5834 		mutex_enter(&ptgt->tgt_mutex);
5835 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5836 
5837 		/* ask transport to do its initialization on this pkt */
5838 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5839 		    != FC_SUCCESS) {
5840 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5841 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5842 			    "fc_ulp_init_packet failed");
5843 			mutex_exit(&ptgt->tgt_mutex);
5844 			goto fail;
5845 		}
5846 		mutex_exit(&ptgt->tgt_mutex);
5847 	} else {
5848 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5849 		    != FC_SUCCESS) {
5850 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5851 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5852 			    "fc_ulp_init_packet failed");
5853 			goto fail;
5854 		}
5855 	}
5856 
5857 	mutex_enter(&pptr->port_mutex);
5858 	if (pptr->port_state & (FCP_STATE_DETACHING |
5859 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5860 		int rval;
5861 
5862 		mutex_exit(&pptr->port_mutex);
5863 
5864 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5865 		ASSERT(rval == FC_SUCCESS);
5866 
5867 		goto fail;
5868 	}
5869 
5870 	if (ptgt != NULL) {
5871 		mutex_enter(&ptgt->tgt_mutex);
5872 		ptgt->tgt_ipkt_cnt++;
5873 		mutex_exit(&ptgt->tgt_mutex);
5874 	}
5875 
5876 	pptr->port_ipkt_cnt++;
5877 
5878 	mutex_exit(&pptr->port_mutex);
5879 
5880 	return (icmd);
5881 
5882 fail:
5883 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5884 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5885 		    sizeof (fc_ulp_rscn_info_t));
5886 		fpkt->pkt_ulp_rscn_infop = NULL;
5887 	}
5888 
5889 	if (dma_setup) {
5890 		fcp_free_dma(pptr, icmd);
5891 	}
5892 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5893 	    (size_t)pptr->port_dmacookie_sz);
5894 
5895 	return (NULL);
5896 }
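
/*
 * Layout of the single allocation made by fcp_icmd_alloc() above (assuming
 * port_dmacookie_sz is sized for port_data_dma_attr.dma_attr_sgllen
 * cookies):
 *
 *	+---------------------------+  (char *)icmd
 *	| struct fcp_ipkt           |  embeds the fc_packet_t (ipkt_fc_packet)
 *	+---------------------------+  <- fpkt->pkt_data_cookie
 *	| DMA cookie array          |  port_dmacookie_sz bytes
 *	+---------------------------+  <- fpkt->pkt_fca_private
 *	| FCA private area          |  port_priv_pkt_len bytes
 *	+---------------------------+
 *
 * fcp_alloc_dma() fills the cookie array with the first cookie returned by
 * ddi_dma_addr_bind_handle() followed by ddi_dma_nextcookie() for the rest.
 */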
5897 
5898 /*
5899  *     Function: fcp_icmd_free
5900  *
5901  *  Description: Frees the internal command passed by the caller.
5902  *
5903  *     Argument: *pptr		Fcp port.
5904  *		 *icmd		Internal packet to free.
5905  *
5906  * Return Value: None
5907  */
5908 static void
5909 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5910 {
5911 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5912 
5913 	/* Let the underlying layers do their cleanup. */
5914 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5915 	    icmd->ipkt_fpkt);
5916 
5917 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5918 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5919 		    sizeof (fc_ulp_rscn_info_t));
5920 	}
5921 
5922 	fcp_free_dma(pptr, icmd);
5923 
5924 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5925 	    (size_t)pptr->port_dmacookie_sz);
5926 
5927 	mutex_enter(&pptr->port_mutex);
5928 
5929 	if (ptgt) {
5930 		mutex_enter(&ptgt->tgt_mutex);
5931 		ptgt->tgt_ipkt_cnt--;
5932 		mutex_exit(&ptgt->tgt_mutex);
5933 	}
5934 
5935 	pptr->port_ipkt_cnt--;
5936 	mutex_exit(&pptr->port_mutex);
5937 }
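
/*
 * Allocation/release sketch for internal packets (hypothetical caller;
 * lcount and tcount are derived from port_link_cnt and tgt_change_cnt as
 * described above fcp_icmd_alloc()):
 *
 *	struct fcp_ipkt	*icmd;
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0,
 *	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
 *	    0, FC_INVALID_RSCN_COUNT);
 *	if (icmd == NULL) {
 *		return (DDI_FAILURE);
 *	}
 *
 * Once the packet has been successfully handed to the transport, the
 * completion callback is responsible for calling fcp_icmd_free(); on a
 * send failure the caller frees it itself, as fcp_send_els() and
 * fcp_unsol_prli() do above.
 */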
5938 
5939 /*
5940  *     Function: fcp_alloc_dma
5941  *
5942  *  Description: Allocates the DMA resources required for the internal
5943  *		 packet.
5944  *
5945  *     Argument: *pptr	FCP port.
5946  *		 *icmd	Internal FCP packet.
5947  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5948  *		 flags	Allocation flags (Sleep or NoSleep).
5949  *
5950  * Return Value: FC_SUCCESS
5951  *		 FC_NOMEM
5952  */
5953 static int
5954 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5955     int nodma, int flags)
5956 {
5957 	int		rval;
5958 	size_t		real_size;
5959 	uint_t		ccount;
5960 	int		bound = 0;
5961 	int		cmd_resp = 0;
5962 	fc_packet_t	*fpkt;
5963 	ddi_dma_cookie_t	pkt_data_cookie;
5964 	ddi_dma_cookie_t	*cp;
5965 	uint32_t		cnt;
5966 
5967 	fpkt = &icmd->ipkt_fc_packet;
5968 
5969 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5970 	    fpkt->pkt_resp_dma == NULL);
5971 
5972 	icmd->ipkt_nodma = nodma;
5973 
5974 	if (nodma) {
5975 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5976 		if (fpkt->pkt_cmd == NULL) {
5977 			goto fail;
5978 		}
5979 
5980 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5981 		if (fpkt->pkt_resp == NULL) {
5982 			goto fail;
5983 		}
5984 	} else {
5985 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5986 
5987 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5988 		if (rval == FC_FAILURE) {
5989 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5990 			    fpkt->pkt_resp_dma == NULL);
5991 			goto fail;
5992 		}
5993 		cmd_resp++;
5994 	}
5995 
5996 	if ((fpkt->pkt_datalen != 0) &&
5997 	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5998 		/*
5999 		 * set up DMA handle and memory for the data in this packet
6000 		 */
6001 		if (ddi_dma_alloc_handle(pptr->port_dip,
6002 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6003 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6004 			goto fail;
6005 		}
6006 
6007 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6008 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6009 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6010 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6011 			goto fail;
6012 		}
6013 
6014 		/* was the DMA memory obtained smaller than what was asked for? */
6015 		if (real_size < fpkt->pkt_datalen) {
6016 			goto fail;
6017 		}
6018 
6019 		/* bind DMA address and handle together */
6020 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6021 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6022 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6023 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6024 			goto fail;
6025 		}
6026 		bound++;
6027 
6028 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6029 			goto fail;
6030 		}
6031 
6032 		fpkt->pkt_data_cookie_cnt = ccount;
6033 
6034 		cp = fpkt->pkt_data_cookie;
6035 		*cp = pkt_data_cookie;
6036 		cp++;
6037 
6038 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
6039 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
6040 			    &pkt_data_cookie);
6041 			*cp = pkt_data_cookie;
6042 		}
6043 
6044 	} else if (fpkt->pkt_datalen != 0) {
6045 		/*
6046 		 * If it's a pseudo FCA, then it can't support DMA even in
6047 		 * SCSI data phase.
6048 		 */
6049 		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6050 		if (fpkt->pkt_data == NULL) {
6051 			goto fail;
6052 		}
6053 
6054 	}
6055 
6056 	return (FC_SUCCESS);
6057 
6058 fail:
6059 	if (bound) {
6060 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6061 	}
6062 
6063 	if (fpkt->pkt_data_dma) {
6064 		if (fpkt->pkt_data) {
6065 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6066 		}
6067 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6068 	} else {
6069 		if (fpkt->pkt_data) {
6070 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6071 		}
6072 	}
6073 
6074 	if (nodma) {
6075 		if (fpkt->pkt_cmd) {
6076 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6077 		}
6078 		if (fpkt->pkt_resp) {
6079 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6080 		}
6081 	} else {
6082 		if (cmd_resp) {
6083 			fcp_free_cmd_resp(pptr, fpkt);
6084 		}
6085 	}
6086 
6087 	return (FC_NOMEM);
6088 }
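
/*
 * Condensed form of the data-buffer setup performed above and the matching
 * teardown done in fcp_free_dma() (error handling omitted; see the code
 * above for the complete sequence):
 *
 *	ddi_dma_alloc_handle(dip, &attr, DDI_DMA_DONTWAIT, NULL, &hdl);
 *	ddi_dma_mem_alloc(hdl, len, &acc_attr, DDI_DMA_CONSISTENT,
 *	    DDI_DMA_DONTWAIT, NULL, &kaddr, &real_size, &acc_hdl);
 *	ddi_dma_addr_bind_handle(hdl, NULL, kaddr, real_size,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 *	    &cookie, &ccount);
 *
 *	... I/O ...
 *
 *	(void) ddi_dma_unbind_handle(hdl);
 *	ddi_dma_mem_free(&acc_hdl);
 *	ddi_dma_free_handle(&hdl);
 */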
6089 
6090 
6091 static void
6092 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6093 {
6094 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6095 
6096 	if (fpkt->pkt_data_dma) {
6097 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6098 		if (fpkt->pkt_data) {
6099 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6100 		}
6101 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6102 	} else {
6103 		if (fpkt->pkt_data) {
6104 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6105 		}
6106 		/*
6107 		 * Need we reset pkt_* to zero???
6108 		 */
6109 	}
6110 
6111 	if (icmd->ipkt_nodma) {
6112 		if (fpkt->pkt_cmd) {
6113 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6114 		}
6115 		if (fpkt->pkt_resp) {
6116 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6117 		}
6118 	} else {
6119 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6120 
6121 		fcp_free_cmd_resp(pptr, fpkt);
6122 	}
6123 }
6124 
6125 /*
6126  *     Function: fcp_lookup_target
6127  *
6128  *  Description: Finds a target given a WWN.
6129  *
6130  *     Argument: *pptr	FCP port.
6131  *		 *wwn	World Wide Name of the device to look for.
6132  *
6133  * Return Value: NULL		No target found
6134  *		 Not NULL	Target structure
6135  *
6136  *	Context: Interrupt context.
6137  *		 The mutex pptr->port_mutex must be owned.
6138  */
6139 /* ARGSUSED */
6140 static struct fcp_tgt *
6141 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6142 {
6143 	int			hash;
6144 	struct fcp_tgt	*ptgt;
6145 
6146 	ASSERT(mutex_owned(&pptr->port_mutex));
6147 
6148 	hash = FCP_HASH(wwn);
6149 
6150 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6151 	    ptgt = ptgt->tgt_next) {
6152 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6153 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6154 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6155 			break;
6156 		}
6157 	}
6158 
6159 	return (ptgt);
6160 }
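
/*
 * Usage sketch for fcp_lookup_target() (the WWN argument is the remote
 * port WWN in raw wire order, as stored in tgt_port_wwn.raw_wwn):
 *
 *	struct fcp_tgt	*ptgt;
 *
 *	mutex_enter(&pptr->port_mutex);
 *	ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
 *	mutex_exit(&pptr->port_mutex);
 *
 * This is the pattern used by fcp_get_target_by_did() below, once the D_ID
 * has been translated to a port WWN by fc_ulp_get_pwwn_by_did().
 */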
6161 
6162 
6163 /*
6164  * Find target structure given a port identifier
6165  */
6166 static struct fcp_tgt *
6167 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6168 {
6169 	fc_portid_t		port_id;
6170 	la_wwn_t		pwwn;
6171 	struct fcp_tgt	*ptgt = NULL;
6172 
6173 	port_id.priv_lilp_posit = 0;
6174 	port_id.port_id = d_id;
6175 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6176 	    &pwwn) == FC_SUCCESS) {
6177 		mutex_enter(&pptr->port_mutex);
6178 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6179 		mutex_exit(&pptr->port_mutex);
6180 	}
6181 
6182 	return (ptgt);
6183 }
6184 
6185 
6186 /*
6187  * the packet completion callback routine for info cmd pkts
6188  *
6189  * this means fpkt points to a response to either a PLOGI or a PRLI
6190  *
6191  * if there is an error an attempt is made to call a routine to resend
6192  * the command that failed
6193  */
6194 static void
6195 fcp_icmd_callback(fc_packet_t *fpkt)
6196 {
6197 	struct fcp_ipkt	*icmd;
6198 	struct fcp_port	*pptr;
6199 	struct fcp_tgt	*ptgt;
6200 	struct la_els_prli	*prli;
6201 	struct la_els_prli	prli_s;
6202 	struct fcp_prli		*fprli;
6203 	struct fcp_lun	*plun;
6204 	int		free_pkt = 1;
6205 	int		rval;
6206 	ls_code_t	resp;
6207 	uchar_t		prli_acc = 0;
6208 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6209 	int		lun0_newalloc;
6210 
6211 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6212 
6213 	/* get ptrs to the port and target structs for the cmd */
6214 	pptr = icmd->ipkt_port;
6215 	ptgt = icmd->ipkt_tgt;
6216 
6217 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6218 
6219 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6220 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6221 		    sizeof (prli_s));
6222 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6223 	}
6224 
6225 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6226 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6227 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6228 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6229 	    ptgt->tgt_d_id);
6230 
6231 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6232 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6233 
6234 		mutex_enter(&ptgt->tgt_mutex);
6235 		if (ptgt->tgt_pd_handle == NULL) {
6236 			/*
6237 			 * in a fabric environment the port device handles
6238 			 * get created only after successful LOGIN into the
6239 			 * transport, so the transport makes this port
6240 			 * device (pd) handle available in this packet, so
6241 			 * save it now
6242 			 */
6243 			ASSERT(fpkt->pkt_pd != NULL);
6244 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6245 		}
6246 		mutex_exit(&ptgt->tgt_mutex);
6247 
6248 		/* which ELS cmd is this response for ?? */
6249 		switch (icmd->ipkt_opcode) {
6250 		case LA_ELS_PLOGI:
6251 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6252 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6253 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6254 			    ptgt->tgt_d_id,
6255 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6256 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6257 
6258 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6259 			    FCP_TGT_TRACE_15);
6260 
6261 			/* Note that we are not allocating a new icmd */
6262 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6263 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6264 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6265 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6266 				    FCP_TGT_TRACE_16);
6267 				goto fail;
6268 			}
6269 			break;
6270 
6271 		case LA_ELS_PRLI:
6272 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6273 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6274 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6275 
6276 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6277 			    FCP_TGT_TRACE_17);
6278 
6279 			prli = &prli_s;
6280 
6281 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6282 			    sizeof (prli_s));
6283 
6284 			fprli = (struct fcp_prli *)prli->service_params;
6285 
6286 			mutex_enter(&ptgt->tgt_mutex);
6287 			ptgt->tgt_icap = fprli->initiator_fn;
6288 			ptgt->tgt_tcap = fprli->target_fn;
6289 			mutex_exit(&ptgt->tgt_mutex);
6290 
6291 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6292 				/*
6293 				 * this FCP device does not support target mode
6294 				 */
6295 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6296 				    FCP_TGT_TRACE_18);
6297 				goto fail;
6298 			}
6299 			if (fprli->retry == 1) {
6300 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6301 				    &ptgt->tgt_port_wwn);
6302 			}
6303 
6304 			/* target is no longer offline */
6305 			mutex_enter(&pptr->port_mutex);
6306 			mutex_enter(&ptgt->tgt_mutex);
6307 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6308 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6309 				    FCP_TGT_MARK);
6310 			} else {
6311 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6312 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6313 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6314 				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
6315 				mutex_exit(&ptgt->tgt_mutex);
6316 				mutex_exit(&pptr->port_mutex);
6317 				goto fail;
6318 			}
6319 			mutex_exit(&ptgt->tgt_mutex);
6320 			mutex_exit(&pptr->port_mutex);
6321 
6322 			/*
6323 			 * lun 0 should always respond to inquiry, so
6324 			 * get the LUN struct for LUN 0
6325 			 *
6326 			 * Currently we deal with first level of addressing.
6327 			 * If / when we start supporting 0x device types
6328 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6329 			 * this logic will need revisiting.
6330 			 */
6331 			lun0_newalloc = 0;
6332 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6333 				/*
6334 				 * no LUN struct for LUN 0 yet exists,
6335 				 * so create one
6336 				 */
6337 				plun = fcp_alloc_lun(ptgt);
6338 				if (plun == NULL) {
6339 					fcp_log(CE_WARN, pptr->port_dip,
6340 					    "!Failed to allocate lun 0 for"
6341 					    " D_ID=%x", ptgt->tgt_d_id);
6342 					goto fail;
6343 				}
6344 				lun0_newalloc = 1;
6345 			}
6346 
6347 			/* fill in LUN info */
6348 			mutex_enter(&ptgt->tgt_mutex);
6349 			/*
6350 			 * consider lun 0 as device not connected if it is
6351 			 * offlined or newly allocated
6352 			 */
6353 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6354 			    lun0_newalloc) {
6355 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6356 			}
6357 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6358 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6359 			ptgt->tgt_lun_cnt = 1;
6360 			ptgt->tgt_report_lun_cnt = 0;
6361 			mutex_exit(&ptgt->tgt_mutex);
6362 
6363 			/* Retrieve the rscn count (if a valid one exists) */
6364 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6365 				rscn_count = ((fc_ulp_rscn_info_t *)
6366 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6367 				    ->ulp_rscn_count;
6368 			} else {
6369 				rscn_count = FC_INVALID_RSCN_COUNT;
6370 			}
6371 
6372 			/* send Report Lun request to target */
6373 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6374 			    sizeof (struct fcp_reportlun_resp),
6375 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6376 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6377 				mutex_enter(&pptr->port_mutex);
6378 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6379 					fcp_log(CE_WARN, pptr->port_dip,
6380 					    "!Failed to send REPORT LUN to"
6381 					    "  D_ID=%x", ptgt->tgt_d_id);
6382 				} else {
6383 					FCP_TRACE(fcp_logq,
6384 					    pptr->port_instbuf, fcp_trace,
6385 					    FCP_BUF_LEVEL_5, 0,
6386 					    "fcp_icmd_callback,2: state change"
6387 					    " occurred for D_ID=0x%x",
6388 					    ptgt->tgt_d_id);
6389 				}
6390 				mutex_exit(&pptr->port_mutex);
6391 
6392 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6393 				    FCP_TGT_TRACE_19);
6394 
6395 				goto fail;
6396 			} else {
6397 				free_pkt = 0;
6398 				fcp_icmd_free(pptr, icmd);
6399 			}
6400 			break;
6401 
6402 		default:
6403 			fcp_log(CE_WARN, pptr->port_dip,
6404 			    "!fcp_icmd_callback Invalid opcode");
6405 			goto fail;
6406 		}
6407 
6408 		return;
6409 	}
6410 
6411 
6412 	/*
6413 	 * Other PLOGI failures are not retried as the
6414 	 * transport does it already
6415 	 */
6416 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6417 		if (fcp_is_retryable(icmd) &&
6418 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6419 
6420 			if (FCP_MUST_RETRY(fpkt)) {
6421 				fcp_queue_ipkt(pptr, fpkt);
6422 				return;
6423 			}
6424 
6425 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6426 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6427 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6428 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6429 			    fpkt->pkt_reason);
6430 
6431 			/*
6432 			 * Retry by recalling the routine that
6433 			 * originally queued this packet
6434 			 */
6435 			mutex_enter(&pptr->port_mutex);
6436 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6437 				caddr_t msg;
6438 
6439 				mutex_exit(&pptr->port_mutex);
6440 
6441 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6442 
6443 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6444 					fpkt->pkt_timeout +=
6445 					    FCP_TIMEOUT_DELTA;
6446 				}
6447 
6448 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6449 				    fpkt);
6450 				if (rval == FC_SUCCESS) {
6451 					return;
6452 				}
6453 
6454 				if (rval == FC_STATEC_BUSY ||
6455 				    rval == FC_OFFLINE) {
6456 					fcp_queue_ipkt(pptr, fpkt);
6457 					return;
6458 				}
6459 				(void) fc_ulp_error(rval, &msg);
6460 
6461 				fcp_log(CE_NOTE, pptr->port_dip,
6462 				    "!ELS 0x%x failed to d_id=0x%x;"
6463 				    " %s", icmd->ipkt_opcode,
6464 				    ptgt->tgt_d_id, msg);
6465 			} else {
6466 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6467 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6468 				    "fcp_icmd_callback,3: state change "
6469 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6470 				mutex_exit(&pptr->port_mutex);
6471 			}
6472 		}
6473 	} else {
6474 		if (fcp_is_retryable(icmd) &&
6475 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6476 			if (FCP_MUST_RETRY(fpkt)) {
6477 				fcp_queue_ipkt(pptr, fpkt);
6478 				return;
6479 			}
6480 		}
6481 		mutex_enter(&pptr->port_mutex);
6482 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6483 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6484 			mutex_exit(&pptr->port_mutex);
6485 			fcp_print_error(fpkt);
6486 		} else {
6487 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6488 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6489 			    "fcp_icmd_callback,4: state change occurred"
6490 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6491 			mutex_exit(&pptr->port_mutex);
6492 		}
6493 	}
6494 
6495 fail:
6496 	if (free_pkt) {
6497 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6498 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6499 		fcp_icmd_free(pptr, icmd);
6500 	}
6501 }
6502 
6503 
6504 /*
6505  * called internally to send an info cmd using the transport
6506  *
6507  * sends either an INQ or a REPORT_LUN
6508  *
6509  * when the packet is completed fcp_scsi_callback is called
6510  */
6511 static int
6512 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6513     int lcount, int tcount, int cause, uint32_t rscn_count)
6514 {
6515 	int			nodma;
6516 	struct fcp_ipkt		*icmd;
6517 	struct fcp_tgt		*ptgt;
6518 	struct fcp_port		*pptr;
6519 	fc_frame_hdr_t		*hp;
6520 	fc_packet_t		*fpkt;
6521 	struct fcp_cmd		fcp_cmd;
6522 	struct fcp_cmd		*fcmd;
6523 	union scsi_cdb		*scsi_cdb;
6524 
6525 	ASSERT(plun != NULL);
6526 
6527 	ptgt = plun->lun_tgt;
6528 	ASSERT(ptgt != NULL);
6529 
6530 	pptr = ptgt->tgt_port;
6531 	ASSERT(pptr != NULL);
6532 
6533 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6534 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6535 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6536 
6537 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6538 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6539 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6540 	    rscn_count);
6541 
6542 	if (icmd == NULL) {
6543 		return (DDI_FAILURE);
6544 	}
6545 
6546 	fpkt = icmd->ipkt_fpkt;
6547 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6548 	icmd->ipkt_retries = 0;
6549 	icmd->ipkt_opcode = opcode;
6550 	icmd->ipkt_lun = plun;
6551 
6552 	if (nodma) {
6553 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6554 	} else {
6555 		fcmd = &fcp_cmd;
6556 	}
6557 	bzero(fcmd, sizeof (struct fcp_cmd));
6558 
6559 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6560 
6561 	hp = &fpkt->pkt_cmd_fhdr;
6562 
6563 	hp->s_id = pptr->port_id;
6564 	hp->d_id = ptgt->tgt_d_id;
6565 	hp->r_ctl = R_CTL_COMMAND;
6566 	hp->type = FC_TYPE_SCSI_FCP;
6567 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6568 	hp->rsvd = 0;
6569 	hp->seq_id = 0;
6570 	hp->seq_cnt = 0;
6571 	hp->ox_id = 0xffff;
6572 	hp->rx_id = 0xffff;
6573 	hp->ro = 0;
6574 
6575 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6576 
6577 	/*
6578 	 * Request SCSI target for expedited processing
6579 	 */
6580 
6581 	/*
6582 	 * Set up for untagged queuing because we do not
6583 	 * know if the fibre device supports queuing.
6584 	 */
6585 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6586 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6587 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6588 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6589 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6590 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6591 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6592 
6593 	switch (opcode) {
6594 	case SCMD_INQUIRY_PAGE83:
6595 		/*
6596 		 * Prepare to get the Inquiry VPD page 83 information
6597 		 */
6598 		fcmd->fcp_cntl.cntl_read_data = 1;
6599 		fcmd->fcp_cntl.cntl_write_data = 0;
6600 		fcmd->fcp_data_len = alloc_len;
6601 
6602 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6603 		fpkt->pkt_comp = fcp_scsi_callback;
6604 
6605 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6606 		scsi_cdb->g0_addr2 = 0x01;
6607 		scsi_cdb->g0_addr1 = 0x83;
6608 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6609 		break;
6610 
6611 	case SCMD_INQUIRY:
6612 		fcmd->fcp_cntl.cntl_read_data = 1;
6613 		fcmd->fcp_cntl.cntl_write_data = 0;
6614 		fcmd->fcp_data_len = alloc_len;
6615 
6616 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6617 		fpkt->pkt_comp = fcp_scsi_callback;
6618 
6619 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6620 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6621 		break;
6622 
6623 	case SCMD_REPORT_LUN: {
6624 		fc_portid_t	d_id;
6625 		opaque_t	fca_dev;
6626 
6627 		ASSERT(alloc_len >= 16);
6628 
6629 		d_id.priv_lilp_posit = 0;
6630 		d_id.port_id = ptgt->tgt_d_id;
6631 
6632 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6633 
6634 		mutex_enter(&ptgt->tgt_mutex);
6635 		ptgt->tgt_fca_dev = fca_dev;
6636 		mutex_exit(&ptgt->tgt_mutex);
6637 
6638 		fcmd->fcp_cntl.cntl_read_data = 1;
6639 		fcmd->fcp_cntl.cntl_write_data = 0;
6640 		fcmd->fcp_data_len = alloc_len;
6641 
6642 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6643 		fpkt->pkt_comp = fcp_scsi_callback;
6644 
6645 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6646 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6647 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6648 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6649 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6650 		break;
6651 	}
6652 
6653 	default:
6654 		fcp_log(CE_WARN, pptr->port_dip,
6655 		    "!fcp_send_scsi Invalid opcode");
6656 		break;
6657 	}
6658 
6659 	if (!nodma) {
6660 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6661 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6662 	}
6663 
6664 	mutex_enter(&pptr->port_mutex);
6665 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6666 
6667 		mutex_exit(&pptr->port_mutex);
6668 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6669 		    FC_SUCCESS) {
6670 			fcp_icmd_free(pptr, icmd);
6671 			return (DDI_FAILURE);
6672 		}
6673 		return (DDI_SUCCESS);
6674 	} else {
6675 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6676 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6677 		    "fcp_send_scsi,1: state change occurred"
6678 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6679 		mutex_exit(&pptr->port_mutex);
6680 		fcp_icmd_free(pptr, icmd);
6681 		return (DDI_FAILURE);
6682 	}
6683 }
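
/*
 * Worked example of the REPORT LUN allocation-length encoding above, using
 * a hypothetical alloc_len of 592 (0x250):
 *
 *	scc5_count0 = 0x50	(alloc_len & 0xff)
 *	scc5_count1 = 0x02	((alloc_len >> 8) & 0xff)
 *	scc5_count2 = 0x00	((alloc_len >> 16) & 0xff)
 *	scc5_count3 = 0x00	((alloc_len >> 24) & 0xff)
 *
 * i.e. the big-endian 32-bit allocation length 00 00 02 50 expected in the
 * 12-byte REPORT LUNS CDB, scc5_count3 being the most significant byte.
 */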
6684 
6685 
6686 /*
6687  * called by fcp_scsi_callback to check to handle the case where
6688  * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6689  */
6690 static int
6691 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6692 {
6693 	uchar_t				rqlen;
6694 	int				rval = DDI_FAILURE;
6695 	struct scsi_extended_sense	sense_info, *sense;
6696 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6697 	    fpkt->pkt_ulp_private;
6698 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6699 	struct fcp_port		*pptr = ptgt->tgt_port;
6700 
6701 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6702 
6703 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6704 		/*
6705 		 * SCSI-II Reserve Release support. Some older FC drives return
6706 		 * Reservation conflict for Report Luns command.
6707 		 */
6708 		if (icmd->ipkt_nodma) {
6709 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6710 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6711 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6712 		} else {
6713 			fcp_rsp_t	new_resp;
6714 
6715 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6716 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6717 
6718 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6719 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6720 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6721 
6722 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6723 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6724 		}
6725 
6726 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6727 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6728 
6729 		return (DDI_SUCCESS);
6730 	}
6731 
6732 	sense = &sense_info;
6733 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6734 		/* no need to continue if sense length is not set */
6735 		return (rval);
6736 	}
6737 
6738 	/* casting 64-bit integer to 8-bit */
6739 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6740 	    sizeof (struct scsi_extended_sense));
6741 
6742 	if (rqlen < 14) {
6743 		/* no need to continue if request length isn't long enough */
6744 		return (rval);
6745 	}
6746 
6747 	if (icmd->ipkt_nodma) {
6748 		/*
6749 		 * We can safely use fcp_response_len here since the
6750 		 * only path that calls fcp_check_reportlun,
6751 		 * fcp_scsi_callback, has already called
6752 		 * fcp_validate_fcp_response.
6753 		 */
6754 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6755 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6756 	} else {
6757 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6758 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6759 		    sizeof (struct scsi_extended_sense));
6760 	}
6761 
6762 	if (!FCP_SENSE_NO_LUN(sense)) {
6763 		mutex_enter(&ptgt->tgt_mutex);
6764 		/* clear the flag if any */
6765 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6766 		mutex_exit(&ptgt->tgt_mutex);
6767 	}
6768 
6769 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6770 	    (sense->es_add_code == 0x20)) {
6771 		if (icmd->ipkt_nodma) {
6772 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6773 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6774 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6775 		} else {
6776 			fcp_rsp_t	new_resp;
6777 
6778 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6779 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6780 
6781 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6782 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6783 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6784 
6785 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6786 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6787 		}
6788 
6789 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6790 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6791 
6792 		return (DDI_SUCCESS);
6793 	}
6794 
6795 	/*
6796 	 * This is for the STK library, which returns a check condition to
6797 	 * indicate the device is not ready and manual assistance is needed.
6798 	 * It does so in response to a REPORT LUN command when the door is open.
6799 	 */
6800 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6801 		if (icmd->ipkt_nodma) {
6802 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6803 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6804 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6805 		} else {
6806 			fcp_rsp_t	new_resp;
6807 
6808 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6809 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6810 
6811 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6812 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6813 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6814 
6815 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6816 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6817 		}
6818 
6819 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6820 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6821 
6822 		return (DDI_SUCCESS);
6823 	}
6824 
6825 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6826 	    (FCP_SENSE_NO_LUN(sense))) {
6827 		mutex_enter(&ptgt->tgt_mutex);
6828 		if ((FCP_SENSE_NO_LUN(sense)) &&
6829 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6830 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6831 			mutex_exit(&ptgt->tgt_mutex);
6832 			/*
6833 			 * reconfig was triggered by ILLEGAL REQUEST but we
6834 			 * got ILLEGAL REQUEST again
6835 			 */
6836 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6837 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6838 			    "!FCP: Unable to obtain Report Lun data"
6839 			    " target=%x", ptgt->tgt_d_id);
6840 		} else {
6841 			if (ptgt->tgt_tid == NULL) {
6842 				timeout_id_t	tid;
6843 				/*
6844 				 * REPORT LUN data has changed.	 Kick off
6845 				 * rediscovery
6846 				 */
6847 				tid = timeout(fcp_reconfigure_luns,
6848 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6849 
6850 				ptgt->tgt_tid = tid;
6851 				ptgt->tgt_state |= FCP_TGT_BUSY;
6852 			}
6853 			if (FCP_SENSE_NO_LUN(sense)) {
6854 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6855 			}
6856 			mutex_exit(&ptgt->tgt_mutex);
6857 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6858 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6859 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6860 				    "!FCP:Report Lun Has Changed"
6861 				    " target=%x", ptgt->tgt_d_id);
6862 			} else if (FCP_SENSE_NO_LUN(sense)) {
6863 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6864 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6865 				    "!FCP:LU Not Supported"
6866 				    " target=%x", ptgt->tgt_d_id);
6867 			}
6868 		}
6869 		rval = DDI_SUCCESS;
6870 	}
6871 
6872 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6873 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6874 	    "D_ID=%x, sense=%x, status=%x",
6875 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6876 	    rsp->fcp_u.fcp_status.scsi_status);
6877 
6878 	return (rval);
6879 }
6880 
6881 /*
6882  *     Function: fcp_scsi_callback
6883  *
6884  *  Description: This is the callback routine set by fcp_send_scsi() after
6885  *		 it calls fcp_icmd_alloc().  The SCSI commands completed here,
6886  *		 all autogenerated by FCP, are:  REPORT_LUN, INQUIRY and
6887  *		 INQUIRY_PAGE83.
6888  *
6889  *     Argument: *fpkt	 FC packet used to convey the command
6890  *
6891  * Return Value: None
6892  */
6893 static void
6894 fcp_scsi_callback(fc_packet_t *fpkt)
6895 {
6896 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6897 	    fpkt->pkt_ulp_private;
6898 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6899 	struct fcp_port	*pptr;
6900 	struct fcp_tgt	*ptgt;
6901 	struct fcp_lun	*plun;
6902 	struct fcp_rsp		response, *rsp;
6903 
6904 	ptgt = icmd->ipkt_tgt;
6905 	pptr = ptgt->tgt_port;
6906 	plun = icmd->ipkt_lun;
6907 
6908 	if (icmd->ipkt_nodma) {
6909 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6910 	} else {
6911 		rsp = &response;
6912 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6913 		    sizeof (struct fcp_rsp));
6914 	}
6915 
6916 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6917 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6918 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6919 	    "status=%x, lun num=%x",
6920 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6921 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6922 
6923 	/*
6924 	 * Pre-init LUN GUID with NWWN if it is not a device that
6925 	 * supports multiple luns and we know it's not page83
6926 	 * compliant.  Although a NWWN is not unique per lun,
6927 	 * we will be fine since there is only one lun behind the target
6928 	 * in this case.
6929 	 */
6930 	if ((plun->lun_guid_size == 0) &&
6931 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6932 	    (fcp_symmetric_device_probe(plun) == 0)) {
6933 
6934 		char ascii_wwn[FC_WWN_SIZE*2+1];
6935 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6936 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6937 	}
6938 
6939 	/*
6940 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6941 	 * when they have more data than what is asked for in the CDB.  An overrun
6942 	 * is really when FCP_DL is smaller than the data length in CDB.
6943 	 * In the case here we know that REPORT LUN command we formed within
6944 	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6945 	 * behavior. In reality this is FC_SUCCESS.
6946 	 */
6947 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6948 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6949 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6950 		fpkt->pkt_state = FC_PKT_SUCCESS;
6951 	}
6952 
6953 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6954 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6955 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6956 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6957 		    ptgt->tgt_d_id);
6958 
6959 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6960 			/*
6961 			 * Inquiry VPD page command on A5K SES devices would
6962 			 * result in data CRC errors.
6963 			 */
6964 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6965 				(void) fcp_handle_page83(fpkt, icmd, 1);
6966 				return;
6967 			}
6968 		}
6969 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6970 		    FCP_MUST_RETRY(fpkt)) {
6971 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6972 			fcp_retry_scsi_cmd(fpkt);
6973 			return;
6974 		}
6975 
6976 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6977 		    FCP_TGT_TRACE_20);
6978 
6979 		mutex_enter(&pptr->port_mutex);
6980 		mutex_enter(&ptgt->tgt_mutex);
6981 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6982 			mutex_exit(&ptgt->tgt_mutex);
6983 			mutex_exit(&pptr->port_mutex);
6984 			fcp_print_error(fpkt);
6985 		} else {
6986 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6987 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6988 			    "fcp_scsi_callback,1: state change occurred"
6989 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6990 			mutex_exit(&ptgt->tgt_mutex);
6991 			mutex_exit(&pptr->port_mutex);
6992 		}
6993 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6994 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6995 		fcp_icmd_free(pptr, icmd);
6996 		return;
6997 	}
6998 
6999 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7000 
7001 	mutex_enter(&pptr->port_mutex);
7002 	mutex_enter(&ptgt->tgt_mutex);
7003 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7004 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7005 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7006 		    "fcp_scsi_callback,2: state change occurred"
7007 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7008 		mutex_exit(&ptgt->tgt_mutex);
7009 		mutex_exit(&pptr->port_mutex);
7010 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7011 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7012 		fcp_icmd_free(pptr, icmd);
7013 		return;
7014 	}
7015 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7016 
7017 	mutex_exit(&ptgt->tgt_mutex);
7018 	mutex_exit(&pptr->port_mutex);
7019 
7020 	if (icmd->ipkt_nodma) {
7021 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7022 		    sizeof (struct fcp_rsp));
7023 	} else {
7024 		bep = &fcp_rsp_err;
7025 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7026 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7027 	}
7028 
7029 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7030 		fcp_retry_scsi_cmd(fpkt);
7031 		return;
7032 	}
7033 
7034 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7035 	    FCP_NO_FAILURE) {
7036 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7037 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7038 		    "rsp_code=0x%x, rsp_len_set=0x%x",
7039 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7040 		fcp_retry_scsi_cmd(fpkt);
7041 		return;
7042 	}
7043 
7044 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7045 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7046 		fcp_queue_ipkt(pptr, fpkt);
7047 		return;
7048 	}
7049 
7050 	/*
7051 	 * Devices that do not support INQUIRY_PAGE83, return check condition
7052 	 * with illegal request as per SCSI spec.
7053 	 * Crossbridge is one such device and Daktari's SES node is another.
7054 	 * We ideally want to enumerate these devices as non-mpxio devices.
7055 	 * SES nodes (Daktari only currently) are an exception to this.
7056 	 */
7057 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7058 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7059 
7060 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7061 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
7062 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7063 		    "check condition. May enumerate as non-mpxio device",
7064 		    ptgt->tgt_d_id, plun->lun_type);
7065 
7066 		/*
7067 		 * If we let Daktari's SES be enumerated as a non-mpxio
7068 		 * device, there will be a discrepancy in that the other
7069 		 * internal FC disks will get enumerated as mpxio devices.
7070 		 * Applications like luxadm expect this to be consistent.
7071 		 *
7072 		 * So, we put in a hack here to check if this is an SES device
7073 		 * and handle it here.
7074 		 */
7075 		if (plun->lun_type == DTYPE_ESI) {
7076 			/*
7077 			 * Since, pkt_state is actually FC_PKT_SUCCESS
7078 			 * at this stage, we fake a failure here so that
7079 			 * fcp_handle_page83 will create a device path using
7080 			 * the WWN instead of the GUID which is not there anyway
7081 			 */
7082 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7083 			(void) fcp_handle_page83(fpkt, icmd, 1);
7084 			return;
7085 		}
7086 
7087 		mutex_enter(&ptgt->tgt_mutex);
7088 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7089 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7090 		mutex_exit(&ptgt->tgt_mutex);
7091 
7092 		(void) fcp_call_finish_init(pptr, ptgt,
7093 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7094 		    icmd->ipkt_cause);
7095 		fcp_icmd_free(pptr, icmd);
7096 		return;
7097 	}
7098 
7099 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7100 		int rval = DDI_FAILURE;
7101 
7102 		/*
7103 		 * handle cases where report lun isn't supported
7104 		 * by faking up our own REPORT_LUN response or
7105 		 * UNIT ATTENTION
7106 		 */
7107 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7108 			rval = fcp_check_reportlun(rsp, fpkt);
7109 
7110 			/*
7111 			 * fcp_check_reportlun might have modified the
7112 			 * FCP response. Copy it in again to get an updated
7113 			 * FCP response
7114 			 */
7115 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7116 				rsp = &response;
7117 
7118 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7119 				    fpkt->pkt_resp_acc,
7120 				    sizeof (struct fcp_rsp));
7121 			}
7122 		}
7123 
7124 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7125 			if (rval == DDI_SUCCESS) {
7126 				(void) fcp_call_finish_init(pptr, ptgt,
7127 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7128 				    icmd->ipkt_cause);
7129 				fcp_icmd_free(pptr, icmd);
7130 			} else {
7131 				fcp_retry_scsi_cmd(fpkt);
7132 			}
7133 
7134 			return;
7135 		}
7136 	} else {
7137 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7138 			mutex_enter(&ptgt->tgt_mutex);
7139 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7140 			mutex_exit(&ptgt->tgt_mutex);
7141 		}
7142 	}
7143 
7144 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7145 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7146 		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7147 		    DDI_DMA_SYNC_FORCPU);
7148 	}
7149 
7150 	switch (icmd->ipkt_opcode) {
7151 	case SCMD_INQUIRY:
7152 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7153 		fcp_handle_inquiry(fpkt, icmd);
7154 		break;
7155 
7156 	case SCMD_REPORT_LUN:
7157 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7158 		    FCP_TGT_TRACE_22);
7159 		fcp_handle_reportlun(fpkt, icmd);
7160 		break;
7161 
7162 	case SCMD_INQUIRY_PAGE83:
7163 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7164 		(void) fcp_handle_page83(fpkt, icmd, 0);
7165 		break;
7166 
7167 	default:
7168 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7169 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7170 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7171 		fcp_icmd_free(pptr, icmd);
7172 		break;
7173 	}
7174 }
7175 
7176 
7177 static void
7178 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7179 {
7180 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7181 	    fpkt->pkt_ulp_private;
7182 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7183 	struct fcp_port	*pptr = ptgt->tgt_port;
7184 
7185 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7186 	    fcp_is_retryable(icmd)) {
7187 		mutex_enter(&pptr->port_mutex);
7188 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7189 			mutex_exit(&pptr->port_mutex);
7190 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7191 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7192 			    "Retrying %s to %x; state=%x, reason=%x",
7193 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7194 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7195 			    fpkt->pkt_state, fpkt->pkt_reason);
7196 
7197 			fcp_queue_ipkt(pptr, fpkt);
7198 		} else {
7199 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7200 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7201 			    "fcp_retry_scsi_cmd,1: state change occurred"
7202 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7203 			mutex_exit(&pptr->port_mutex);
7204 			(void) fcp_call_finish_init(pptr, ptgt,
7205 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7206 			    icmd->ipkt_cause);
7207 			fcp_icmd_free(pptr, icmd);
7208 		}
7209 	} else {
7210 		fcp_print_error(fpkt);
7211 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7212 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7213 		fcp_icmd_free(pptr, icmd);
7214 	}
7215 }
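
/*
 * Illustrative sketch (not part of the driver): the retry path above boils
 * down to "requeue while under the retry cap, the failure is retryable and
 * nothing changed underneath us; otherwise finish and free".  The standalone
 * predicate below mirrors that decision with hypothetical names (ex_cmd_t,
 * EX_MAX_RETRIES); it is an assumption-laden example, not driver code.
 */
#define	EX_MAX_RETRIES	3	/* assumed cap, analogous to FCP_MAX_RETRIES */

typedef struct ex_cmd {
	int	retries;	/* attempts so far */
	int	retryable;	/* non-zero if the failure is transient */
	int	link_cnt;	/* link generation when the cmd was built */
	int	tgt_cnt;	/* target generation when the cmd was built */
} ex_cmd_t;

/* Return 1 to requeue the command, 0 to give up and complete it. */
static int
ex_should_retry(const ex_cmd_t *cmd, int cur_link_cnt, int cur_tgt_cnt)
{
	if (cmd->retries >= EX_MAX_RETRIES || !cmd->retryable)
		return (0);
	/* A link or target generation bump means the world changed. */
	if (cmd->link_cnt != cur_link_cnt || cmd->tgt_cnt != cur_tgt_cnt)
		return (0);
	return (1);
}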
7216 
7217 /*
7218  *     Function: fcp_handle_page83
7219  *
7220  *  Description: Treats the response to INQUIRY_PAGE83.
7221  *
7222  *     Argument: *fpkt	FC packet used to convey the command.
7223  *		 *icmd	Original fcp_ipkt structure.
7224  *		 ignore_page83_data
7225  *			If it's 1, this is a special device's page83
7226  *			response; the device should be enumerated under mpxio.
7227  *
7228  * Return Value: None
7229  */
7230 static void
7231 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7232     int ignore_page83_data)
7233 {
7234 	struct fcp_port	*pptr;
7235 	struct fcp_lun	*plun;
7236 	struct fcp_tgt	*ptgt;
7237 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7238 	int			fail = 0;
7239 	ddi_devid_t		devid;
7240 	char			*guid = NULL;
7241 	int			ret;
7242 
7243 	ASSERT(icmd != NULL && fpkt != NULL);
7244 
7245 	pptr = icmd->ipkt_port;
7246 	ptgt = icmd->ipkt_tgt;
7247 	plun = icmd->ipkt_lun;
7248 
7249 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7250 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7251 
7252 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7253 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7254 
7255 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7256 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7257 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7258 		    "dtype=0x%x, lun num=%x",
7259 		    pptr->port_instance, ptgt->tgt_d_id,
7260 		    dev_id_page[0], plun->lun_num);
7261 
7262 		ret = ddi_devid_scsi_encode(
7263 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7264 		    NULL,		/* driver name */
7265 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7266 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7267 		    NULL,		/* page 80 data */
7268 		    0,		/* page 80 len */
7269 		    dev_id_page,	/* page 83 data */
7270 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7271 		    &devid);
7272 
7273 		if (ret == DDI_SUCCESS) {
7274 
7275 			guid = ddi_devid_to_guid(devid);
7276 
7277 			if (guid) {
7278 				/*
7279 				 * Check our current guid.  If it's non null
7280 				 * and it has changed, we need to copy it into
7281 				 * lun_old_guid since we might still need it.
7282 				 */
7283 				if (plun->lun_guid &&
7284 				    strcmp(guid, plun->lun_guid)) {
7285 					unsigned int len;
7286 
7287 					/*
7288 					 * If the guid of the LUN changes,
7289 					 * reconfiguration should be triggered
7290 					 * to reflect the changes.
7291 					 * i.e. we should offline the LUN with
7292 					 * the old guid, and online the LUN with
7293 					 * the new guid.
7294 					 */
7295 					plun->lun_state |= FCP_LUN_CHANGED;
7296 
7297 					if (plun->lun_old_guid) {
7298 						kmem_free(plun->lun_old_guid,
7299 						    plun->lun_old_guid_size);
7300 					}
7301 
7302 					len = plun->lun_guid_size;
7303 					plun->lun_old_guid_size = len;
7304 
7305 					plun->lun_old_guid = kmem_zalloc(len,
7306 					    KM_NOSLEEP);
7307 
7308 					if (plun->lun_old_guid) {
7309 						/*
7310 						 * The alloc was successful,
7311 						 * so do the copy.
7312 						 */
7313 						bcopy(plun->lun_guid,
7314 						    plun->lun_old_guid, len);
7315 					} else {
7316 						fail = 1;
7317 						plun->lun_old_guid_size = 0;
7318 					}
7319 				}
7320 				if (!fail) {
7321 					if (fcp_copy_guid_2_lun_block(
7322 					    plun, guid)) {
7323 						fail = 1;
7324 					}
7325 				}
7326 				ddi_devid_free_guid(guid);
7327 
7328 			} else {
7329 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7330 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7331 				    "fcp_handle_page83: unable to create "
7332 				    "GUID");
7333 
7334 				/* couldn't create good guid from devid */
7335 				fail = 1;
7336 			}
7337 			ddi_devid_free(devid);
7338 
7339 		} else if (ret == DDI_NOT_WELL_FORMED) {
7340 			/* NULL filled data for page 83 */
7341 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7342 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7343 			    "fcp_handle_page83: retry GUID");
7344 
7345 			icmd->ipkt_retries = 0;
7346 			fcp_retry_scsi_cmd(fpkt);
7347 			return;
7348 		} else {
7349 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7350 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7351 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7352 			    ret);
7353 			/*
7354 			 * Since the page83 validation was
7355 			 * introduced late, we are tolerant of
7356 			 * existing devices that were already
7357 			 * found to be working under mpxio,
7358 			 * like A5200's SES device.  Its page83
7359 			 * response is not standard-compliant,
7360 			 * but we still want it enumerated under mpxio.
7361 			 */
7362 			if (fcp_symmetric_device_probe(plun) != 0) {
7363 				fail = 1;
7364 			}
7365 		}
7366 
7367 	} else {
7368 		/* bad packet state */
7369 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7370 
7371 		/*
7372 		 * Some special devices (A5K SES and Daktari's SES devices)
7373 		 * should be enumerated under mpxio,
7374 		 * or "luxadm dis" will fail.
7375 		 */
7376 		if (ignore_page83_data) {
7377 			fail = 0;
7378 		} else {
7379 			fail = 1;
7380 		}
7381 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7382 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7383 		    "!Devid page cmd failed. "
7384 		    "fpkt_state: %x fpkt_reason: %x "
7385 		    "ignore_page83: %d",
7386 		    fpkt->pkt_state, fpkt->pkt_reason,
7387 		    ignore_page83_data);
7388 	}
7389 
7390 	mutex_enter(&pptr->port_mutex);
7391 	mutex_enter(&plun->lun_mutex);
7392 	/*
7393 	 * If lun_cip is not NULL, leave lun_mpxio alone to avoid a
7394 	 * mismatch between lun_cip and lun_mpxio.
7395 	 */
7396 	if (plun->lun_cip == NULL) {
7397 		/*
7398 		 * If we don't have a guid for this lun it's because we were
7399 		 * unable to glean one from the page 83 response.  Set the
7400 		 * control flag to 0 here to make sure that we don't attempt to
7401 		 * enumerate it under mpxio.
7402 		 */
7403 		if (fail || pptr->port_mpxio == 0) {
7404 			plun->lun_mpxio = 0;
7405 		} else {
7406 			plun->lun_mpxio = 1;
7407 		}
7408 	}
7409 	mutex_exit(&plun->lun_mutex);
7410 	mutex_exit(&pptr->port_mutex);
7411 
7412 	mutex_enter(&ptgt->tgt_mutex);
7413 	plun->lun_state &=
7414 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7415 	mutex_exit(&ptgt->tgt_mutex);
7416 
7417 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7418 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7419 
7420 	fcp_icmd_free(pptr, icmd);
7421 }
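
/*
 * Illustrative sketch (not part of the driver): fcp_handle_page83() passes
 * the raw INQUIRY VPD page 0x83 buffer to ddi_devid_scsi_encode() and only
 * consumes the resulting GUID.  For readers unfamiliar with the on-the-wire
 * layout that routine digests, the standalone helper below walks the SPC
 * device-identification page and returns a pointer to the first NAA (type 3)
 * designator.  The function name and return convention are assumptions made
 * for this example only.
 */
#include <stddef.h>
#include <stdint.h>

/*
 * page83:  byte 0    peripheral qualifier/device type
 *          byte 1    page code (0x83)
 *          bytes 2-3 big-endian length of the descriptor list
 *          then designation descriptors, each with a 4-byte header.
 */
static const uint8_t *
ex_find_naa_designator(const uint8_t *page83, size_t buflen, size_t *dlen)
{
	size_t	list_len, off;

	if (buflen < 4 || page83[1] != 0x83)
		return (NULL);

	list_len = ((size_t)page83[2] << 8) | page83[3];
	if (list_len > buflen - 4)
		list_len = buflen - 4;		/* clamp to what we received */

	for (off = 4; off + 4 <= 4 + list_len; ) {
		uint8_t	desig_type = page83[off + 1] & 0x0F;
		size_t	desig_len = page83[off + 3];

		if (off + 4 + desig_len > 4 + list_len)
			break;			/* truncated descriptor */
		if (desig_type == 0x3) {	/* NAA designator */
			*dlen = desig_len;
			return (&page83[off + 4]);
		}
		off += 4 + desig_len;
	}
	return (NULL);
}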
7422 
7423 /*
7424  *     Function: fcp_handle_inquiry
7425  *
7426  *  Description: Called by fcp_scsi_callback to handle the response to an
7427  *		 INQUIRY request.
7428  *
7429  *     Argument: *fpkt	FC packet used to convey the command.
7430  *		 *icmd	Original fcp_ipkt structure.
7431  *
7432  * Return Value: None
7433  */
7434 static void
7435 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7436 {
7437 	struct fcp_port	*pptr;
7438 	struct fcp_lun	*plun;
7439 	struct fcp_tgt	*ptgt;
7440 	uchar_t		dtype;
7441 	uchar_t		pqual;
7442 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7443 
7444 	ASSERT(icmd != NULL && fpkt != NULL);
7445 
7446 	pptr = icmd->ipkt_port;
7447 	ptgt = icmd->ipkt_tgt;
7448 	plun = icmd->ipkt_lun;
7449 
7450 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7451 	    sizeof (struct scsi_inquiry));
7452 
7453 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7454 	pqual = plun->lun_inq.inq_dtype >> 5;
7455 
7456 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7457 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7458 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7459 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7460 	    plun->lun_num, dtype, pqual);
7461 
7462 	if (pqual != 0) {
7463 		/*
7464 		 * Non-zero peripheral qualifier
7465 		 */
7466 		fcp_log(CE_CONT, pptr->port_dip,
7467 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7468 		    "Device type=0x%x Peripheral qual=0x%x\n",
7469 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7470 
7471 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7472 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7473 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7474 		    "Device type=0x%x Peripheral qual=0x%x\n",
7475 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7476 
7477 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7478 
7479 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7480 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7481 		fcp_icmd_free(pptr, icmd);
7482 		return;
7483 	}
7484 
7485 	/*
7486 	 * If the device is already initialized, check the dtype
7487 	 * for a change. If it has changed then update the flags
7488 	 * so the create_luns will offline the old device and
7489 	 * create the new device. Refer to bug: 4764752
7490 	 */
7491 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7492 		plun->lun_state |= FCP_LUN_CHANGED;
7493 	}
7494 	plun->lun_type = plun->lun_inq.inq_dtype;
7495 
7496 	/*
7497 	 * This code is setting/initializing the throttling in the FCA
7498 	 * driver.
7499 	 */
7500 	mutex_enter(&pptr->port_mutex);
7501 	if (!pptr->port_notify) {
7502 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7503 			uint32_t cmd = 0;
7504 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7505 			    ((cmd & 0xFFFFFF00 >> 8) |
7506 			    FCP_SVE_THROTTLE << 8));
7507 			pptr->port_notify = 1;
7508 			mutex_exit(&pptr->port_mutex);
7509 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7510 			mutex_enter(&pptr->port_mutex);
7511 		}
7512 	}
7513 
7514 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7515 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7516 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7517 		    "fcp_handle_inquiry,1: state change occurred"
7518 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7519 		mutex_exit(&pptr->port_mutex);
7520 
7521 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7522 		(void) fcp_call_finish_init(pptr, ptgt,
7523 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7524 		    icmd->ipkt_cause);
7525 		fcp_icmd_free(pptr, icmd);
7526 		return;
7527 	}
7528 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7529 	mutex_exit(&pptr->port_mutex);
7530 
7531 	/* Retrieve the rscn count (if a valid one exists) */
7532 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7533 		rscn_count = ((fc_ulp_rscn_info_t *)
7534 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7535 	} else {
7536 		rscn_count = FC_INVALID_RSCN_COUNT;
7537 	}
7538 
7539 	/*
7540 	 * Read Inquiry VPD Page 0x83 to uniquely
7541 	 * identify this logical unit.
7542 	 */
7543 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7544 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7545 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7546 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7547 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7548 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7549 		(void) fcp_call_finish_init(pptr, ptgt,
7550 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7551 		    icmd->ipkt_cause);
7552 	}
7553 
7554 	fcp_icmd_free(pptr, icmd);
7555 }
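
/*
 * Illustrative sketch (not part of the driver): fcp_handle_inquiry() splits
 * the first byte of the standard INQUIRY data into a peripheral qualifier
 * (bits 7-5) and a peripheral device type (bits 4-0), and only proceeds when
 * the qualifier is zero.  The standalone helper below shows that decoding;
 * the 0x1F mask mirrors DTYPE_MASK, and the helper name is hypothetical.
 */
#include <stdint.h>

/* Decode the byte; return 0 if the LUN is usable, non-zero otherwise. */
static int
ex_decode_inq_byte0(uint8_t inq_dtype, uint8_t *dtype, uint8_t *pqual)
{
	*dtype = inq_dtype & 0x1F;	/* peripheral device type */
	*pqual = inq_dtype >> 5;	/* peripheral qualifier */

	/* A non-zero qualifier means the LUN is not usable as reported. */
	return (*pqual != 0);
}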
7556 
7557 /*
7558  *     Function: fcp_handle_reportlun
7559  *
7560  *  Description: Called by fcp_scsi_callback to handle the response to a
7561  *		 REPORT_LUN request.
7562  *
7563  *     Argument: *fpkt	FC packet used to convey the command.
7564  *		 *icmd	Original fcp_ipkt structure.
7565  *
7566  * Return Value: None
7567  */
7568 static void
7569 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7570 {
7571 	int				i;
7572 	int				nluns_claimed;
7573 	int				nluns_bufmax;
7574 	int				len;
7575 	uint16_t			lun_num;
7576 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7577 	struct fcp_port			*pptr;
7578 	struct fcp_tgt			*ptgt;
7579 	struct fcp_lun			*plun;
7580 	struct fcp_reportlun_resp	*report_lun;
7581 
7582 	pptr = icmd->ipkt_port;
7583 	ptgt = icmd->ipkt_tgt;
7584 	len = fpkt->pkt_datalen;
7585 
7586 	if ((len < FCP_LUN_HEADER) ||
7587 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7588 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7589 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7590 		fcp_icmd_free(pptr, icmd);
7591 		return;
7592 	}
7593 
7594 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7595 	    fpkt->pkt_datalen);
7596 
7597 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7598 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7599 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7600 	    pptr->port_instance, ptgt->tgt_d_id);
7601 
7602 	/*
7603 	 * Get the number of luns (which is supplied as LUNS * 8) the
7604 	 * device claims it has.
7605 	 */
7606 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7607 
7608 	/*
7609 	 * Get the maximum number of luns the buffer submitted can hold.
7610 	 */
7611 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7612 
7613 	/*
7614 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7615 	 */
7616 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7617 		kmem_free(report_lun, len);
7618 
7619 		fcp_log(CE_NOTE, pptr->port_dip, "!Cannot support"
7620 		    " 0x%x LUNs for target=%x", nluns_claimed,
7621 		    ptgt->tgt_d_id);
7622 
7623 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7624 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7625 		fcp_icmd_free(pptr, icmd);
7626 		return;
7627 	}
7628 
7629 	/*
7630 	 * If there are more LUNs than we have allocated memory for,
7631 	 * allocate more space and send down yet another report lun if
7632 	 * the maximum number of attempts hasn't been reached.
7633 	 */
7634 	mutex_enter(&ptgt->tgt_mutex);
7635 
7636 	if ((nluns_claimed > nluns_bufmax) &&
7637 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7638 
7639 		struct fcp_lun *plun;
7640 
7641 		ptgt->tgt_report_lun_cnt++;
7642 		plun = ptgt->tgt_lun;
7643 		ASSERT(plun != NULL);
7644 		mutex_exit(&ptgt->tgt_mutex);
7645 
7646 		kmem_free(report_lun, len);
7647 
7648 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7649 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7650 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7651 		    nluns_claimed, ptgt->tgt_d_id);
7652 
7653 		/* Retrieve the rscn count (if a valid one exists) */
7654 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7655 			rscn_count = ((fc_ulp_rscn_info_t *)
7656 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7657 			    ulp_rscn_count;
7658 		} else {
7659 			rscn_count = FC_INVALID_RSCN_COUNT;
7660 		}
7661 
7662 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7663 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7664 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7665 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7666 			(void) fcp_call_finish_init(pptr, ptgt,
7667 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7668 			    icmd->ipkt_cause);
7669 		}
7670 
7671 		fcp_icmd_free(pptr, icmd);
7672 		return;
7673 	}
7674 
7675 	if (nluns_claimed > nluns_bufmax) {
7676 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7677 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7678 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7679 		    "	 Number of LUNs lost=%x",
7680 		    ptgt->tgt_port_wwn.raw_wwn[0],
7681 		    ptgt->tgt_port_wwn.raw_wwn[1],
7682 		    ptgt->tgt_port_wwn.raw_wwn[2],
7683 		    ptgt->tgt_port_wwn.raw_wwn[3],
7684 		    ptgt->tgt_port_wwn.raw_wwn[4],
7685 		    ptgt->tgt_port_wwn.raw_wwn[5],
7686 		    ptgt->tgt_port_wwn.raw_wwn[6],
7687 		    ptgt->tgt_port_wwn.raw_wwn[7],
7688 		    nluns_claimed - nluns_bufmax);
7689 
7690 		nluns_claimed = nluns_bufmax;
7691 	}
7692 	ptgt->tgt_lun_cnt = nluns_claimed;
7693 
7694 	/*
7695 	 * Identify missing LUNs and print warning messages
7696 	 */
7697 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7698 		int offline;
7699 		int exists = 0;
7700 
7701 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7702 
7703 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7704 			uchar_t		*lun_string;
7705 
7706 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7707 
7708 			switch (lun_string[0] & 0xC0) {
7709 			case FCP_LUN_ADDRESSING:
7710 			case FCP_PD_ADDRESSING:
7711 			case FCP_VOLUME_ADDRESSING:
7712 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7713 				    lun_string[1];
7714 				if (plun->lun_num == lun_num) {
7715 					exists++;
7716 					break;
7717 				}
7718 				break;
7719 
7720 			default:
7721 				break;
7722 			}
7723 		}
7724 
7725 		if (!exists && !offline) {
7726 			mutex_exit(&ptgt->tgt_mutex);
7727 
7728 			mutex_enter(&pptr->port_mutex);
7729 			mutex_enter(&ptgt->tgt_mutex);
7730 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7731 				/*
7732 				 * set disappear flag when device was connected
7733 				 */
7734 				if (!(plun->lun_state &
7735 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7736 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7737 				}
7738 				mutex_exit(&ptgt->tgt_mutex);
7739 				mutex_exit(&pptr->port_mutex);
7740 				if (!(plun->lun_state &
7741 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7742 					fcp_log(CE_NOTE, pptr->port_dip,
7743 					    "!Lun=%x for target=%x disappeared",
7744 					    plun->lun_num, ptgt->tgt_d_id);
7745 				}
7746 				mutex_enter(&ptgt->tgt_mutex);
7747 			} else {
7748 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7749 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7750 				    "fcp_handle_reportlun,1: state change"
7751 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7752 				mutex_exit(&ptgt->tgt_mutex);
7753 				mutex_exit(&pptr->port_mutex);
7754 				kmem_free(report_lun, len);
7755 				(void) fcp_call_finish_init(pptr, ptgt,
7756 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7757 				    icmd->ipkt_cause);
7758 				fcp_icmd_free(pptr, icmd);
7759 				return;
7760 			}
7761 		} else if (exists) {
7762 			/*
7763 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7764 			 * actually exists in REPORT_LUN response
7765 			 */
7766 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7767 				plun->lun_state &=
7768 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7769 			}
7770 			if (offline || plun->lun_num == 0) {
7771 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7772 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7773 					mutex_exit(&ptgt->tgt_mutex);
7774 					fcp_log(CE_NOTE, pptr->port_dip,
7775 					    "!Lun=%x for target=%x reappeared",
7776 					    plun->lun_num, ptgt->tgt_d_id);
7777 					mutex_enter(&ptgt->tgt_mutex);
7778 				}
7779 			}
7780 		}
7781 	}
7782 
7783 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7784 	mutex_exit(&ptgt->tgt_mutex);
7785 
7786 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7787 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7788 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7789 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7790 
7791 	/* scan each lun */
7792 	for (i = 0; i < nluns_claimed; i++) {
7793 		uchar_t	*lun_string;
7794 
7795 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7796 
7797 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7798 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7799 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7800 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7801 		    lun_string[0]);
7802 
7803 		switch (lun_string[0] & 0xC0) {
7804 		case FCP_LUN_ADDRESSING:
7805 		case FCP_PD_ADDRESSING:
7806 		case FCP_VOLUME_ADDRESSING:
7807 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7808 
7809 			/* We will skip masked LUNs because of the blacklist. */
7810 			if (fcp_lun_blacklist != NULL) {
7811 				mutex_enter(&ptgt->tgt_mutex);
7812 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7813 				    lun_num) == TRUE) {
7814 					ptgt->tgt_lun_cnt--;
7815 					mutex_exit(&ptgt->tgt_mutex);
7816 					break;
7817 				}
7818 				mutex_exit(&ptgt->tgt_mutex);
7819 			}
7820 
7821 			/* see if this LUN is already allocated */
7822 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7823 				plun = fcp_alloc_lun(ptgt);
7824 				if (plun == NULL) {
7825 					fcp_log(CE_NOTE, pptr->port_dip,
7826 					    "!Lun allocation failed"
7827 					    " target=%x lun=%x",
7828 					    ptgt->tgt_d_id, lun_num);
7829 					break;
7830 				}
7831 			}
7832 
7833 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7834 			/* convert to LUN */
7835 			plun->lun_addr.ent_addr_0 =
7836 			    BE_16(*(uint16_t *)&(lun_string[0]));
7837 			plun->lun_addr.ent_addr_1 =
7838 			    BE_16(*(uint16_t *)&(lun_string[2]));
7839 			plun->lun_addr.ent_addr_2 =
7840 			    BE_16(*(uint16_t *)&(lun_string[4]));
7841 			plun->lun_addr.ent_addr_3 =
7842 			    BE_16(*(uint16_t *)&(lun_string[6]));
7843 
7844 			plun->lun_num = lun_num;
7845 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7846 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7847 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7848 
7849 			/* Retrieve the rscn count (if a valid one exists) */
7850 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7851 				rscn_count = ((fc_ulp_rscn_info_t *)
7852 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7853 				    ulp_rscn_count;
7854 			} else {
7855 				rscn_count = FC_INVALID_RSCN_COUNT;
7856 			}
7857 
7858 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7859 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7860 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7861 				mutex_enter(&pptr->port_mutex);
7862 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7863 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7864 					fcp_log(CE_NOTE, pptr->port_dip,
7865 					    "!failed to send INQUIRY"
7866 					    " target=%x lun=%x",
7867 					    ptgt->tgt_d_id, plun->lun_num);
7868 				} else {
7869 					FCP_TRACE(fcp_logq,
7870 					    pptr->port_instbuf, fcp_trace,
7871 					    FCP_BUF_LEVEL_5, 0,
7872 					    "fcp_handle_reportlun,2: state"
7873 					    " change occurred for D_ID=0x%x",
7874 					    ptgt->tgt_d_id);
7875 				}
7876 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7877 				mutex_exit(&pptr->port_mutex);
7878 			} else {
7879 				continue;
7880 			}
7881 			break;
7882 
7883 		default:
7884 			fcp_log(CE_WARN, NULL,
7885 			    "!Unsupported LUN Addressing method %x "
7886 			    "in response to REPORT_LUN", lun_string[0]);
7887 			break;
7888 		}
7889 
7890 		/*
7891 		 * each time through this loop we decrement tmp_cnt by one.
7892 		 * Since we go through this loop once for each LUN, tmp_cnt
7893 		 * should never underflow.
7894 		 */
7895 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7896 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7897 	}
7898 
7899 	if (i == 0) {
7900 		fcp_log(CE_WARN, pptr->port_dip,
7901 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7902 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7903 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7904 	}
7905 
7906 	kmem_free(report_lun, len);
7907 	fcp_icmd_free(pptr, icmd);
7908 }
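
/*
 * Illustrative sketch (not part of the driver): fcp_handle_reportlun()
 * derives the claimed LUN count from the big-endian byte count in the
 * REPORT LUNS header (count * 8) and, for the addressing methods it supports,
 * builds a 16-bit LUN number from the first two bytes of each 8-byte entry.
 * The standalone helpers below show both steps on a raw response buffer;
 * the names are hypothetical, and the 0xC0/0x3F masks follow the driver
 * logic above.
 */
#include <stddef.h>
#include <stdint.h>

#define	EX_RPTLUN_HEADER	8	/* 4-byte length + 4 reserved bytes */
#define	EX_LUN_SIZE		8	/* each LUN entry is 8 bytes */

static size_t
ex_reportlun_count(const uint8_t *resp, size_t resplen)
{
	uint32_t	list_bytes;

	if (resplen < EX_RPTLUN_HEADER)
		return (0);
	list_bytes = ((uint32_t)resp[0] << 24) | ((uint32_t)resp[1] << 16) |
	    ((uint32_t)resp[2] << 8) | resp[3];
	return (list_bytes / EX_LUN_SIZE);
}

/* Return 0 and fill *lun_num for a supported addressing method, else -1. */
static int
ex_reportlun_entry(const uint8_t *resp, size_t resplen, size_t idx,
    uint16_t *lun_num)
{
	const uint8_t	*entry;

	if (EX_RPTLUN_HEADER + (idx + 1) * EX_LUN_SIZE > resplen)
		return (-1);		/* entry not present in the buffer */
	entry = resp + EX_RPTLUN_HEADER + idx * EX_LUN_SIZE;

	switch (entry[0] & 0xC0) {
	case 0x00:	/* peripheral device addressing */
	case 0x40:	/* flat space (volume set) addressing */
	case 0x80:	/* logical unit addressing */
		*lun_num = (uint16_t)(((entry[0] & 0x3F) << 8) | entry[1]);
		return (0);
	default:	/* extended addressing: not handled here */
		return (-1);
	}
}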
7909 
7910 
7911 /*
7912  * called internally to return a LUN given a target and a LUN number
7913  */
7914 static struct fcp_lun *
7915 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7916 {
7917 	struct fcp_lun	*plun;
7918 
7919 	mutex_enter(&ptgt->tgt_mutex);
7920 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7921 		if (plun->lun_num == lun_num) {
7922 			mutex_exit(&ptgt->tgt_mutex);
7923 			return (plun);
7924 		}
7925 	}
7926 	mutex_exit(&ptgt->tgt_mutex);
7927 
7928 	return (NULL);
7929 }
7930 
7931 
7932 /*
7933  * handle finishing one target for fcp_finish_init
7934  *
7935  * return true (non-zero) if we want finish_init to continue with the
7936  * next target
7937  *
7938  * called with the port mutex held
7939  */
7940 /*ARGSUSED*/
7941 static int
7942 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7943     int link_cnt, int tgt_cnt, int cause)
7944 {
7945 	int	rval = 1;
7946 	ASSERT(pptr != NULL);
7947 	ASSERT(ptgt != NULL);
7948 
7949 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7950 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7951 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7952 	    ptgt->tgt_state);
7953 
7954 	ASSERT(mutex_owned(&pptr->port_mutex));
7955 
7956 	if ((pptr->port_link_cnt != link_cnt) ||
7957 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7958 		/*
7959 		 * oh oh -- another link reset or target change
7960 		 * must have occurred while we are in here
7961 		 */
7962 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7963 
7964 		return (0);
7965 	} else {
7966 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7967 	}
7968 
7969 	mutex_enter(&ptgt->tgt_mutex);
7970 
7971 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7972 		/*
7973 		 * tgt is not offline -- is it marked (i.e. needs
7974 		 * to be offlined) ??
7975 		 */
7976 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7977 			/*
7978 			 * this target not offline *and*
7979 			 * marked
7980 			 */
7981 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7982 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7983 			    tgt_cnt, 0, 0);
7984 		} else {
7985 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7986 
7987 			/* create the LUNs */
7988 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7989 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7990 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7991 				    cause);
7992 				ptgt->tgt_device_created = 1;
7993 			} else {
7994 				fcp_update_tgt_state(ptgt, FCP_RESET,
7995 				    FCP_LUN_BUSY);
7996 			}
7997 		}
7998 	}
7999 
8000 	mutex_exit(&ptgt->tgt_mutex);
8001 
8002 	return (rval);
8003 }
8004 
8005 
8006 /*
8007  * this routine is called to finish port initialization
8008  *
8009  * Each port has a "temp" counter -- when a state change happens (e.g.
8010  * port online), the temp count is set to the number of devices in the map.
8011  * Then, as each device gets "discovered", the temp counter is decremented
8012  * by one.  When this count reaches zero we know that all of the devices
8013  * in the map have been discovered (or an error has occurred), so we can
8014  * then finish initialization -- which is done by this routine (well, this
8015  * and fcp_finish_tgt())
8016  *
8017  * acquires and releases the global mutex
8018  *
8019  * called with the port mutex owned
8020  */
8021 static void
8022 fcp_finish_init(struct fcp_port *pptr)
8023 {
8024 #ifdef	DEBUG
8025 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8026 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8027 	    FCP_STACK_DEPTH);
8028 #endif /* DEBUG */
8029 
8030 	ASSERT(mutex_owned(&pptr->port_mutex));
8031 
8032 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8033 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8034 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8035 
8036 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
8037 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
8038 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8039 		pptr->port_state &= ~FCP_STATE_ONLINING;
8040 		pptr->port_state |= FCP_STATE_ONLINE;
8041 	}
8042 
8043 	/* Wake up threads waiting on config done */
8044 	cv_broadcast(&pptr->port_config_cv);
8045 }
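
/*
 * Illustrative sketch (not part of the driver): the "temp counter" scheme
 * described above is a plain countdown -- seed the counter with the number
 * of devices to discover, decrement it as each discovery completes (or
 * fails), and run the finish step exactly once when it reaches zero.  The
 * standalone example below shows the pattern without the port/target
 * locking the driver needs; all names are hypothetical.
 */
#include <assert.h>

typedef struct ex_discovery {
	int	tmp_cnt;	/* devices still outstanding */
	int	finished;	/* finish step already ran */
} ex_discovery_t;

static void
ex_discovery_start(ex_discovery_t *d, int ndevices)
{
	d->tmp_cnt = ndevices;
	d->finished = 0;
}

/* Called once per device, whether discovery of it succeeded or failed. */
static void
ex_discovery_done_one(ex_discovery_t *d, void (*finish)(ex_discovery_t *))
{
	assert(d->tmp_cnt > 0);
	if (--d->tmp_cnt == 0 && !d->finished) {
		d->finished = 1;
		finish(d);	/* analogous to fcp_finish_init() */
	}
}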
8046 
8047 
8048 /*
8049  * called from fcp_finish_init to create the LUNs for a target
8050  *
8051  * called with the port mutex owned
8052  */
8053 static void
8054 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8055 {
8056 	struct fcp_lun	*plun;
8057 	struct fcp_port	*pptr;
8058 	child_info_t		*cip = NULL;
8059 
8060 	ASSERT(ptgt != NULL);
8061 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8062 
8063 	pptr = ptgt->tgt_port;
8064 
8065 	ASSERT(pptr != NULL);
8066 
8067 	/* scan all LUNs for this target */
8068 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8069 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8070 			continue;
8071 		}
8072 
8073 		if (plun->lun_state & FCP_LUN_MARK) {
8074 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8075 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8076 			    "fcp_create_luns: offlining marked LUN!");
8077 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8078 			continue;
8079 		}
8080 
8081 		plun->lun_state &= ~FCP_LUN_BUSY;
8082 
8083 		/*
8084 		 * There are conditions in which the FCP_LUN_INIT flag is
8085 		 * cleared but we still have a valid plun->lun_cip. To cover
8086 		 * this case, also CLEAR_BUSY whenever we have a valid lun_cip.
8087 		 */
8088 		if (plun->lun_mpxio && plun->lun_cip &&
8089 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8090 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8091 		    0, 0))) {
8092 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8093 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8094 			    "fcp_create_luns: enable lun %p failed!",
8095 			    plun);
8096 		}
8097 
8098 		if (plun->lun_state & FCP_LUN_INIT &&
8099 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8100 			continue;
8101 		}
8102 
8103 		if (cause == FCP_CAUSE_USER_CREATE) {
8104 			continue;
8105 		}
8106 
8107 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8108 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8109 		    "create_luns: passing ONLINE elem to HP thread");
8110 
8111 		/*
8112 		 * If lun has changed, prepare for offlining the old path.
8113 		 * Do not offline the old path right now, since it may be
8114 		 * still opened.
8115 		 */
8116 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8117 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8118 		}
8119 
8120 		/* pass an ONLINE element to the hotplug thread */
8121 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8122 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8123 
8124 			/*
8125 			 * We cannot attach synchronously (i.e. pass
8126 			 * NDI_ONLINE_ATTACH) here, as we might be
8127 			 * coming from an interrupt or callback
8128 			 * thread.
8129 			 */
8130 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8131 			    link_cnt, tgt_cnt, 0, 0)) {
8132 				fcp_log(CE_CONT, pptr->port_dip,
8133 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8134 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8135 			}
8136 		}
8137 	}
8138 }
8139 
8140 
8141 /*
8142  * function to online/offline devices
8143  */
8144 static int
8145 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8146     int online, int lcount, int tcount, int flags)
8147 {
8148 	int			rval = NDI_FAILURE;
8149 	int			circ;
8150 	child_info_t		*ccip;
8151 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8152 	int			is_mpxio = pptr->port_mpxio;
8153 	dev_info_t		*cdip = NULL, *pdip;
8154 	char			*devname;
8155 
8156 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8157 		/*
8158 		 * By the time this event gets serviced, lun_cip and lun_mpxio
8159 		 * have changed, so the event should be invalidated now.
8160 		 */
8161 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8162 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8163 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8164 		return (rval);
8165 	}
8166 
8167 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8168 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8169 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8170 	    "flags=%x mpxio=%x\n",
8171 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8172 	    plun->lun_mpxio);
8173 
8174 	/*
8175 	 * lun_mpxio needs checking here because we can end up in a race
8176 	 * condition where this task has been dispatched while lun_mpxio is
8177 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8178 	 * enable MPXIO for the LUN, but was unable to, and hence cleared
8179 	 * the flag. We rely on the serialization of the tasks here. We return
8180 	 * NDI_SUCCESS so any callers continue without reporting spurious
8181 	 * errors, and they still think we're an MPXIO LUN.
8182 	 */
8183 
8184 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8185 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8186 		if (plun->lun_mpxio) {
8187 			rval = fcp_update_mpxio_path(plun, cip, online);
8188 		} else {
8189 			rval = NDI_SUCCESS;
8190 		}
8191 		return (rval);
8192 	}
8193 
8194 	/*
8195 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8196 	 * executing devfs_clean() if parent lock is held.
8197 	 */
8198 	ASSERT(!servicing_interrupt());
8199 	if (online == FCP_OFFLINE) {
8200 		if (plun->lun_mpxio == 0) {
8201 			if (plun->lun_cip == cip) {
8202 				cdip = DIP(plun->lun_cip);
8203 			} else {
8204 				cdip = DIP(cip);
8205 			}
8206 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8207 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8208 		} else if ((plun->lun_cip != cip) && cip) {
8209 			/*
8210 			 * This means a DTYPE/GUID change; we shall get the
8211 			 * dip of the old cip instead of the current lun_cip.
8212 			 */
8213 			cdip = mdi_pi_get_client(PIP(cip));
8214 		}
8215 		if (cdip) {
8216 			if (i_ddi_devi_attached(cdip)) {
8217 				pdip = ddi_get_parent(cdip);
8218 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8219 				ndi_devi_enter(pdip, &circ);
8220 				(void) ddi_deviname(cdip, devname);
8221 				ndi_devi_exit(pdip, circ);
8222 				/*
8223 				 * Release parent lock before calling
8224 				 * devfs_clean().
8225 				 */
8226 				rval = devfs_clean(pdip, devname + 1,
8227 				    DV_CLEAN_FORCE);
8228 				kmem_free(devname, MAXNAMELEN + 1);
8229 				/*
8230 				 * Return if devfs_clean() fails for
8231 				 * non-MPXIO case.
8232 				 * For MPXIO case, another path could be
8233 				 * offlined.
8234 				 */
8235 				if (rval && plun->lun_mpxio == 0) {
8236 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8237 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8238 					    "fcp_trigger_lun: devfs_clean "
8239 					    "failed rval=%x  dip=%p",
8240 					    rval, pdip);
8241 					return (NDI_FAILURE);
8242 				}
8243 			}
8244 		}
8245 	}
8246 
8247 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8248 		return (NDI_FAILURE);
8249 	}
8250 
8251 	if (is_mpxio) {
8252 		mdi_devi_enter(pptr->port_dip, &circ);
8253 	} else {
8254 		ndi_devi_enter(pptr->port_dip, &circ);
8255 	}
8256 
8257 	mutex_enter(&pptr->port_mutex);
8258 	mutex_enter(&plun->lun_mutex);
8259 
8260 	if (online == FCP_ONLINE) {
8261 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8262 		if (ccip == NULL) {
8263 			goto fail;
8264 		}
8265 	} else {
8266 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8267 			goto fail;
8268 		}
8269 		ccip = cip;
8270 	}
8271 
8272 	if (online == FCP_ONLINE) {
8273 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8274 		    &circ);
8275 		fc_ulp_log_device_event(pptr->port_fp_handle,
8276 		    FC_ULP_DEVICE_ONLINE);
8277 	} else {
8278 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8279 		    &circ);
8280 		fc_ulp_log_device_event(pptr->port_fp_handle,
8281 		    FC_ULP_DEVICE_OFFLINE);
8282 	}
8283 
8284 fail:	mutex_exit(&plun->lun_mutex);
8285 	mutex_exit(&pptr->port_mutex);
8286 
8287 	if (is_mpxio) {
8288 		mdi_devi_exit(pptr->port_dip, circ);
8289 	} else {
8290 		ndi_devi_exit(pptr->port_dip, circ);
8291 	}
8292 
8293 	fc_ulp_idle_port(pptr->port_fp_handle);
8294 
8295 	return (rval);
8296 }
8297 
8298 
8299 /*
8300  * take a target offline by taking all of its LUNs offline
8301  */
8302 /*ARGSUSED*/
8303 static int
8304 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8305     int link_cnt, int tgt_cnt, int nowait, int flags)
8306 {
8307 	struct fcp_tgt_elem	*elem;
8308 
8309 	ASSERT(mutex_owned(&pptr->port_mutex));
8310 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8311 
8312 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8313 
8314 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8315 	    ptgt->tgt_change_cnt)) {
8316 		mutex_exit(&ptgt->tgt_mutex);
8317 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8318 		mutex_enter(&ptgt->tgt_mutex);
8319 
8320 		return (0);
8321 	}
8322 
8323 	ptgt->tgt_pd_handle = NULL;
8324 	mutex_exit(&ptgt->tgt_mutex);
8325 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8326 	mutex_enter(&ptgt->tgt_mutex);
8327 
8328 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8329 
8330 	if (ptgt->tgt_tcap &&
8331 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8332 		elem->flags = flags;
8333 		elem->time = fcp_watchdog_time;
8334 		if (nowait == 0) {
8335 			elem->time += fcp_offline_delay;
8336 		}
8337 		elem->ptgt = ptgt;
8338 		elem->link_cnt = link_cnt;
8339 		elem->tgt_cnt = tgt_cnt;
8340 		elem->next = pptr->port_offline_tgts;
8341 		pptr->port_offline_tgts = elem;
8342 	} else {
8343 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8344 	}
8345 
8346 	return (1);
8347 }
8348 
8349 
8350 static void
8351 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8352     int link_cnt, int tgt_cnt, int flags)
8353 {
8354 	ASSERT(mutex_owned(&pptr->port_mutex));
8355 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8356 
8357 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8358 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8359 	ptgt->tgt_pd_handle = NULL;
8360 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8361 }
8362 
8363 
8364 static void
8365 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8366     int flags)
8367 {
8368 	struct	fcp_lun	*plun;
8369 
8370 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8371 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8372 
8373 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8374 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8375 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8376 		}
8377 	}
8378 }
8379 
8380 
8381 /*
8382  * take a LUN offline
8383  *
8384  * enters and leaves with the target mutex held, releasing it in the process
8385  *
8386  * allocates memory in non-sleep mode
8387  */
8388 static void
8389 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8390     int nowait, int flags)
8391 {
8392 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8393 	struct fcp_lun_elem	*elem;
8394 
8395 	ASSERT(plun != NULL);
8396 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8397 
8398 	if (nowait) {
8399 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8400 		return;
8401 	}
8402 
8403 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8404 		elem->flags = flags;
8405 		elem->time = fcp_watchdog_time;
8406 		if (nowait == 0) {
8407 			elem->time += fcp_offline_delay;
8408 		}
8409 		elem->plun = plun;
8410 		elem->link_cnt = link_cnt;
8411 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8412 		elem->next = pptr->port_offline_luns;
8413 		pptr->port_offline_luns = elem;
8414 	} else {
8415 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8416 	}
8417 }
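
/*
 * Illustrative sketch (not part of the driver): fcp_offline_lun() either
 * offlines the LUN immediately or queues a small element whose expiry time
 * is "now + offline delay" for the watchdog to act on later, falling back to
 * the immediate path if the non-sleeping allocation fails.  The standalone
 * example below shows that enqueue-or-do-it-now shape with hypothetical
 * types; time is a plain integer tick count here.
 */
#include <stdlib.h>

typedef struct ex_off_elem {
	struct ex_off_elem	*next;
	int			expire_tick;	/* when to act */
	void			*obj;		/* what to offline */
} ex_off_elem_t;

/* Stand-in for the real immediate offline work. */
static void
ex_offline_now(void *obj)
{
	(void) obj;
}

static void
ex_offline_deferred(ex_off_elem_t **list, void *obj, int now_tick, int delay)
{
	ex_off_elem_t	*elem = calloc(1, sizeof (*elem));

	if (elem == NULL) {
		ex_offline_now(obj);	/* no memory: do it right away */
		return;
	}
	elem->obj = obj;
	elem->expire_tick = now_tick + delay;
	elem->next = *list;		/* push onto the pending list */
	*list = elem;
}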
8418 
8419 
8420 static void
8421 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8422 {
8423 	struct fcp_pkt	*head = NULL;
8424 
8425 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8426 
8427 	mutex_exit(&LUN_TGT->tgt_mutex);
8428 
8429 	head = fcp_scan_commands(plun);
8430 	if (head != NULL) {
8431 		fcp_abort_commands(head, LUN_PORT);
8432 	}
8433 
8434 	mutex_enter(&LUN_TGT->tgt_mutex);
8435 
8436 	if (plun->lun_cip && plun->lun_mpxio) {
8437 		/*
8438 		 * Notify MPxIO that the LUN busy flag has been cleared.
8439 		 */
8440 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8441 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8442 		    0, 0)) {
8443 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8444 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8445 			    LUN_TGT->tgt_d_id, plun->lun_num);
8446 		}
8447 		/*
8448 		 * Notify MPxIO that the LUN is now marked for offline.
8449 		 */
8450 		mutex_exit(&LUN_TGT->tgt_mutex);
8451 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8452 		mutex_enter(&LUN_TGT->tgt_mutex);
8453 	}
8454 }
8455 
8456 static void
8457 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8458     int flags)
8459 {
8460 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8461 
8462 	mutex_exit(&LUN_TGT->tgt_mutex);
8463 	fcp_update_offline_flags(plun);
8464 	mutex_enter(&LUN_TGT->tgt_mutex);
8465 
8466 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8467 
8468 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8469 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8470 	    "offline_lun: passing OFFLINE elem to HP thread");
8471 
8472 	if (plun->lun_cip) {
8473 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8474 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8475 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8476 		    LUN_TGT->tgt_trace);
8477 
8478 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8479 		    link_cnt, tgt_cnt, flags, 0)) {
8480 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8481 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8482 			    LUN_TGT->tgt_d_id, plun->lun_num);
8483 		}
8484 	}
8485 }
8486 
8487 static void
8488 fcp_scan_offline_luns(struct fcp_port *pptr)
8489 {
8490 	struct fcp_lun_elem	*elem;
8491 	struct fcp_lun_elem	*prev;
8492 	struct fcp_lun_elem	*next;
8493 
8494 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8495 
8496 	prev = NULL;
8497 	elem = pptr->port_offline_luns;
8498 	while (elem) {
8499 		next = elem->next;
8500 		if (elem->time <= fcp_watchdog_time) {
8501 			int			changed = 1;
8502 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8503 
8504 			mutex_enter(&ptgt->tgt_mutex);
8505 			if (pptr->port_link_cnt == elem->link_cnt &&
8506 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8507 				changed = 0;
8508 			}
8509 
8510 			if (!changed &&
8511 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8512 				fcp_offline_lun_now(elem->plun,
8513 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8514 			}
8515 			mutex_exit(&ptgt->tgt_mutex);
8516 
8517 			kmem_free(elem, sizeof (*elem));
8518 
8519 			if (prev) {
8520 				prev->next = next;
8521 			} else {
8522 				pptr->port_offline_luns = next;
8523 			}
8524 		} else {
8525 			prev = elem;
8526 		}
8527 		elem = next;
8528 	}
8529 }
8530 
8531 
8532 static void
8533 fcp_scan_offline_tgts(struct fcp_port *pptr)
8534 {
8535 	struct fcp_tgt_elem	*elem;
8536 	struct fcp_tgt_elem	*prev;
8537 	struct fcp_tgt_elem	*next;
8538 
8539 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8540 
8541 	prev = NULL;
8542 	elem = pptr->port_offline_tgts;
8543 	while (elem) {
8544 		next = elem->next;
8545 		if (elem->time <= fcp_watchdog_time) {
8546 			int		outdated = 1;
8547 			struct fcp_tgt	*ptgt = elem->ptgt;
8548 
8549 			mutex_enter(&ptgt->tgt_mutex);
8550 
8551 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8552 				/* No change on tgt since elem was created. */
8553 				outdated = 0;
8554 			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8555 			    pptr->port_link_cnt == elem->link_cnt + 1 &&
8556 			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8557 				/*
8558 				 * Exactly one thing happened to the target
8559 				 * in between: the local port went offline.
8560 				 * For fp the remote port is already gone, so
8561 				 * it will not tell us again to offline the
8562 				 * target. We must offline it now.
8563 				 */
8564 				outdated = 0;
8565 			}
8566 
8567 			if (!outdated && !(ptgt->tgt_state &
8568 			    FCP_TGT_OFFLINE)) {
8569 				fcp_offline_target_now(pptr,
8570 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8571 				    elem->flags);
8572 			}
8573 
8574 			mutex_exit(&ptgt->tgt_mutex);
8575 
8576 			kmem_free(elem, sizeof (*elem));
8577 
8578 			if (prev) {
8579 				prev->next = next;
8580 			} else {
8581 				pptr->port_offline_tgts = next;
8582 			}
8583 		} else {
8584 			prev = elem;
8585 		}
8586 		elem = next;
8587 	}
8588 }
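
/*
 * Illustrative sketch (not part of the driver): the watchdog scans above act
 * on a deferred element only if the generation counters recorded when the
 * element was queued still describe the current world -- with the one
 * special case where exactly a link-down happened in the meantime.  The
 * standalone predicate below captures that decision; field and parameter
 * names are hypothetical.
 */
typedef struct ex_tgt_elem {
	int	link_cnt;	/* link generation when queued */
	int	tgt_cnt;	/* target generation when queued */
} ex_tgt_elem_t;

/* Return 1 if the element is stale and should be discarded without acting. */
static int
ex_tgt_elem_outdated(const ex_tgt_elem_t *e, int cur_link_cnt,
    int cur_tgt_cnt, int last_cause_was_link_down)
{
	if (cur_tgt_cnt == e->tgt_cnt)
		return (0);	/* nothing changed since it was queued */
	if (cur_tgt_cnt == e->tgt_cnt + 1 &&
	    cur_link_cnt == e->link_cnt + 1 && last_cause_was_link_down)
		return (0);	/* only the local port went down: still act */
	return (1);
}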
8589 
8590 
8591 static void
8592 fcp_update_offline_flags(struct fcp_lun *plun)
8593 {
8594 	struct fcp_port	*pptr = LUN_PORT;
8595 	ASSERT(plun != NULL);
8596 
8597 	mutex_enter(&LUN_TGT->tgt_mutex);
8598 	plun->lun_state |= FCP_LUN_OFFLINE;
8599 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8600 
8601 	mutex_enter(&plun->lun_mutex);
8602 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8603 		dev_info_t *cdip = NULL;
8604 
8605 		mutex_exit(&LUN_TGT->tgt_mutex);
8606 
8607 		if (plun->lun_mpxio == 0) {
8608 			cdip = DIP(plun->lun_cip);
8609 		} else if (plun->lun_cip) {
8610 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8611 		}
8612 
8613 		mutex_exit(&plun->lun_mutex);
8614 		if (cdip) {
8615 			(void) ndi_event_retrieve_cookie(
8616 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8617 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8618 			(void) ndi_event_run_callbacks(
8619 			    pptr->port_ndi_event_hdl, cdip,
8620 			    fcp_remove_eid, NULL);
8621 		}
8622 	} else {
8623 		mutex_exit(&plun->lun_mutex);
8624 		mutex_exit(&LUN_TGT->tgt_mutex);
8625 	}
8626 }
8627 
8628 
8629 /*
8630  * Scan all of the command pkts for this port, moving pkts that
8631  * match our LUN onto our own list (headed by "head")
8632  */
8633 static struct fcp_pkt *
8634 fcp_scan_commands(struct fcp_lun *plun)
8635 {
8636 	struct fcp_port	*pptr = LUN_PORT;
8637 
8638 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8639 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8640 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8641 
8642 	struct fcp_pkt	*head = NULL;	/* head of our list */
8643 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8644 
8645 	int			cmds_found = 0;
8646 
8647 	mutex_enter(&pptr->port_pkt_mutex);
8648 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8649 		struct fcp_lun *tlun =
8650 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8651 
8652 		ncmd = cmd->cmd_next;	/* set next command */
8653 
8654 		/*
8655 		 * if this pkt is for a different LUN or the
8656 		 * command has already been sent down, skip it.
8657 		 */
8658 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8659 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8660 			pcmd = cmd;
8661 			continue;
8662 		}
8663 		cmds_found++;
8664 		if (pcmd != NULL) {
8665 			ASSERT(pptr->port_pkt_head != cmd);
8666 			pcmd->cmd_next = cmd->cmd_next;
8667 		} else {
8668 			ASSERT(cmd == pptr->port_pkt_head);
8669 			pptr->port_pkt_head = cmd->cmd_next;
8670 		}
8671 
8672 		if (cmd == pptr->port_pkt_tail) {
8673 			pptr->port_pkt_tail = pcmd;
8674 			if (pcmd) {
8675 				pcmd->cmd_next = NULL;
8676 			}
8677 		}
8678 
8679 		if (head == NULL) {
8680 			head = tail = cmd;
8681 		} else {
8682 			ASSERT(tail != NULL);
8683 
8684 			tail->cmd_next = cmd;
8685 			tail = cmd;
8686 		}
8687 		cmd->cmd_next = NULL;
8688 	}
8689 	mutex_exit(&pptr->port_pkt_mutex);
8690 
8691 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8692 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8693 	    "scan commands: %d cmd(s) found", cmds_found);
8694 
8695 	return (head);
8696 }
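
/*
 * Illustrative sketch (not part of the driver): fcp_scan_commands() unlinks
 * every queued packet that belongs to one LUN from a singly linked queue
 * (fixing up both head and tail) and strings the matches onto a private
 * list for the caller.  The standalone example below performs the same
 * splice on a minimal node type; ex_queue_t and ex_node_t are hypothetical.
 */
#include <stddef.h>

typedef struct ex_node {
	struct ex_node	*next;
	int		key;		/* stands in for the owning LUN */
} ex_node_t;

typedef struct ex_queue {
	ex_node_t	*head;
	ex_node_t	*tail;
} ex_queue_t;

/* Move every node with a matching key onto a new list and return its head. */
static ex_node_t *
ex_splice_matching(ex_queue_t *q, int key)
{
	ex_node_t	*cur, *next, *prev = NULL;
	ex_node_t	*mhead = NULL, *mtail = NULL;

	for (cur = q->head; cur != NULL; cur = next) {
		next = cur->next;
		if (cur->key != key) {
			prev = cur;		/* keep it on the queue */
			continue;
		}
		if (prev != NULL)		/* unlink from the queue */
			prev->next = next;
		else
			q->head = next;
		if (q->tail == cur)
			q->tail = prev;

		cur->next = NULL;		/* append to the match list */
		if (mhead == NULL)
			mhead = mtail = cur;
		else
			mtail = mtail->next = cur;
	}
	return (mhead);
}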
8697 
8698 
8699 /*
8700  * Abort all the commands in the command queue
8701  */
8702 static void
8703 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8704 {
8705 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8706 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8707 
8708 	ASSERT(mutex_owned(&pptr->port_mutex));
8709 
8710 	/* scan through the pkts and invalidate them */
8711 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8712 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8713 
8714 		ncmd = cmd->cmd_next;
8715 		ASSERT(pkt != NULL);
8716 
8717 		/*
8718 		 * The lun is going to be marked offline. Tell the target
8719 		 * driver not to requeue or retry this command, as the
8720 		 * device is going to be offlined soon.
8721 		 */
8722 		pkt->pkt_reason = CMD_DEV_GONE;
8723 		pkt->pkt_statistics = 0;
8724 		pkt->pkt_state = 0;
8725 
8726 		/* reset cmd flags/state */
8727 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8728 		cmd->cmd_state = FCP_PKT_IDLE;
8729 
8730 		/*
8731 		 * ensure we have a packet completion routine,
8732 		 * then call it.
8733 		 */
8734 		ASSERT(pkt->pkt_comp != NULL);
8735 
8736 		mutex_exit(&pptr->port_mutex);
8737 		fcp_post_callback(cmd);
8738 		mutex_enter(&pptr->port_mutex);
8739 	}
8740 }
8741 
8742 
8743 /*
8744  * the pkt_comp callback for command packets
8745  */
8746 static void
8747 fcp_cmd_callback(fc_packet_t *fpkt)
8748 {
8749 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8750 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8751 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8752 
8753 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8754 
8755 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8756 		cmn_err(CE_PANIC, "Packet already completed %p",
8757 		    (void *)cmd);
8758 	}
8759 
8760 	/*
8761 	 * The watch thread should be freeing the packet; ignore it here.
8762 	 */
8763 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8764 		fcp_log(CE_CONT, pptr->port_dip,
8765 		    "!FCP: Pkt completed while aborting\n");
8766 		return;
8767 	}
8768 	cmd->cmd_state = FCP_PKT_IDLE;
8769 
8770 	fcp_complete_pkt(fpkt);
8771 
8772 #ifdef	DEBUG
8773 	mutex_enter(&pptr->port_pkt_mutex);
8774 	pptr->port_npkts--;
8775 	mutex_exit(&pptr->port_pkt_mutex);
8776 #endif /* DEBUG */
8777 
8778 	fcp_post_callback(cmd);
8779 }
8780 
8781 
8782 static void
8783 fcp_complete_pkt(fc_packet_t *fpkt)
8784 {
8785 	int			error = 0;
8786 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8787 	    fpkt->pkt_ulp_private;
8788 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8789 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8790 	struct fcp_lun	*plun;
8791 	struct fcp_tgt	*ptgt;
8792 	struct fcp_rsp		*rsp;
8793 	struct scsi_address	save;
8794 
8795 #ifdef	DEBUG
8796 	save = pkt->pkt_address;
8797 #endif /* DEBUG */
8798 
8799 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8800 
8801 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8802 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8803 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8804 			    sizeof (struct fcp_rsp));
8805 		}
8806 
8807 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8808 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8809 
8810 		pkt->pkt_resid = 0;
8811 
8812 		if (fpkt->pkt_datalen) {
8813 			pkt->pkt_state |= STATE_XFERRED_DATA;
8814 			if (fpkt->pkt_data_resid) {
8815 				error++;
8816 			}
8817 		}
8818 
8819 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8820 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8821 			/*
8822 			 * The next two checks make sure that if there
8823 			 * is neither sense data nor a valid response and
8824 			 * the command came back with a check condition,
8825 			 * the command gets retried.
8826 			 */
8827 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8828 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8829 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8830 				pkt->pkt_resid = cmd->cmd_dmacount;
8831 			}
8832 		}
8833 
8834 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8835 			return;
8836 		}
8837 
8838 		plun = ADDR2LUN(&pkt->pkt_address);
8839 		ptgt = plun->lun_tgt;
8840 		ASSERT(ptgt != NULL);
8841 
8842 		/*
8843 		 * Update the transfer resid, if appropriate
8844 		 */
8845 		if (rsp->fcp_u.fcp_status.resid_over ||
8846 		    rsp->fcp_u.fcp_status.resid_under) {
8847 			pkt->pkt_resid = rsp->fcp_resid;
8848 		}
8849 
8850 		/*
8851 		 * First see if we got a FCP protocol error.
8852 		 */
8853 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8854 			struct fcp_rsp_info	*bep;
8855 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8856 			    sizeof (struct fcp_rsp));
8857 
8858 			if (fcp_validate_fcp_response(rsp, pptr) !=
8859 			    FC_SUCCESS) {
8860 				pkt->pkt_reason = CMD_CMPLT;
8861 				*(pkt->pkt_scbp) = STATUS_CHECK;
8862 
8863 				fcp_log(CE_WARN, pptr->port_dip,
8864 				    "!SCSI command to d_id=0x%x lun=0x%x"
8865 				    " failed, Bad FCP response values:"
8866 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8867 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8868 				    ptgt->tgt_d_id, plun->lun_num,
8869 				    rsp->reserved_0, rsp->reserved_1,
8870 				    rsp->fcp_u.fcp_status.reserved_0,
8871 				    rsp->fcp_u.fcp_status.reserved_1,
8872 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8873 
8874 				return;
8875 			}
8876 
8877 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8878 				FCP_CP_IN(fpkt->pkt_resp +
8879 				    sizeof (struct fcp_rsp), bep,
8880 				    fpkt->pkt_resp_acc,
8881 				    sizeof (struct fcp_rsp_info));
8882 			}
8883 
8884 			if (bep->rsp_code != FCP_NO_FAILURE) {
8885 				child_info_t	*cip;
8886 
8887 				pkt->pkt_reason = CMD_TRAN_ERR;
8888 
8889 				mutex_enter(&plun->lun_mutex);
8890 				cip = plun->lun_cip;
8891 				mutex_exit(&plun->lun_mutex);
8892 
8893 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8894 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8895 				    "FCP response error on cmd=%p"
8896 				    " target=0x%x, cip=%p", cmd,
8897 				    ptgt->tgt_d_id, cip);
8898 			}
8899 		}
8900 
8901 		/*
8902 		 * See if we got a SCSI error with sense data
8903 		 */
8904 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8905 			uchar_t				rqlen;
8906 			caddr_t				sense_from;
8907 			child_info_t			*cip;
8908 			timeout_id_t			tid;
8909 			struct scsi_arq_status		*arq;
8910 			struct scsi_extended_sense	*sense_to;
8911 
8912 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8913 			sense_to = &arq->sts_sensedata;
8914 
8915 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8916 			    sizeof (struct scsi_extended_sense));
8917 
8918 			sense_from = (caddr_t)fpkt->pkt_resp +
8919 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8920 
8921 			if (fcp_validate_fcp_response(rsp, pptr) !=
8922 			    FC_SUCCESS) {
8923 				pkt->pkt_reason = CMD_CMPLT;
8924 				*(pkt->pkt_scbp) = STATUS_CHECK;
8925 
8926 				fcp_log(CE_WARN, pptr->port_dip,
8927 				    "!SCSI command to d_id=0x%x lun=0x%x"
8928 				    " failed, Bad FCP response values:"
8929 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8930 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8931 				    ptgt->tgt_d_id, plun->lun_num,
8932 				    rsp->reserved_0, rsp->reserved_1,
8933 				    rsp->fcp_u.fcp_status.reserved_0,
8934 				    rsp->fcp_u.fcp_status.reserved_1,
8935 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8936 
8937 				return;
8938 			}
8939 
8940 			/*
8941 			 * copy in sense information
8942 			 */
8943 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8944 				FCP_CP_IN(sense_from, sense_to,
8945 				    fpkt->pkt_resp_acc, rqlen);
8946 			} else {
8947 				bcopy(sense_from, sense_to, rqlen);
8948 			}
8949 
8950 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8951 			    (FCP_SENSE_NO_LUN(sense_to))) {
8952 				mutex_enter(&ptgt->tgt_mutex);
8953 				if (ptgt->tgt_tid == NULL) {
8954 					/*
8955 					 * Kick off rediscovery
8956 					 */
8957 					tid = timeout(fcp_reconfigure_luns,
8958 					    (caddr_t)ptgt, drv_usectohz(1));
8959 
8960 					ptgt->tgt_tid = tid;
8961 					ptgt->tgt_state |= FCP_TGT_BUSY;
8962 				}
8963 				mutex_exit(&ptgt->tgt_mutex);
8964 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8965 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8966 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8967 					    "!FCP: Report Lun Has Changed"
8968 					    " target=%x", ptgt->tgt_d_id);
8969 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8970 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8971 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8972 					    "!FCP: LU Not Supported"
8973 					    " target=%x", ptgt->tgt_d_id);
8974 				}
8975 			}
8976 			ASSERT(pkt->pkt_scbp != NULL);
8977 
8978 			pkt->pkt_state |= STATE_ARQ_DONE;
8979 
8980 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8981 
8982 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8983 			arq->sts_rqpkt_reason = 0;
8984 			arq->sts_rqpkt_statistics = 0;
8985 
8986 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8987 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8988 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8989 			    STATE_XFERRED_DATA;
8990 
8991 			mutex_enter(&plun->lun_mutex);
8992 			cip = plun->lun_cip;
8993 			mutex_exit(&plun->lun_mutex);
8994 
8995 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8996 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8997 			    "SCSI Check condition on cmd=%p target=0x%x"
8998 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8999 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
9000 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
9001 			    rsp->fcp_u.fcp_status.scsi_status,
9002 			    sense_to->es_key, sense_to->es_add_code,
9003 			    sense_to->es_qual_code);
9004 		}
9005 	} else {
9006 		plun = ADDR2LUN(&pkt->pkt_address);
9007 		ptgt = plun->lun_tgt;
9008 		ASSERT(ptgt != NULL);
9009 
9010 		/*
9011 		 * Work harder to translate errors into ones the target
9012 		 * drivers understand. Note with despair that the target
9013 		 * drivers don't decode pkt_state and pkt_reason exhaustively.
9014 		 * They resort to using the big hammer most often, which
9015 		 * may not get fixed in the lifetime of this driver.
9016 		 */
9017 		pkt->pkt_state = 0;
9018 		pkt->pkt_statistics = 0;
9019 
9020 		switch (fpkt->pkt_state) {
9021 		case FC_PKT_TRAN_ERROR:
9022 			switch (fpkt->pkt_reason) {
9023 			case FC_REASON_OVERRUN:
9024 				pkt->pkt_reason = CMD_CMD_OVR;
9025 				pkt->pkt_statistics |= STAT_ABORTED;
9026 				break;
9027 
9028 			case FC_REASON_XCHG_BSY: {
9029 				caddr_t ptr;
9030 
9031 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9032 
9033 				ptr = (caddr_t)pkt->pkt_scbp;
9034 				if (ptr) {
9035 					*ptr = STATUS_BUSY;
9036 				}
9037 				break;
9038 			}
9039 
9040 			case FC_REASON_ABORTED:
9041 				pkt->pkt_reason = CMD_TRAN_ERR;
9042 				pkt->pkt_statistics |= STAT_ABORTED;
9043 				break;
9044 
9045 			case FC_REASON_ABORT_FAILED:
9046 				pkt->pkt_reason = CMD_ABORT_FAIL;
9047 				break;
9048 
9049 			case FC_REASON_NO_SEQ_INIT:
9050 			case FC_REASON_CRC_ERROR:
9051 				pkt->pkt_reason = CMD_TRAN_ERR;
9052 				pkt->pkt_statistics |= STAT_ABORTED;
9053 				break;
9054 			default:
9055 				pkt->pkt_reason = CMD_TRAN_ERR;
9056 				break;
9057 			}
9058 			break;
9059 
9060 		case FC_PKT_PORT_OFFLINE: {
9061 			dev_info_t	*cdip = NULL;
9062 			caddr_t		ptr;
9063 
9064 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9065 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9066 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
9067 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9068 				    ptgt->tgt_d_id);
9069 			}
9070 
9071 			mutex_enter(&plun->lun_mutex);
9072 			if (plun->lun_mpxio == 0) {
9073 				cdip = DIP(plun->lun_cip);
9074 			} else if (plun->lun_cip) {
9075 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9076 			}
9077 
9078 			mutex_exit(&plun->lun_mutex);
9079 
9080 			if (cdip) {
9081 				(void) ndi_event_retrieve_cookie(
9082 				    pptr->port_ndi_event_hdl, cdip,
9083 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9084 				    NDI_EVENT_NOPASS);
9085 				(void) ndi_event_run_callbacks(
9086 				    pptr->port_ndi_event_hdl, cdip,
9087 				    fcp_remove_eid, NULL);
9088 			}
9089 
9090 			/*
9091 			 * If the link goes off-line for a LIP,
9092 			 * this will cause an error to the ST, SG and
9093 			 * SGEN drivers. By setting BUSY we give
9094 			 * the drivers the chance to retry before
9095 			 * they give up on the job. ST will
9096 			 * remember how many times it has retried.
9097 			 */
9098 
9099 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9100 			    (plun->lun_type == DTYPE_CHANGER)) {
9101 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9102 				ptr = (caddr_t)pkt->pkt_scbp;
9103 				if (ptr) {
9104 					*ptr = STATUS_BUSY;
9105 				}
9106 			} else {
9107 				pkt->pkt_reason = CMD_TRAN_ERR;
9108 				pkt->pkt_statistics |= STAT_BUS_RESET;
9109 			}
9110 			break;
9111 		}
9112 
9113 		case FC_PKT_TRAN_BSY:
9114 			/*
9115 			 * Use the ssd Qfull handling here.
9116 			 */
9117 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9118 			pkt->pkt_state = STATE_GOT_BUS;
9119 			break;
9120 
9121 		case FC_PKT_TIMEOUT:
9122 			pkt->pkt_reason = CMD_TIMEOUT;
9123 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9124 				pkt->pkt_statistics |= STAT_TIMEOUT;
9125 			} else {
9126 				pkt->pkt_statistics |= STAT_ABORTED;
9127 			}
9128 			break;
9129 
9130 		case FC_PKT_LOCAL_RJT:
9131 			switch (fpkt->pkt_reason) {
9132 			case FC_REASON_OFFLINE: {
9133 				dev_info_t	*cdip = NULL;
9134 
9135 				mutex_enter(&plun->lun_mutex);
9136 				if (plun->lun_mpxio == 0) {
9137 					cdip = DIP(plun->lun_cip);
9138 				} else if (plun->lun_cip) {
9139 					cdip = mdi_pi_get_client(
9140 					    PIP(plun->lun_cip));
9141 				}
9142 				mutex_exit(&plun->lun_mutex);
9143 
9144 				if (cdip) {
9145 					(void) ndi_event_retrieve_cookie(
9146 					    pptr->port_ndi_event_hdl, cdip,
9147 					    FCAL_REMOVE_EVENT,
9148 					    &fcp_remove_eid,
9149 					    NDI_EVENT_NOPASS);
9150 					(void) ndi_event_run_callbacks(
9151 					    pptr->port_ndi_event_hdl,
9152 					    cdip, fcp_remove_eid, NULL);
9153 				}
9154 
9155 				pkt->pkt_reason = CMD_TRAN_ERR;
9156 				pkt->pkt_statistics |= STAT_BUS_RESET;
9157 
9158 				break;
9159 			}
9160 
9161 			case FC_REASON_NOMEM:
9162 			case FC_REASON_QFULL: {
9163 				caddr_t ptr;
9164 
9165 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9166 				ptr = (caddr_t)pkt->pkt_scbp;
9167 				if (ptr) {
9168 					*ptr = STATUS_BUSY;
9169 				}
9170 				break;
9171 			}
9172 
9173 			case FC_REASON_DMA_ERROR:
9174 				pkt->pkt_reason = CMD_DMA_DERR;
9175 				pkt->pkt_statistics |= STAT_ABORTED;
9176 				break;
9177 
9178 			case FC_REASON_CRC_ERROR:
9179 			case FC_REASON_UNDERRUN: {
9180 				uchar_t		status;
9181 				/*
9182 				 * Workaround for bug 4240945:
9183 				 * the IB on the A5K doesn't set the underrun
9184 				 * bit in the FCP status when it transfers
9185 				 * less than the requested amount of data.
9186 				 * Work around the ses problem to keep luxadm
9187 				 * happy until the IB firmware is fixed.
9188 				 */
9189 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9190 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9191 					    fpkt->pkt_resp_acc,
9192 					    sizeof (struct fcp_rsp));
9193 				}
9194 				status = rsp->fcp_u.fcp_status.scsi_status;
9195 				if (((plun->lun_type & DTYPE_MASK) ==
9196 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9197 					pkt->pkt_reason = CMD_CMPLT;
9198 					*pkt->pkt_scbp = status;
9199 					pkt->pkt_resid = 0;
9200 				} else {
9201 					pkt->pkt_reason = CMD_TRAN_ERR;
9202 					pkt->pkt_statistics |= STAT_ABORTED;
9203 				}
9204 				break;
9205 			}
9206 
9207 			case FC_REASON_NO_CONNECTION:
9208 			case FC_REASON_UNSUPPORTED:
9209 			case FC_REASON_ILLEGAL_REQ:
9210 			case FC_REASON_BAD_SID:
9211 			case FC_REASON_DIAG_BUSY:
9212 			case FC_REASON_FCAL_OPN_FAIL:
9213 			case FC_REASON_BAD_XID:
9214 			default:
9215 				pkt->pkt_reason = CMD_TRAN_ERR;
9216 				pkt->pkt_statistics |= STAT_ABORTED;
9217 				break;
9218 
9219 			}
9220 			break;
9221 
9222 		case FC_PKT_NPORT_RJT:
9223 		case FC_PKT_FABRIC_RJT:
9224 		case FC_PKT_NPORT_BSY:
9225 		case FC_PKT_FABRIC_BSY:
9226 		default:
9227 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9228 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9229 			    "FC Status 0x%x, reason 0x%x",
9230 			    fpkt->pkt_state, fpkt->pkt_reason);
9231 			pkt->pkt_reason = CMD_TRAN_ERR;
9232 			pkt->pkt_statistics |= STAT_ABORTED;
9233 			break;
9234 		}
9235 
9236 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9237 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9238 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9239 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9240 		    fpkt->pkt_reason);
9241 	}
9242 
9243 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9244 }
9245 
9246 
9247 static int
9248 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9249 {
9250 	if (rsp->reserved_0 || rsp->reserved_1 ||
9251 	    rsp->fcp_u.fcp_status.reserved_0 ||
9252 	    rsp->fcp_u.fcp_status.reserved_1) {
9253 		/*
9254 		 * These reserved fields should ideally be zero. FCP-2 does say
9255 		 * that the recipient need not check for reserved fields to be
9256 		 * zero. If they are not zero, we will not make a fuss about it:
9257 		 * just log it (to both the trace buffer and the messages file
9258 		 * in debug kernels, to the trace buffer only in non-debug) and move on.
9259 		 *
9260 		 * Non-zero reserved fields were seen with minnows.
9261 		 *
9262 		 * qlc takes care of some of this but we cannot assume that all
9263 		 * FCAs will do so.
9264 		 */
9265 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9266 		    FCP_BUF_LEVEL_5, 0,
9267 		    "Got fcp response packet with non-zero reserved fields "
9268 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9269 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9270 		    rsp->reserved_0, rsp->reserved_1,
9271 		    rsp->fcp_u.fcp_status.reserved_0,
9272 		    rsp->fcp_u.fcp_status.reserved_1);
9273 	}
9274 
9275 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9276 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9277 		return (FC_FAILURE);
9278 	}
9279 
9280 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9281 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9282 	    sizeof (struct fcp_rsp))) {
9283 		return (FC_FAILURE);
9284 	}
9285 
9286 	return (FC_SUCCESS);
9287 }
9288 
9289 
9290 /*
9291  * This is called when there is a change in the device state. The case we're
9292  * handling here is: if the d_id does not match, offline this tgt and online
9293  * a new tgt with the new d_id.	 Called from fcp_handle_devices with
9294  * port_mutex held.
9295  */
9296 static int
9297 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9298     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9299 {
9300 	ASSERT(mutex_owned(&pptr->port_mutex));
9301 
9302 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9303 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9304 	    "Starting fcp_device_changed...");
9305 
9306 	/*
9307 	 * The two cases in which this function is called are when the device
9308 	 * changes either its d_id or its hard address.
9309 	 */
9310 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9311 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9312 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9313 
9314 		/* offline this target */
9315 		mutex_enter(&ptgt->tgt_mutex);
9316 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9317 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9318 			    0, 1, NDI_DEVI_REMOVE);
9319 		}
9320 		mutex_exit(&ptgt->tgt_mutex);
9321 
9322 		fcp_log(CE_NOTE, pptr->port_dip,
9323 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9324 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9325 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9326 		    map_entry->map_hard_addr.hard_addr);
9327 	}
9328 
9329 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9330 	    link_cnt, tgt_cnt, cause));
9331 }
9332 
9333 /*
9334  *     Function: fcp_alloc_lun
9335  *
9336  *  Description: Creates a new lun structure and adds it to the list
9337  *		 of luns of the target.
9338  *
9339  *     Argument: ptgt		Target the lun will belong to.
9340  *
9341  * Return Value: NULL		Failed
9342  *		 Not NULL	Succeeded
9343  *
9344  *	Context: Kernel context
9345  */
9346 static struct fcp_lun *
9347 fcp_alloc_lun(struct fcp_tgt *ptgt)
9348 {
9349 	struct fcp_lun *plun;
9350 
9351 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9352 	if (plun != NULL) {
9353 		/*
9354 		 * Initialize the mutex before putting the lun in the target
9355 		 * list, especially before releasing the target mutex.
9356 		 */
9357 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9358 		plun->lun_tgt = ptgt;
9359 
9360 		mutex_enter(&ptgt->tgt_mutex);
9361 		plun->lun_next = ptgt->tgt_lun;
9362 		ptgt->tgt_lun = plun;
9363 		plun->lun_old_guid = NULL;
9364 		plun->lun_old_guid_size = 0;
9365 		mutex_exit(&ptgt->tgt_mutex);
9366 	}
9367 
9368 	return (plun);
9369 }
9370 
9371 /*
9372  *     Function: fcp_dealloc_lun
9373  *
9374  *  Description: Frees the LUN structure passed by the caller.
9375  *
9376  *     Argument: plun		LUN structure to free.
9377  *
9378  * Return Value: None
9379  *
9380  *	Context: Kernel context.
9381  */
9382 static void
9383 fcp_dealloc_lun(struct fcp_lun *plun)
9384 {
9385 	mutex_enter(&plun->lun_mutex);
9386 	if (plun->lun_cip) {
9387 		fcp_remove_child(plun);
9388 	}
9389 	mutex_exit(&plun->lun_mutex);
9390 
9391 	mutex_destroy(&plun->lun_mutex);
9392 	if (plun->lun_guid) {
9393 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9394 	}
9395 	if (plun->lun_old_guid) {
9396 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9397 	}
9398 	kmem_free(plun, sizeof (*plun));
9399 }
9400 
9401 /*
9402  *     Function: fcp_alloc_tgt
9403  *
9404  *  Description: Creates a new target structure and adds it to the port
9405  *		 hash list.
9406  *
9407  *     Argument: pptr		fcp port structure
9408  *		 *map_entry	entry describing the target to create
9409  *		 link_cnt	Link state change counter
9410  *
9411  * Return Value: NULL		Failed
9412  *		 Not NULL	Succeeded
9413  *
9414  *	Context: Kernel context.
9415  */
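/*
 * Editorial sketch (not part of the original driver): a target inserted
 * into the hash chain below is typically found again by hashing the same
 * port WWN and walking the chain, roughly:
 *
 *	hash = FCP_HASH(wwn);
 *	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
 *	    ptgt = ptgt->tgt_next) {
 *		if (bcmp(wwn, &ptgt->tgt_port_wwn, FC_WWN_SIZE) == 0)
 *			break;
 *	}
 *
 * Only FCP_HASH, port_tgt_hash_table and tgt_next appear in this function;
 * the tgt_port_wwn field name is an assumption made for illustration.
 */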
9416 static struct fcp_tgt *
9417 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9418 {
9419 	int			hash;
9420 	uchar_t			*wwn;
9421 	struct fcp_tgt	*ptgt;
9422 
9423 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9424 	if (ptgt != NULL) {
9425 		mutex_enter(&pptr->port_mutex);
9426 		if (link_cnt != pptr->port_link_cnt) {
9427 			/*
9428 			 * oh oh -- another link reset
9429 			 * in progress -- give up
9430 			 */
9431 			mutex_exit(&pptr->port_mutex);
9432 			kmem_free(ptgt, sizeof (*ptgt));
9433 			ptgt = NULL;
9434 		} else {
9435 			/*
9436 			 * initialize the mutex before putting the target in
9437 			 * the port wwn list, especially before releasing the
9438 			 * port mutex.
9439 			 */
9440 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9441 
9442 			/* add new target entry to the port's hash list */
9443 			wwn = (uchar_t *)&map_entry->map_pwwn;
9444 			hash = FCP_HASH(wwn);
9445 
9446 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9447 			pptr->port_tgt_hash_table[hash] = ptgt;
9448 
9449 			/* save cross-ptr */
9450 			ptgt->tgt_port = pptr;
9451 
9452 			ptgt->tgt_change_cnt = 1;
9453 
9454 			/* initialize the target manual_config_only flag */
9455 			if (fcp_enable_auto_configuration) {
9456 				ptgt->tgt_manual_config_only = 0;
9457 			} else {
9458 				ptgt->tgt_manual_config_only = 1;
9459 			}
9460 
9461 			mutex_exit(&pptr->port_mutex);
9462 		}
9463 	}
9464 
9465 	return (ptgt);
9466 }
9467 
9468 /*
9469  *     Function: fcp_dealloc_tgt
9470  *
9471  *  Description: Frees the target structure passed by the caller.
9472  *
9473  *     Argument: ptgt		Target structure to free.
9474  *
9475  * Return Value: None
9476  *
9477  *	Context: Kernel context.
9478  */
9479 static void
9480 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9481 {
9482 	mutex_destroy(&ptgt->tgt_mutex);
9483 	kmem_free(ptgt, sizeof (*ptgt));
9484 }
9485 
9486 
9487 /*
9488  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9489  *
9490  *	Device discovery commands will not be retried forever, as
9491  *	that would have repercussions on other devices that need to
9492  *	be submitted to the hotplug thread. After a quick glance
9493  *	at the SCSI-3 spec, it was found that the spec doesn't
9494  *	mandate a forever retry; rather, it recommends a delayed retry.
9495  *
9496  *	Since the Photon IB is single threaded, STATUS_BUSY is common
9497  *	in a 4+ initiator environment. Make sure the total time
9498  *	spent on retries (including the command timeout) does not
9499  *	exceed 60 seconds.
9500  */
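/*
 * Editorial note (an illustrative reading, not a guarantee): the requeue
 * below schedules the next attempt with
 *
 *	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
 *
 * so the delay before each retry grows by one watchdog tick per requeue
 * (a tick is roughly fcp_watchdog_timeout seconds; see the timeout setup
 * in fcp_handle_port_attach).  The cumulative queue delay therefore grows
 * roughly quadratically with the retry count, and the retry count is
 * expected to be bounded elsewhere so that the total stays under the
 * 60 seconds mentioned above.
 */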
9501 static void
9502 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9503 {
9504 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9505 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9506 
9507 	mutex_enter(&pptr->port_mutex);
9508 	mutex_enter(&ptgt->tgt_mutex);
9509 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9510 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9511 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9512 		    "fcp_queue_ipkt,1:state change occurred"
9513 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9514 		mutex_exit(&ptgt->tgt_mutex);
9515 		mutex_exit(&pptr->port_mutex);
9516 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9517 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9518 		fcp_icmd_free(pptr, icmd);
9519 		return;
9520 	}
9521 	mutex_exit(&ptgt->tgt_mutex);
9522 
9523 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9524 
9525 	if (pptr->port_ipkt_list != NULL) {
9526 		/* add pkt to front of doubly-linked list */
9527 		pptr->port_ipkt_list->ipkt_prev = icmd;
9528 		icmd->ipkt_next = pptr->port_ipkt_list;
9529 		pptr->port_ipkt_list = icmd;
9530 		icmd->ipkt_prev = NULL;
9531 	} else {
9532 		/* this is the first/only pkt on the list */
9533 		pptr->port_ipkt_list = icmd;
9534 		icmd->ipkt_next = NULL;
9535 		icmd->ipkt_prev = NULL;
9536 	}
9537 	mutex_exit(&pptr->port_mutex);
9538 }
9539 
9540 /*
9541  *     Function: fcp_transport
9542  *
9543  *  Description: This function submits the Fibre Channel packet to the transport
9544  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9545  *		 fails the submission, the treatment depends on the value of
9546  *		 the variable internal.
9547  *
9548  *     Argument: port_handle	fp/fctl port handle.
9549  *		 *fpkt		Packet to submit to the transport layer.
9550  *		 internal	Not zero when it's an internal packet.
9551  *
9552  * Return Value: FC_TRAN_BUSY
9553  *		 FC_STATEC_BUSY
9554  *		 FC_OFFLINE
9555  *		 FC_LOGINREQ
9556  *		 FC_DEVICE_BUSY
9557  *		 FC_SUCCESS
9558  */
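/*
 * Editorial sketch of a typical internal-command call site (assumed, based
 * on the call sites visible elsewhere in this file; fpkt stands for the
 * fc_packet_t embedded in the internal packet icmd):
 *
 *	if (fcp_transport(pptr->port_fp_handle, fpkt, 1) != FC_SUCCESS) {
 *		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
 *		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
 *		fcp_icmd_free(pptr, icmd);
 *	}
 *
 * External commands instead rely on the FC_TRAN_BUSY/queueing behavior
 * implemented at the bottom of this function.
 */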
9559 static int
9560 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9561 {
9562 	int	rval;
9563 
9564 	rval = fc_ulp_transport(port_handle, fpkt);
9565 	if (rval == FC_SUCCESS) {
9566 		return (rval);
9567 	}
9568 
9569 	/*
9570 	 * The LUN isn't marked BUSY or OFFLINE, which is why we got here to
9571 	 * transport a command. If the underlying modules see a state
9572 	 * change, or that a port is OFFLINE, that state change
9573 	 * hasn't reached FCP yet, so re-queue the command for deferred
9574 	 * submission.
9575 	 */
9576 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9577 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9578 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9579 		/*
9580 		 * Defer packet re-submission. A permanent hang is possible on
9581 		 * internal commands if the port driver sends FC_STATEC_BUSY
9582 		 * forever, but that shouldn't happen in a good environment.
9583 		 * Limiting re-transport for internal commands is probably a
9584 		 * good idea.
9585 		 * A race condition can happen when a port sees a barrage of
9586 		 * link transitions offline to online. If the FCTL has
9587 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9588 		 * internal commands should be queued to do the discovery.
9589 		 * The race condition is when an online comes and FCP starts
9590 		 * its internal discovery and the link goes offline. It is
9591 		 * possible that the statec_callback has not reached FCP
9592 		 * and FCP is carrying on with its internal discovery.
9593 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9594 		 * that the link has gone offline. At this point FCP should
9595 		 * drop all the internal commands and wait for the
9596 		 * statec_callback. It will be facilitated by incrementing
9597 		 * port_link_cnt.
9598 		 *
9599 		 * For external commands, the (FC) pkt_timeout is decremented
9600 		 * by the queue delay added by our driver. Care is taken to
9601 		 * ensure that it doesn't become zero (zero means no timeout).
9602 		 * If the time expires right inside the driver queue itself,
9603 		 * the watch thread will return it to the original caller,
9604 		 * indicating that the command has timed out.
9605 		 */
9606 		if (internal) {
9607 			char			*op;
9608 			struct fcp_ipkt	*icmd;
9609 
9610 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9611 			switch (icmd->ipkt_opcode) {
9612 			case SCMD_REPORT_LUN:
9613 				op = "REPORT LUN";
9614 				break;
9615 
9616 			case SCMD_INQUIRY:
9617 				op = "INQUIRY";
9618 				break;
9619 
9620 			case SCMD_INQUIRY_PAGE83:
9621 				op = "INQUIRY-83";
9622 				break;
9623 
9624 			default:
9625 				op = "Internal SCSI COMMAND";
9626 				break;
9627 			}
9628 
9629 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9630 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9631 				rval = FC_SUCCESS;
9632 			}
9633 		} else {
9634 			struct fcp_pkt *cmd;
9635 			struct fcp_port *pptr;
9636 
9637 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9638 			cmd->cmd_state = FCP_PKT_IDLE;
9639 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9640 
9641 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9642 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9643 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9644 				    "fcp_transport: xport busy for pkt %p",
9645 				    cmd->cmd_pkt);
9646 				rval = FC_TRAN_BUSY;
9647 			} else {
9648 				fcp_queue_pkt(pptr, cmd);
9649 				rval = FC_SUCCESS;
9650 			}
9651 		}
9652 	}
9653 
9654 	return (rval);
9655 }
9656 
9657 /*VARARGS3*/
9658 static void
9659 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9660 {
9661 	char		buf[256];
9662 	va_list		ap;
9663 
9664 	if (dip == NULL) {
9665 		dip = fcp_global_dip;
9666 	}
9667 
9668 	va_start(ap, fmt);
9669 	(void) vsprintf(buf, fmt, ap);
9670 	va_end(ap);
9671 
9672 	scsi_log(dip, "fcp", level, buf);
9673 }
9674 
9675 /*
9676  * This function retries the name server registration of the FC4 type.
9677  * It assumes that the port mutex is held.
9678  * The function does nothing if the topology is not fabric, so the
9679  * topology has to be set before this function can be called.
9680  */
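/*
 * Editorial note: port_mutex is dropped around the fcp_do_ns_registry()
 * call below because that registration is a synchronous round trip to the
 * name server; the FCP_STATE_NS_REG_FAILED flag is only cleared after the
 * mutex has been reacquired.
 */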
9681 static void
9682 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9683 {
9684 	int	rval;
9685 
9686 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9687 
9688 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9689 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9690 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9691 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9692 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9693 		}
9694 		return;
9695 	}
9696 	mutex_exit(&pptr->port_mutex);
9697 	rval = fcp_do_ns_registry(pptr, s_id);
9698 	mutex_enter(&pptr->port_mutex);
9699 
9700 	if (rval == 0) {
9701 		/* Registry successful. Reset flag */
9702 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9703 	}
9704 }
9705 
9706 /*
9707  * This function registers the ULP with the switch by calling the transport interface.
9708  */
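/*
 * Editorial worked example (an assumption about the helper macros, for
 * illustration only): the FC-4 TYPE code for SCSI-FCP is 0x08, so with
 * FC4_TYPE_WORD_POS() presumably returning type / 32 and
 * FC4_TYPE_BIT_POS() returning type % 32, the bitmap built below becomes
 *
 *	types[0] = 1 << 8;	(0x00000100)
 *
 * i.e. word 0, bit 8 of the FC-4 TYPEs bitmap that the RFT_ID request
 * carries to the name server for FC_TYPE_SCSI_FCP.
 */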
9709 static int
9710 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9711 {
9712 	fc_ns_cmd_t		ns_cmd;
9713 	ns_rfc_type_t		rfc;
9714 	uint32_t		types[8];
9715 
9716 	/*
9717 	 * Prepare the Name server structure to
9718 	 * register with the transport in case of
9719 	 * Fabric configuration.
9720 	 */
9721 	bzero(&rfc, sizeof (rfc));
9722 	bzero(types, sizeof (types));
9723 
9724 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9725 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9726 
9727 	rfc.rfc_port_id.port_id = s_id;
9728 	bcopy(types, rfc.rfc_types, sizeof (types));
9729 
9730 	ns_cmd.ns_flags = 0;
9731 	ns_cmd.ns_cmd = NS_RFT_ID;
9732 	ns_cmd.ns_req_len = sizeof (rfc);
9733 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9734 	ns_cmd.ns_resp_len = 0;
9735 	ns_cmd.ns_resp_payload = NULL;
9736 
9737 	/*
9738 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9739 	 */
9740 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9741 		fcp_log(CE_WARN, pptr->port_dip,
9742 		    "!ns_registry: failed name server registration");
9743 		return (1);
9744 	}
9745 
9746 	return (0);
9747 }
9748 
9749 /*
9750  *     Function: fcp_handle_port_attach
9751  *
9752  *  Description: This function is called from fcp_port_attach() to attach a
9753  *		 new port. This routine does the following:
9754  *
9755  *		1) Allocates an fcp_port structure and initializes it.
9756  *		2) Tries to register the new FC-4 (FCP) capability with the name
9757  *		   server.
9758  *		3) Kicks off the enumeration of the targets/luns visible
9759  *		   through this new port.  That is done by calling
9760  *		   fcp_statec_callback() if the port is online.
9761  *
9762  *     Argument: ulph		fp/fctl port handle.
9763  *		 *pinfo		Port information.
9764  *		 s_id		Port ID.
9765  *		 instance	Device instance number for the local port
9766  *				(returned by ddi_get_instance()).
9767  *
9768  * Return Value: DDI_SUCCESS
9769  *		 DDI_FAILURE
9770  *
9771  *	Context: User and Kernel context.
9772  */
9773 /*ARGSUSED*/
9774 int
9775 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9776     uint32_t s_id, int instance)
9777 {
9778 	int			res = DDI_FAILURE;
9779 	scsi_hba_tran_t		*tran;
9780 	int			mutex_initted = FALSE;
9781 	int			hba_attached = FALSE;
9782 	int			soft_state_linked = FALSE;
9783 	int			event_bind = FALSE;
9784 	struct fcp_port		*pptr;
9785 	fc_portmap_t		*tmp_list = NULL;
9786 	uint32_t		max_cnt, alloc_cnt;
9787 	uchar_t			*boot_wwn = NULL;
9788 	uint_t			nbytes;
9789 	int			manual_cfg;
9790 
9791 	/*
9792 	 * This port instance is attaching for the first time (or after
9793 	 * having been detached before).
9794 	 */
9795 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9796 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9797 
9798 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9799 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed "
9800 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9801 		    instance);
9802 		return (res);
9803 	}
9804 
9805 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9806 		/* this shouldn't happen */
9807 		ddi_soft_state_free(fcp_softstate, instance);
9808 		cmn_err(CE_WARN, "fcp: bad soft state");
9809 		return (res);
9810 	}
9811 
9812 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9813 
9814 	/*
9815 	 * Make a copy of ulp_port_info as fctl allocates
9816 	 * a temp struct.
9817 	 */
9818 	(void) fcp_cp_pinfo(pptr, pinfo);
9819 
9820 	/*
9821 	 * Check for manual_configuration_only property.
9822 	 * Enable manual configuration if the property is
9823 	 * set to 1, otherwise disable manual configuration.
9824 	 */
9825 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9826 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9827 	    MANUAL_CFG_ONLY,
9828 	    -1)) != -1) {
9829 		if (manual_cfg == 1) {
9830 			char	*pathname;
9831 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9832 			(void) ddi_pathname(pptr->port_dip, pathname);
9833 			cmn_err(CE_NOTE,
9834 			    "%s (%s%d) %s is enabled via %s.conf.",
9835 			    pathname,
9836 			    ddi_driver_name(pptr->port_dip),
9837 			    ddi_get_instance(pptr->port_dip),
9838 			    MANUAL_CFG_ONLY,
9839 			    ddi_driver_name(pptr->port_dip));
9840 			fcp_enable_auto_configuration = 0;
9841 			kmem_free(pathname, MAXPATHLEN);
9842 		}
9843 	}
9844 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9845 	pptr->port_link_cnt = 1;
9846 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9847 	pptr->port_id = s_id;
9848 	pptr->port_instance = instance;
9849 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9850 	pptr->port_state = FCP_STATE_INIT;
9851 	if (pinfo->port_acc_attr == NULL) {
9852 		/*
9853 		 * The corresponding FCA doesn't support DMA at all
9854 		 */
9855 		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9856 	}
9857 
9858 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9859 
9860 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9861 		/*
9862 		 * If the FCA supports DMA in the SCSI data phase, we need to
9863 		 * preallocate DMA cookies, so stash the cookie size here.
9864 		 */
9865 		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9866 		    pptr->port_data_dma_attr.dma_attr_sgllen;
9867 	}
9868 
9869 	/*
9870 	 * The two mutexes of fcp_port are initialized.	 The variable
9871 	 * mutex_initted is incremented to remember that fact.	That variable
9872 	 * is checked when the routine fails and the mutexes have to be
9873 	 * destroyed.
9874 	 */
9875 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9876 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9877 	mutex_initted++;
9878 
9879 	/*
9880 	 * The SCSI tran structure is allocated and initialized now.
9881 	 */
9882 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9883 		fcp_log(CE_WARN, pptr->port_dip,
9884 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9885 		goto fail;
9886 	}
9887 
9888 	/* link in the transport structure then fill it in */
9889 	pptr->port_tran = tran;
9890 	tran->tran_hba_private		= pptr;
9891 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9892 	tran->tran_tgt_probe		= NULL;
9893 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9894 	tran->tran_start		= fcp_scsi_start;
9895 	tran->tran_reset		= fcp_scsi_reset;
9896 	tran->tran_abort		= fcp_scsi_abort;
9897 	tran->tran_getcap		= fcp_scsi_getcap;
9898 	tran->tran_setcap		= fcp_scsi_setcap;
9899 	tran->tran_init_pkt		= NULL;
9900 	tran->tran_destroy_pkt		= NULL;
9901 	tran->tran_dmafree		= NULL;
9902 	tran->tran_sync_pkt		= NULL;
9903 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9904 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9905 	tran->tran_get_name		= fcp_scsi_get_name;
9906 	tran->tran_clear_aca		= NULL;
9907 	tran->tran_clear_task_set	= NULL;
9908 	tran->tran_terminate_task	= NULL;
9909 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9910 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9911 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9912 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9913 	tran->tran_quiesce		= NULL;
9914 	tran->tran_unquiesce		= NULL;
9915 	tran->tran_bus_reset		= NULL;
9916 	tran->tran_bus_config		= fcp_scsi_bus_config;
9917 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9918 	tran->tran_bus_power		= NULL;
9919 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9920 
9921 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9922 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9923 	tran->tran_setup_pkt		= fcp_pkt_setup;
9924 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9925 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9926 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9927 	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9928 		/*
9929 		 * If the FCA doesn't support DMA, we use different vectors to
9930 		 * minimize the effects on the DMA code flow path.
9931 		 */
9932 		tran->tran_start	   = fcp_pseudo_start;
9933 		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
9934 		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
9935 		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
9936 		tran->tran_dmafree	   = fcp_pseudo_dmafree;
9937 		tran->tran_setup_pkt	   = NULL;
9938 		tran->tran_teardown_pkt	   = NULL;
9939 		tran->tran_pkt_constructor = NULL;
9940 		tran->tran_pkt_destructor  = NULL;
9941 		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
9942 	}
9943 
9944 	/*
9945 	 * Allocate an ndi event handle
9946 	 */
9947 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9948 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9949 
9950 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9951 	    sizeof (fcp_ndi_event_defs));
9952 
9953 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9954 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9955 
9956 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9957 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9958 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9959 
9960 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9961 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9962 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9963 		goto fail;
9964 	}
9965 	event_bind++;	/* Checked in fail case */
9966 
9967 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9968 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9969 	    != DDI_SUCCESS) {
9970 		fcp_log(CE_WARN, pptr->port_dip,
9971 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9972 		goto fail;
9973 	}
9974 	hba_attached++;	/* Checked in fail case */
9975 
9976 	pptr->port_mpxio = 0;
9977 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9978 	    MDI_SUCCESS) {
9979 		pptr->port_mpxio++;
9980 	}
9981 
9982 	/*
9983 	 * The following code puts the new port structure in the global
9984 	 * list of ports and, if it is the first port to attach, it starts
9985 	 * the fcp_watchdog_tick.
9986 	 *
9987 	 * Why put this new port in the global list before we are done
9988 	 * attaching it?  We are actually making the structure globally known
9989 	 * before we are done attaching it.  The reason for that is the code
9990 	 * that follows.  At this point the resources to handle the port are
9991 	 * allocated.  This function is now going to do the following:
9992 	 *
9993 	 *   1) It is going to try to register with the name server,
9994 	 *	advertising the new FCP capability of the port.
9995 	 *   2) It is going to play the role of the fp/fctl layer by building
9996 	 *	a list of worldwide names reachable through this port and
9997 	 *	calling fcp_statec_callback() itself.  That requires the port
9998 	 *	to be part of the global list.
9999 	 */
10000 	mutex_enter(&fcp_global_mutex);
10001 	if (fcp_port_head == NULL) {
10002 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
10003 	}
10004 	pptr->port_next = fcp_port_head;
10005 	fcp_port_head = pptr;
10006 	soft_state_linked++;
10007 
10008 	if (fcp_watchdog_init++ == 0) {
10009 		fcp_watchdog_tick = fcp_watchdog_timeout *
10010 		    drv_usectohz(1000000);
10011 		fcp_watchdog_id = timeout(fcp_watch, NULL,
10012 		    fcp_watchdog_tick);
10013 	}
10014 	mutex_exit(&fcp_global_mutex);
10015 
10016 	/*
10017 	 * Here an attempt is made to register the new FCP capability with
10018 	 * the name server.  That is done using an RFT_ID request to the name
10019 	 * server.  It is done synchronously.  The function fcp_do_ns_registry()
10020 	 * doesn't return until the name server has responded.
10021 	 * On failures, just ignore it for now; it will get retried during
10022 	 * state change callbacks.  We'll set a flag to record the failure.
10023 	 */
10024 	if (fcp_do_ns_registry(pptr, s_id)) {
10025 		mutex_enter(&pptr->port_mutex);
10026 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10027 		mutex_exit(&pptr->port_mutex);
10028 	} else {
10029 		mutex_enter(&pptr->port_mutex);
10030 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10031 		mutex_exit(&pptr->port_mutex);
10032 	}
10033 
10034 	/*
10035 	 * Lookup for boot WWN property
10036 	 */
10037 	if (modrootloaded != 1) {
10038 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10039 		    ddi_get_parent(pinfo->port_dip),
10040 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10041 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10042 		    (nbytes == FC_WWN_SIZE)) {
10043 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10044 		}
10045 		if (boot_wwn) {
10046 			ddi_prop_free(boot_wwn);
10047 		}
10048 	}
10049 
10050 	/*
10051 	 * Handle various topologies and link states.
10052 	 */
10053 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10054 	case FC_STATE_OFFLINE:
10055 
10056 		/*
10057 		 * we're attaching a port where the link is offline
10058 		 *
10059 		 * Wait for ONLINE, at which time a state
10060 		 * change will cause a statec_callback
10061 		 *
10062 		 * in the meantime, do not do anything
10063 		 */
10064 		res = DDI_SUCCESS;
10065 		pptr->port_state |= FCP_STATE_OFFLINE;
10066 		break;
10067 
10068 	case FC_STATE_ONLINE: {
10069 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
10070 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10071 			res = DDI_SUCCESS;
10072 			break;
10073 		}
10074 		/*
10075 		 * discover devices and create nodes (a private
10076 		 * loop or point-to-point)
10077 		 */
10078 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10079 
10080 		/*
10081 		 * At this point we are going to build a list of all the ports
10082 		 * that	can be reached through this local port.	 It looks like
10083 		 * we cannot handle more than FCP_MAX_DEVICES per local port
10084 		 * (128).
10085 		 */
10086 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10087 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10088 		    KM_NOSLEEP)) == NULL) {
10089 			fcp_log(CE_WARN, pptr->port_dip,
10090 			    "!fcp%d: failed to allocate portmap",
10091 			    instance);
10092 			goto fail;
10093 		}
10094 
10095 		/*
10096 		 * fc_ulp_getportmap() is going to provide us with the list of
10097 		 * remote ports in the buffer we just allocated.  The way the
10098 		 * list is going to be retrieved depends on the topology.
10099 		 * However, if we are connected to a Fabric, a name server
10100 		 * request may be sent to get the list of FCP capable ports.
10101 		 * It should be noted that in that case the request is
10102 		 * synchronous.	 This means we are stuck here until the name
10103 		 * server replies.  A lot of things can change during that time,
10104 		 * including, possibly, being called on
10105 		 * fcp_statec_callback() for different reasons. I'm not sure
10106 		 * the code can handle that.
10107 		 */
10108 		max_cnt = FCP_MAX_DEVICES;
10109 		alloc_cnt = FCP_MAX_DEVICES;
10110 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10111 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10112 		    FC_SUCCESS) {
10113 			caddr_t msg;
10114 
10115 			(void) fc_ulp_error(res, &msg);
10116 
10117 			/*
10118 			 * This just means the transport is
10119 			 * busy, perhaps building a portmap, so,
10120 			 * for now, succeed this port attach.
10121 			 * When the transport has a new map,
10122 			 * it'll send us a state change then.
10123 			 */
10124 			fcp_log(CE_WARN, pptr->port_dip,
10125 			    "!failed to get port map : %s", msg);
10126 
10127 			res = DDI_SUCCESS;
10128 			break;	/* go return result */
10129 		}
10130 		if (max_cnt > alloc_cnt) {
10131 			alloc_cnt = max_cnt;
10132 		}
10133 
10134 		/*
10135 		 * We are now going to call fcp_statec_callback() ourselves.
10136 		 * By issuing this call we are trying to kick off the enumera-
10137 		 * tion process.
10138 		 */
10139 		/*
10140 		 * let the state change callback do the SCSI device
10141 		 * discovery and create the devinfos
10142 		 */
10143 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10144 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10145 		    max_cnt, pptr->port_id);
10146 
10147 		res = DDI_SUCCESS;
10148 		break;
10149 	}
10150 
10151 	default:
10152 		/* unknown port state */
10153 		fcp_log(CE_WARN, pptr->port_dip,
10154 		    "!fcp%d: invalid port state at attach=0x%x",
10155 		    instance, pptr->port_phys_state);
10156 
10157 		mutex_enter(&pptr->port_mutex);
10158 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10159 		mutex_exit(&pptr->port_mutex);
10160 
10161 		res = DDI_SUCCESS;
10162 		break;
10163 	}
10164 
10165 	/* free temp list if used */
10166 	if (tmp_list != NULL) {
10167 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10168 	}
10169 
10170 	/* note the attach time */
10171 	pptr->port_attach_time = ddi_get_lbolt64();
10172 
10173 	/* all done */
10174 	return (res);
10175 
10176 	/* a failure we have to clean up after */
10177 fail:
10178 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10179 
10180 	if (soft_state_linked) {
10181 		/* remove this fcp_port from the linked list */
10182 		(void) fcp_soft_state_unlink(pptr);
10183 	}
10184 
10185 	/* unbind and free event set */
10186 	if (pptr->port_ndi_event_hdl) {
10187 		if (event_bind) {
10188 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10189 			    &pptr->port_ndi_events, NDI_SLEEP);
10190 		}
10191 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10192 	}
10193 
10194 	if (pptr->port_ndi_event_defs) {
10195 		(void) kmem_free(pptr->port_ndi_event_defs,
10196 		    sizeof (fcp_ndi_event_defs));
10197 	}
10198 
10199 	/*
10200 	 * Clean up mpxio stuff
10201 	 */
10202 	if (pptr->port_mpxio) {
10203 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10204 		pptr->port_mpxio--;
10205 	}
10206 
10207 	/* undo SCSI HBA setup */
10208 	if (hba_attached) {
10209 		(void) scsi_hba_detach(pptr->port_dip);
10210 	}
10211 	if (pptr->port_tran != NULL) {
10212 		scsi_hba_tran_free(pptr->port_tran);
10213 	}
10214 
10215 	mutex_enter(&fcp_global_mutex);
10216 
10217 	/*
10218 	 * We check soft_state_linked because it is incremented right before
10219 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10220 	 * soft_state_linked is still FALSE, we must not decrement
10221 	 * fcp_watchdog_init or possibly call untimeout.
10222 	 */
10223 
10224 	if (soft_state_linked) {
10225 		if (--fcp_watchdog_init == 0) {
10226 			timeout_id_t	tid = fcp_watchdog_id;
10227 
10228 			mutex_exit(&fcp_global_mutex);
10229 			(void) untimeout(tid);
10230 		} else {
10231 			mutex_exit(&fcp_global_mutex);
10232 		}
10233 	} else {
10234 		mutex_exit(&fcp_global_mutex);
10235 	}
10236 
10237 	if (mutex_initted) {
10238 		mutex_destroy(&pptr->port_mutex);
10239 		mutex_destroy(&pptr->port_pkt_mutex);
10240 	}
10241 
10242 	if (tmp_list != NULL) {
10243 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10244 	}
10245 
10246 	/* this makes pptr invalid */
10247 	ddi_soft_state_free(fcp_softstate, instance);
10248 
10249 	return (DDI_FAILURE);
10250 }
10251 
10252 
10253 static int
10254 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10255 {
10256 	int count = 0;
10257 
10258 	mutex_enter(&pptr->port_mutex);
10259 
10260 	/*
10261 	 * if the port is powered down or suspended, nothing else
10262 	 * to do; just return.
10263 	 */
10264 	if (flag != FCP_STATE_DETACHING) {
10265 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10266 		    FCP_STATE_SUSPENDED)) {
10267 			pptr->port_state |= flag;
10268 			mutex_exit(&pptr->port_mutex);
10269 			return (FC_SUCCESS);
10270 		}
10271 	}
10272 
10273 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10274 		mutex_exit(&pptr->port_mutex);
10275 		return (FC_FAILURE);
10276 	}
10277 
10278 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10279 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10280 	    "fcp_handle_port_detach: port is detaching");
10281 
10282 	pptr->port_state |= flag;
10283 
10284 	/*
10285 	 * Wait for any ongoing reconfig/ipkt to complete, which
10286 	 * ensures that freeing the targets/luns is safe.
10287 	 * No more references to this port should come from statec/ioctl
10288 	 * after that, as it was removed from the global port list.
10289 	 */
10290 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10291 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10292 		/*
10293 		 * Let's give sufficient time for reconfig/ipkt
10294 		 * to complete.
10295 		 */
10296 		if (count++ >= FCP_ICMD_DEADLINE) {
10297 			break;
10298 		}
10299 		mutex_exit(&pptr->port_mutex);
10300 		delay(drv_usectohz(1000000));
10301 		mutex_enter(&pptr->port_mutex);
10302 	}
10303 
10304 	/*
10305 	 * if the driver is still busy then fail to
10306 	 * suspend/power down.
10307 	 */
10308 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10309 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10310 		pptr->port_state &= ~flag;
10311 		mutex_exit(&pptr->port_mutex);
10312 		return (FC_FAILURE);
10313 	}
10314 
10315 	if (flag == FCP_STATE_DETACHING) {
10316 		pptr = fcp_soft_state_unlink(pptr);
10317 		ASSERT(pptr != NULL);
10318 	}
10319 
10320 	pptr->port_link_cnt++;
10321 	pptr->port_state |= FCP_STATE_OFFLINE;
10322 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10323 
10324 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10325 	    FCP_CAUSE_LINK_DOWN);
10326 	mutex_exit(&pptr->port_mutex);
10327 
10328 	/* kill watch dog timer if we're the last */
10329 	mutex_enter(&fcp_global_mutex);
10330 	if (--fcp_watchdog_init == 0) {
10331 		timeout_id_t	tid = fcp_watchdog_id;
10332 		mutex_exit(&fcp_global_mutex);
10333 		(void) untimeout(tid);
10334 	} else {
10335 		mutex_exit(&fcp_global_mutex);
10336 	}
10337 
10338 	/* clean up the port structures */
10339 	if (flag == FCP_STATE_DETACHING) {
10340 		fcp_cleanup_port(pptr, instance);
10341 	}
10342 
10343 	return (FC_SUCCESS);
10344 }
10345 
10346 
10347 static void
10348 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10349 {
10350 	ASSERT(pptr != NULL);
10351 
10352 	/* unbind and free event set */
10353 	if (pptr->port_ndi_event_hdl) {
10354 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10355 		    &pptr->port_ndi_events, NDI_SLEEP);
10356 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10357 	}
10358 
10359 	if (pptr->port_ndi_event_defs) {
10360 		(void) kmem_free(pptr->port_ndi_event_defs,
10361 		    sizeof (fcp_ndi_event_defs));
10362 	}
10363 
10364 	/* free the lun/target structures and devinfos */
10365 	fcp_free_targets(pptr);
10366 
10367 	/*
10368 	 * Clean up mpxio stuff
10369 	 */
10370 	if (pptr->port_mpxio) {
10371 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10372 		pptr->port_mpxio--;
10373 	}
10374 
10375 	/* clean up SCSA stuff */
10376 	(void) scsi_hba_detach(pptr->port_dip);
10377 	if (pptr->port_tran != NULL) {
10378 		scsi_hba_tran_free(pptr->port_tran);
10379 	}
10380 
10381 #ifdef	KSTATS_CODE
10382 	/* clean up kstats */
10383 	if (pptr->fcp_ksp != NULL) {
10384 		kstat_delete(pptr->fcp_ksp);
10385 	}
10386 #endif
10387 
10388 	/* clean up soft state mutexes/condition variables */
10389 	mutex_destroy(&pptr->port_mutex);
10390 	mutex_destroy(&pptr->port_pkt_mutex);
10391 
10392 	/* all done with soft state */
10393 	ddi_soft_state_free(fcp_softstate, instance);
10394 }
10395 
10396 /*
10397  *     Function: fcp_kmem_cache_constructor
10398  *
10399  *  Description: This function allocates and initializes the resources required
10400  *		 to build a scsi_pkt structure for the target driver.  The result
10401  *		 of the allocation and initialization will be cached in the
10402  *		 memory cache.	As DMA resources may be allocated here, that
10403  *		 means DMA resources will be tied up in the cache manager.
10404  *		 This is a tradeoff that has been made for performance reasons.
10405  *
10406  *     Argument: *buf		Memory to preinitialize.
10407  *		 *arg		FCP port structure (fcp_port).
10408  *		 kmflags	Value passed to kmem_cache_alloc() and
10409  *				propagated to the constructor.
10410  *
10411  * Return Value: 0	Allocation/Initialization was successful.
10412  *		 -1	Allocation or Initialization failed.
10413  *
10414  *
10415  * If the returned value is 0, the buffer is initialized like this:
10416  *
10417  *		    +================================+
10418  *	     +----> |	      struct scsi_pkt	     |
10419  *	     |	    |				     |
10420  *	     | +--- | pkt_ha_private		     |
10421  *	     | |    |				     |
10422  *	     | |    +================================+
10423  *	     | |
10424  *	     | |    +================================+
10425  *	     | +--> |	    struct fcp_pkt	     | <---------+
10426  *	     |	    |				     |		 |
10427  *	     +----- | cmd_pkt			     |		 |
10428  *		    |			  cmd_fp_pkt | ---+	 |
10429  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10430  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10431  *	  |    |    |--------------------------------|	  |	 |
10432  *	  |    |    |	      struct fc_packet	     | <--+	 |
10433  *	  |    |    |				     |		 |
10434  *	  |    |    |		     pkt_ulp_private | ----------+
10435  *	  |    |    |		     pkt_fca_private | -----+
10436  *	  |    |    |		     pkt_data_cookie | ---+ |
10437  *	  |    |    | pkt_cmdlen		     |	  | |
10438  *	  |    |(a) | pkt_rsplen		     |	  | |
10439  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10440  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10441  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10442  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10443  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10444  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10445  *		    +================================+	  | |  |   |   |    |
10446  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10447  *		    |				     |	    |  |   |   |    |
10448  *		    +================================+	    |  |   |   |    |
10449  *		    |	      fca_private	     | <----+  |   |   |    |
10450  *		    |				     |	       |   |   |    |
10451  *		    +================================+	       |   |   |    |
10452  *							       |   |   |    |
10453  *							       |   |   |    |
10454  *		    +================================+	 (d)   |   |   |    |
10455  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10456  *		    |				     |		   |   |    |
10457  *		    +================================+		   |   |    |
10458  *								   |   |    |
10459  *		    +================================+	 (d)	   |   |    |
10460  *		    |		fcp_resp	     | <-----------+   |    |
10461  *		    |	(DMA resources associated)   |		       |    |
10462  *		    +================================+		       |    |
10463  *								       |    |
10464  *								       |    |
10465  *								       |    |
10466  *		    +================================+	 (c)	       |    |
10467  *		    |	     fcp_cmd cookies	     | <---------------+    |
10468  *		    |				     |			    |
10469  *		    +================================+			    |
10470  *									    |
10471  *		    +================================+	 (c)		    |
10472  *		    |		 fcp_cmd	     | <--------------------+
10473  *		    |	(DMA resources associated)   |
10474  *		    +================================+
10475  *
10476  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10477  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10478  * (c) Only if DMA is used for the FCP_CMD buffer.
10479  * (d) Only if DMA is used for the FCP_RESP buffer
10480  */
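/*
 * Editorial sketch of how the links in the diagram above are walked at
 * completion time (see, for example, the callback handling earlier in this
 * file):
 *
 *	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
 *	struct scsi_pkt *pkt = cmd->cmd_pkt;
 *	ASSERT(cmd->cmd_fp_pkt == fpkt);
 *
 * i.e. the fc_packet_t, the fcp_pkt and the scsi_pkt all point back at
 * each other through the fields initialized by the constructor below.
 */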
10481 static int
10482 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10483     int kmflags)
10484 {
10485 	struct fcp_pkt	*cmd;
10486 	struct fcp_port	*pptr;
10487 	fc_packet_t	*fpkt;
10488 
10489 	pptr = (struct fcp_port *)tran->tran_hba_private;
10490 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10491 	bzero(cmd, tran->tran_hba_len);
10492 
10493 	cmd->cmd_pkt = pkt;
10494 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10495 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10496 	cmd->cmd_fp_pkt = fpkt;
10497 
10498 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10499 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10500 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10501 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10502 
10503 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10504 	    sizeof (struct fcp_pkt));
10505 
10506 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10507 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10508 
10509 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10510 		/*
10511 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10512 		 * fcp_resp.  The transfer of information will be done by
10513 		 * bcopy.
10514 		 * The naming of the flag (which is actually a value) is
10515 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10516 		 * DMA" but instead "NO DMA".
10517 		 */
10518 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10519 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10520 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10521 	} else {
10522 		/*
10523 		 * The underlying HBA will DMA the fcp_cmd buffer and fcp_resp
10524 		 * buffer.  A buffer is allocated for each one using the
10525 		 * ddi_dma_* interfaces.
10526 		 */
10527 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10528 			return (-1);
10529 		}
10530 	}
10531 
10532 	return (0);
10533 }
10534 
10535 /*
10536  *     Function: fcp_kmem_cache_destructor
10537  *
10538  *  Description: Called by the destructor of the cache managed by SCSA.
10539  *		 All the resources pre-allocated in fcp_kmem_cache_constructor
10540  *		 and the data also pre-initialized there
10541  *		 are freed and uninitialized here.
10542  *
10543  *     Argument: *buf		Memory to uninitialize.
10544  *		 *arg		FCP port structure (fcp_port).
10545  *
10546  * Return Value: None
10547  *
10548  *	Context: kernel
10549  */
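/*
 * Editorial note: only the DMA case needs explicit teardown here.  In the
 * FC_NO_DVMA_SPACE case the constructor simply pointed pkt_cmd and
 * pkt_resp at buffers embedded in the fcp_pkt itself, so there is nothing
 * to release beyond the cache object.
 */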
10550 static void
10551 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10552 {
10553 	struct fcp_pkt	*cmd;
10554 	struct fcp_port	*pptr;
10555 
10556 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10557 	cmd = pkt->pkt_ha_private;
10558 
10559 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10560 		/*
10561 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10562 		 * buffer and DMA resources allocated to do so are released.
10563 		 */
10564 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10565 	}
10566 }
10567 
10568 /*
10569  *     Function: fcp_alloc_cmd_resp
10570  *
10571  *  Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10572  *		 will be DMAed by the HBA.  The buffers are allocated applying
10573  *		 the DMA requirements for the HBA.  The buffers allocated will
10574  *		 also be bound.	 DMA resources are allocated in the process.
10575  *		 They will be released by fcp_free_cmd_resp().
10576  *
10577  *     Argument: *pptr	FCP port.
10578  *		 *fpkt	fc packet for which the cmd and resp packet should be
10579  *			allocated.
10580  *		 flags	Allocation flags.
10581  *
10582  * Return Value: FC_FAILURE
10583  *		 FC_SUCCESS
10584  *
10585  *	Context: User or Kernel context only if flags == KM_SLEEP.
10586  *		 Interrupt context if the KM_SLEEP is not specified.
10587  */
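/*
 * Editorial note: for each of the two buffers (FCP_CMD and FCP_RESP) the
 * function below follows the usual DDI DMA setup sequence, and unwinds it
 * in reverse order on failure:
 *
 *	ddi_dma_alloc_handle()		allocate the DMA handle
 *	ddi_dma_mem_alloc()		allocate a DMA-able buffer
 *	ddi_dma_addr_bind_handle()	bind it; get first cookie and count
 *	ddi_dma_nextcookie()		fetch the remaining cookies into the
 *					pkt_cmd_cookie / pkt_resp_cookie array
 */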
10588 static int
10589 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10590 {
10591 	int			rval;
10592 	int			cmd_len;
10593 	int			resp_len;
10594 	ulong_t			real_len;
10595 	int			(*cb) (caddr_t);
10596 	ddi_dma_cookie_t	pkt_cookie;
10597 	ddi_dma_cookie_t	*cp;
10598 	uint32_t		cnt;
10599 
10600 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10601 
10602 	cmd_len = fpkt->pkt_cmdlen;
10603 	resp_len = fpkt->pkt_rsplen;
10604 
10605 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10606 
10607 	/* Allocation of a DMA handle used in subsequent calls. */
10608 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10609 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10610 		return (FC_FAILURE);
10611 	}
10612 
10613 	/* A buffer is allocated that satisfies the DMA requirements. */
10614 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10615 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10616 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10617 
10618 	if (rval != DDI_SUCCESS) {
10619 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10620 		return (FC_FAILURE);
10621 	}
10622 
10623 	if (real_len < cmd_len) {
10624 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10625 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10626 		return (FC_FAILURE);
10627 	}
10628 
10629 	/* The buffer allocated is DMA bound. */
10630 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10631 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10632 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10633 
10634 	if (rval != DDI_DMA_MAPPED) {
10635 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10636 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10637 		return (FC_FAILURE);
10638 	}
10639 
10640 	if (fpkt->pkt_cmd_cookie_cnt >
10641 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10642 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10643 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10644 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10645 		return (FC_FAILURE);
10646 	}
10647 
10648 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10649 
10650 	/*
10651 	 * The buffer where the scatter/gather list is going to be built is
10652 	 * allocated.
10653 	 */
10654 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10655 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10656 	    KM_NOSLEEP);
10657 
10658 	if (cp == NULL) {
10659 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10660 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10661 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10662 		return (FC_FAILURE);
10663 	}
10664 
10665 	/*
10666 	 * The scatter/gather list for the buffer we just allocated is built
10667 	 * here.
10668 	 */
10669 	*cp = pkt_cookie;
10670 	cp++;
10671 
10672 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10673 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10674 		    &pkt_cookie);
10675 		*cp = pkt_cookie;
10676 	}
10677 
10678 	ASSERT(fpkt->pkt_resp_dma == NULL);
10679 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10680 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10681 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10682 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10683 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10684 		return (FC_FAILURE);
10685 	}
10686 
10687 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10688 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10689 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10690 	    &fpkt->pkt_resp_acc);
10691 
10692 	if (rval != DDI_SUCCESS) {
10693 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10694 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10695 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10696 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10697 		kmem_free(fpkt->pkt_cmd_cookie,
10698 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10699 		return (FC_FAILURE);
10700 	}
10701 
10702 	if (real_len < resp_len) {
10703 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10704 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10705 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10706 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10707 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10708 		kmem_free(fpkt->pkt_cmd_cookie,
10709 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10710 		return (FC_FAILURE);
10711 	}
10712 
10713 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10714 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10715 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10716 
10717 	if (rval != DDI_DMA_MAPPED) {
10718 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10719 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10720 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10721 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10722 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10723 		kmem_free(fpkt->pkt_cmd_cookie,
10724 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10725 		return (FC_FAILURE);
10726 	}
10727 
10728 	if (fpkt->pkt_resp_cookie_cnt >
10729 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10730 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10731 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10732 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10733 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10734 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10735 		kmem_free(fpkt->pkt_cmd_cookie,
10736 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10737 		return (FC_FAILURE);
10738 	}
10739 
10740 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10741 
10742 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10743 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10744 	    KM_NOSLEEP);
10745 
10746 	if (cp == NULL) {
10747 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10748 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10749 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10750 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10751 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10752 		kmem_free(fpkt->pkt_cmd_cookie,
10753 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10754 		return (FC_FAILURE);
10755 	}
10756 
10757 	*cp = pkt_cookie;
10758 	cp++;
10759 
10760 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10761 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10762 		    &pkt_cookie);
10763 		*cp = pkt_cookie;
10764 	}
10765 
10766 	return (FC_SUCCESS);
10767 }
10768 
10769 /*
10770  *     Function: fcp_free_cmd_resp
10771  *
10772  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10773  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10774  *		 associated with them.	That includes the DMA resources and the
10775  *		 buffer allocated for the cookies of each one of them.
10776  *
10777  *     Argument: *pptr		FCP port context.
10778  *		 *fpkt		fc packet containing the cmd and resp packet
10779  *				to be released.
10780  *
10781  * Return Value: None
10782  *
10783  *	Context: Interrupt, User and Kernel context.
10784  */
10785 /* ARGSUSED */
10786 static void
10787 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10788 {
10789 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10790 
10791 	if (fpkt->pkt_resp_dma) {
10792 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10793 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10794 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10795 	}
10796 
10797 	if (fpkt->pkt_resp_cookie) {
10798 		kmem_free(fpkt->pkt_resp_cookie,
10799 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10800 		fpkt->pkt_resp_cookie = NULL;
10801 	}
10802 
10803 	if (fpkt->pkt_cmd_dma) {
10804 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10805 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10806 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10807 	}
10808 
10809 	if (fpkt->pkt_cmd_cookie) {
10810 		kmem_free(fpkt->pkt_cmd_cookie,
10811 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10812 		fpkt->pkt_cmd_cookie = NULL;
10813 	}
10814 }
10815 
10816 
10817 /*
10818  * called by the transport to do our own target initialization
10819  *
10820  * can acquire and release the global mutex
10821  */
10822 /* ARGSUSED */
10823 static int
10824 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10825     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10826 {
10827 	uchar_t			*bytes;
10828 	uint_t			nbytes;
10829 	uint16_t		lun_num;
10830 	struct fcp_tgt	*ptgt;
10831 	struct fcp_lun	*plun;
10832 	struct fcp_port	*pptr = (struct fcp_port *)
10833 	    hba_tran->tran_hba_private;
10834 
10835 	ASSERT(pptr != NULL);
10836 
10837 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10838 	    FCP_BUF_LEVEL_8, 0,
10839 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10840 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10841 
10842 	/* get our port WWN property */
10843 	bytes = NULL;
10844 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10845 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10846 	    (nbytes != FC_WWN_SIZE)) {
10847 		/* no port WWN property */
10848 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10849 		    FCP_BUF_LEVEL_8, 0,
10850 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10851 		    " for %s (instance %d): bytes=%p nbytes=%x",
10852 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10853 		    nbytes);
10854 
10855 		if (bytes != NULL) {
10856 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10857 		}
10858 
10859 		return (DDI_NOT_WELL_FORMED);
10860 	}
10861 	ASSERT(bytes != NULL);
10862 
10863 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10864 	    LUN_PROP, 0xFFFF);
10865 	if (lun_num == 0xFFFF) {
10866 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10867 		    FCP_BUF_LEVEL_8, 0,
10868 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10869 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10870 		    ddi_get_instance(tgt_dip));
10871 
10872 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10873 		return (DDI_NOT_WELL_FORMED);
10874 	}
10875 
10876 	mutex_enter(&pptr->port_mutex);
10877 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10878 		mutex_exit(&pptr->port_mutex);
10879 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10880 		    FCP_BUF_LEVEL_8, 0,
10881 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10882 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10883 		    ddi_get_instance(tgt_dip));
10884 
10885 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10886 		return (DDI_FAILURE);
10887 	}
10888 
10889 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10890 	    FC_WWN_SIZE) == 0);
10891 	ASSERT(plun->lun_num == lun_num);
10892 
10893 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10894 
10895 	ptgt = plun->lun_tgt;
10896 
10897 	mutex_enter(&ptgt->tgt_mutex);
10898 	plun->lun_tgt_count++;
10899 	scsi_device_hba_private_set(sd, plun);
10900 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10901 	plun->lun_sd = sd;
10902 	mutex_exit(&ptgt->tgt_mutex);
10903 	mutex_exit(&pptr->port_mutex);
10904 
10905 	return (DDI_SUCCESS);
10906 }
10907 
10908 /*ARGSUSED*/
10909 static int
10910 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10911     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10912 {
10913 	uchar_t			*bytes;
10914 	uint_t			nbytes;
10915 	uint16_t		lun_num;
10916 	struct fcp_tgt	*ptgt;
10917 	struct fcp_lun	*plun;
10918 	struct fcp_port	*pptr = (struct fcp_port *)
10919 	    hba_tran->tran_hba_private;
10920 	child_info_t		*cip;
10921 
10922 	ASSERT(pptr != NULL);
10923 
10924 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10925 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10926 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10927 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10928 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10929 
10930 	cip = (child_info_t *)sd->sd_pathinfo;
10931 	if (cip == NULL) {
10932 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10933 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10934 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10935 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10936 		    ddi_get_instance(tgt_dip));
10937 
10938 		return (DDI_NOT_WELL_FORMED);
10939 	}
10940 
10941 	/* get our port WWN property */
10942 	bytes = NULL;
10943 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10944 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10945 	    (nbytes != FC_WWN_SIZE)) {
10946 		if (bytes) {
10947 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10948 		}
10949 		return (DDI_NOT_WELL_FORMED);
10950 	}
10951 
10952 	ASSERT(bytes != NULL);
10953 
10954 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10955 	    LUN_PROP, 0xFFFF);
10956 	if (lun_num == 0xFFFF) {
10957 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10958 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10959 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10960 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10961 		    ddi_get_instance(tgt_dip));
10962 
10963 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10964 		return (DDI_NOT_WELL_FORMED);
10965 	}
10966 
10967 	mutex_enter(&pptr->port_mutex);
10968 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10969 		mutex_exit(&pptr->port_mutex);
10970 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10971 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10972 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10973 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10974 		    ddi_get_instance(tgt_dip));
10975 
10976 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10977 		return (DDI_FAILURE);
10978 	}
10979 
10980 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10981 	    FC_WWN_SIZE) == 0);
10982 	ASSERT(plun->lun_num == lun_num);
10983 
10984 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10985 
10986 	ptgt = plun->lun_tgt;
10987 
10988 	mutex_enter(&ptgt->tgt_mutex);
10989 	plun->lun_tgt_count++;
10990 	scsi_device_hba_private_set(sd, plun);
10991 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10992 	plun->lun_sd = sd;
10993 	mutex_exit(&ptgt->tgt_mutex);
10994 	mutex_exit(&pptr->port_mutex);
10995 
10996 	return (DDI_SUCCESS);
10997 }
10998 
10999 
11000 /*
11001  * called by the transport to do our own target initialization
11002  *
11003  * can acquire and release the global mutex
11004  */
11005 /* ARGSUSED */
11006 static int
11007 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11008     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11009 {
11010 	struct fcp_port	*pptr = (struct fcp_port *)
11011 	    hba_tran->tran_hba_private;
11012 	int			rval;
11013 
11014 	ASSERT(pptr != NULL);
11015 
11016 	/*
11017 	 * Child node is getting initialized.  Look at the mpxio component
11018 	 * type on the child device to see if this device is mpxio managed
11019 	 * or not.
11020 	 */
11021 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11022 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11023 	} else {
11024 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11025 	}
11026 
11027 	return (rval);
11028 }
11029 
11030 
11031 /* ARGSUSED */
11032 static void
11033 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11034     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11035 {
11036 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
11037 	struct fcp_tgt	*ptgt;
11038 
11039 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11040 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
11041 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11042 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11043 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11044 
11045 	if (plun == NULL) {
11046 		return;
11047 	}
11048 	ptgt = plun->lun_tgt;
11049 
11050 	ASSERT(ptgt != NULL);
11051 
11052 	mutex_enter(&ptgt->tgt_mutex);
11053 	ASSERT(plun->lun_tgt_count > 0);
11054 
11055 	if (--plun->lun_tgt_count == 0) {
11056 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11057 	}
11058 	plun->lun_sd = NULL;
11059 	mutex_exit(&ptgt->tgt_mutex);
11060 }
11061 
11062 /*
11063  *     Function: fcp_scsi_start
11064  *
11065  *  Description: This function is called by the target driver to request a
11066  *		 command to be sent.
11067  *
11068  *     Argument: *ap		SCSI address of the device.
11069  *		 *pkt		SCSI packet containing the cmd to send.
11070  *
11071  * Return Value: TRAN_ACCEPT
11072  *		 TRAN_BUSY
11073  *		 TRAN_BADPKT
11074  *		 TRAN_FATAL_ERROR
11075  */
11076 static int
11077 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11078 {
11079 	struct fcp_port	*pptr = ADDR2FCP(ap);
11080 	struct fcp_lun	*plun = ADDR2LUN(ap);
11081 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
11082 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11083 	int			rval;
11084 
11085 	/* ensure command isn't already issued */
11086 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11087 
11088 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11089 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
11090 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11091 
11092 	/*
11093 	 * It is strange that we enter the fcp_port mutex and the target
11094 	 * mutex to check the lun state (which has a mutex of its own).
11095 	 */
11096 	mutex_enter(&pptr->port_mutex);
11097 	mutex_enter(&ptgt->tgt_mutex);
11098 
11099 	/*
11100 	 * If the device is offline and is not in the process of coming
11101 	 * online, fail the request.
11102 	 */
11103 
11104 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11105 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
11106 		mutex_exit(&ptgt->tgt_mutex);
11107 		mutex_exit(&pptr->port_mutex);
11108 
11109 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11110 			pkt->pkt_reason = CMD_DEV_GONE;
11111 		}
11112 
11113 		return (TRAN_FATAL_ERROR);
11114 	}
11115 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11116 
11117 	/*
11118 	 * If we are suspended, kernel is trying to dump, so don't
11119 	 * block, fail or defer requests - send them down right away.
11120 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11121 	 * assume we have been suspended.  There is hardware such as
11122 	 * the v880 that doesn't do PM.	 Thus, the check for
11123 	 * ddi_in_panic.
11124 	 *
11125 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11126 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11127 	 * either the device will have gone away or changed and we can fail
11128 	 * the request, or we can proceed if the device didn't change.
11129 	 *
11130 	 * If the pd in the target or the packet is NULL, it's probably
11131 	 * because the device has gone away; we allow the request to be
11132 	 * put on the internal queue here in case the device comes back within
11133 	 * the offline timeout.  fctl will fix up the pd's if the tgt_pd_handle
11134 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL.
11135 	 * pkt_pd could be NULL because the device was disappearing during
11136 	 * or since packet initialization.
11137 	 */
11138 
11139 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11140 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11141 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11142 	    (ptgt->tgt_pd_handle == NULL) ||
11143 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11144 		/*
11145 		 * If ((LUN is busy AND
11146 		 *	the port is not suspended AND
11147 		 *	The system is not in panic state) OR
11148 		 *	(The port is coming up))
11149 		 *
11150 		 * We check to see if any of the flags FLAG_NOINTR or
11151 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11152 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11153 		 */
11154 		mutex_exit(&ptgt->tgt_mutex);
11155 		mutex_exit(&pptr->port_mutex);
11156 
11157 		/* see if using interrupts is allowed (so queueing'll work) */
11158 		if (pkt->pkt_flags & FLAG_NOINTR) {
11159 			pkt->pkt_resid = 0;
11160 			return (TRAN_BUSY);
11161 		}
11162 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11163 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11164 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11165 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11166 			return (TRAN_BUSY);
11167 		}
11168 #ifdef	DEBUG
11169 		mutex_enter(&pptr->port_pkt_mutex);
11170 		pptr->port_npkts++;
11171 		mutex_exit(&pptr->port_pkt_mutex);
11172 #endif /* DEBUG */
11173 
11174 		/* go queue up the pkt for later */
11175 		fcp_queue_pkt(pptr, cmd);
11176 		return (TRAN_ACCEPT);
11177 	}
11178 	cmd->cmd_state = FCP_PKT_ISSUED;
11179 
11180 	mutex_exit(&ptgt->tgt_mutex);
11181 	mutex_exit(&pptr->port_mutex);
11182 
11183 	/*
11184 	 * Now that we released the mutexes, what was protected by them can
11185 	 * change.
11186 	 */
11187 
11188 	/*
11189 	 * If there is a reconfiguration in progress, wait for it to complete.
11190 	 */
11191 	fcp_reconfig_wait(pptr);
11192 
11193 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11194 	    pkt->pkt_time : 0;
11195 
11196 	/* prepare the packet */
11197 
11198 	fcp_prepare_pkt(pptr, cmd, plun);
11199 
11200 	if (cmd->cmd_pkt->pkt_time) {
11201 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11202 	} else {
11203 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11204 	}
11205 
11206 	/*
11207 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11208 	 * have to do polled I/O
11209 	 */
11210 	if (pkt->pkt_flags & FLAG_NOINTR) {
11211 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11212 		return (fcp_dopoll(pptr, cmd));
11213 	}
11214 
11215 #ifdef	DEBUG
11216 	mutex_enter(&pptr->port_pkt_mutex);
11217 	pptr->port_npkts++;
11218 	mutex_exit(&pptr->port_pkt_mutex);
11219 #endif /* DEBUG */
11220 
11221 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11222 	if (rval == FC_SUCCESS) {
11223 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11224 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11225 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11226 		return (TRAN_ACCEPT);
11227 	}
11228 
11229 	cmd->cmd_state = FCP_PKT_IDLE;
11230 
11231 #ifdef	DEBUG
11232 	mutex_enter(&pptr->port_pkt_mutex);
11233 	pptr->port_npkts--;
11234 	mutex_exit(&pptr->port_pkt_mutex);
11235 #endif /* DEBUG */
11236 
11237 	/*
11238 	 * For lack of clearer definitions, choose
11239 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11240 	 */
11241 
11242 	if (rval == FC_TRAN_BUSY) {
11243 		pkt->pkt_resid = 0;
11244 		rval = TRAN_BUSY;
11245 	} else {
11246 		mutex_enter(&ptgt->tgt_mutex);
11247 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11248 			child_info_t	*cip;
11249 
11250 			mutex_enter(&plun->lun_mutex);
11251 			cip = plun->lun_cip;
11252 			mutex_exit(&plun->lun_mutex);
11253 
11254 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11255 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11256 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11257 			    plun->lun_tgt->tgt_d_id, rval, cip);
11258 
11259 			rval = TRAN_FATAL_ERROR;
11260 		} else {
11261 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11262 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11263 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11264 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11265 				    pkt);
11266 				rval = TRAN_BUSY;
11267 			} else {
11268 				rval = TRAN_ACCEPT;
11269 				fcp_queue_pkt(pptr, cmd);
11270 			}
11271 		}
11272 		mutex_exit(&ptgt->tgt_mutex);
11273 	}
11274 
11275 	return (rval);
11276 }
11277 
11278 /*
11279  * called by the transport to abort a packet
11280  */
11281 /*ARGSUSED*/
11282 static int
11283 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11284 {
11285 	int tgt_cnt;
11286 	struct fcp_port		*pptr = ADDR2FCP(ap);
11287 	struct fcp_lun	*plun = ADDR2LUN(ap);
11288 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11289 
11290 	if (pkt == NULL) {
11291 		if (ptgt) {
11292 			mutex_enter(&ptgt->tgt_mutex);
11293 			tgt_cnt = ptgt->tgt_change_cnt;
11294 			mutex_exit(&ptgt->tgt_mutex);
11295 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11296 			return (TRUE);
11297 		}
11298 	}
11299 	return (FALSE);
11300 }
11301 
11302 
11303 /*
11304  * Perform reset
11305  */
11306 int
11307 fcp_scsi_reset(struct scsi_address *ap, int level)
11308 {
11309 	int			rval = 0;
11310 	struct fcp_port		*pptr = ADDR2FCP(ap);
11311 	struct fcp_lun	*plun = ADDR2LUN(ap);
11312 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11313 
11314 	if (level == RESET_ALL) {
11315 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11316 			rval = 1;
11317 		}
11318 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11319 		/*
11320 		 * If we are in the middle of discovery, return
11321 		 * SUCCESS as this target will be rediscovered
11322 		 * anyway
11323 		 */
11324 		mutex_enter(&ptgt->tgt_mutex);
11325 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11326 			mutex_exit(&ptgt->tgt_mutex);
11327 			return (1);
11328 		}
11329 		mutex_exit(&ptgt->tgt_mutex);
11330 
11331 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11332 			rval = 1;
11333 		}
11334 	}
11335 	return (rval);
11336 }
11337 
11338 
11339 /*
11340  * called by the framework to get a SCSI capability
11341  */
11342 static int
11343 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11344 {
11345 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11346 }
11347 
11348 
11349 /*
11350  * called by the framework to set a SCSI capability
11351  */
11352 static int
11353 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11354 {
11355 	return (fcp_commoncap(ap, cap, value, whom, 1));
11356 }
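
/*
 * These capability entry points are reached through the SCSA
 * scsi_ifgetcap(9F)/scsi_ifsetcap(9F) interfaces.  A hedged sketch of
 * how a target driver might exercise them ("ap" is a hypothetical
 * scsi_address already bound to an fcp LUN):
 *
 *	if (scsi_ifgetcap(ap, "lun-reset", 1) != 1) {
 *		(void) scsi_ifsetcap(ap, "lun-reset", 1, 1);
 *	}
 *
 * The capability string is resolved with scsi_hba_lookup_capstr() and
 * both paths end up in fcp_commoncap() below.
 */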
11357 
11358 /*
11359  *     Function: fcp_pkt_setup
11360  *
11361  *  Description: This function sets up the scsi_pkt structure passed by the
11362  *		 caller. This function assumes fcp_pkt_constructor has been
11363  *		 called previously for the packet passed by the caller.	 If
11364  *		 successful this call will have the following results:
11365  *
11366  *		   - The resources needed that will be constant throughout
11367  *		     the whole transaction are allocated.
11368  *		   - The fields that will be constant throughout the whole
11369  *		     transaction are initialized.
11370  *		   - The scsi packet will be linked to the LUN structure
11371  *		     addressed by the transaction.
11372  *
11373  *     Argument:
11374  *		 *pkt		Pointer to a scsi_pkt structure.
11375  *		 callback
11376  *		 arg
11377  *
11378  * Return Value: 0	Success
11379  *		 !0	Failure
11380  *
11381  *	Context: Kernel context or interrupt context
11382  */
11383 /* ARGSUSED */
11384 static int
11385 fcp_pkt_setup(struct scsi_pkt *pkt,
11386     int (*callback)(caddr_t arg),
11387     caddr_t arg)
11388 {
11389 	struct fcp_pkt	*cmd;
11390 	struct fcp_port	*pptr;
11391 	struct fcp_lun	*plun;
11392 	struct fcp_tgt	*ptgt;
11393 	int		kf;
11394 	fc_packet_t	*fpkt;
11395 	fc_frame_hdr_t	*hp;
11396 
11397 	pptr = ADDR2FCP(&pkt->pkt_address);
11398 	plun = ADDR2LUN(&pkt->pkt_address);
11399 	ptgt = plun->lun_tgt;
11400 
11401 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11402 	fpkt = cmd->cmd_fp_pkt;
11403 
11404 	/*
11405 	 * this request is for dma allocation only
11406 	 */
11407 	/*
11408 	 * First step of fcp_scsi_init_pkt: pkt allocation
11409 	 * We determine if the caller is willing to wait for the
11410 	 * resources.
11411 	 */
11412 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11413 
11414 	/*
11415 	 * Selective zeroing of the pkt.
11416 	 */
11417 	cmd->cmd_back = NULL;
11418 	cmd->cmd_next = NULL;
11419 
11420 	/*
11421 	 * Zero out fcp command
11422 	 */
11423 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11424 
11425 	cmd->cmd_state = FCP_PKT_IDLE;
11426 
11427 	fpkt = cmd->cmd_fp_pkt;
11428 	fpkt->pkt_data_acc = NULL;
11429 
11430 	/*
11431 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11432 	 * could have been destroyed.  We need to fail pkt_setup.
11433 	 */
11434 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11435 		return (-1);
11436 	}
11437 
11438 	mutex_enter(&ptgt->tgt_mutex);
11439 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11440 
11441 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11442 	    != FC_SUCCESS) {
11443 		mutex_exit(&ptgt->tgt_mutex);
11444 		return (-1);
11445 	}
11446 
11447 	mutex_exit(&ptgt->tgt_mutex);
11448 
11449 	/* Fill in the Fibre Channel frame header */
11450 	hp = &fpkt->pkt_cmd_fhdr;
11451 	hp->r_ctl = R_CTL_COMMAND;
11452 	hp->rsvd = 0;
11453 	hp->type = FC_TYPE_SCSI_FCP;
11454 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11455 	hp->seq_id = 0;
11456 	hp->df_ctl  = 0;
11457 	hp->seq_cnt = 0;
11458 	hp->ox_id = 0xffff;
11459 	hp->rx_id = 0xffff;
11460 	hp->ro = 0;
11461 
11462 	/*
11463 	 * A doubly linked list (cmd_forw, cmd_back) is built
11464 	 * out of every allocated packet on a per-LUN basis.
11465 	 *
11466 	 * The packets are kept on the list so that scsi_abort()
11467 	 * requests can be satisfied.  At present (and this is unlikely
11468 	 * to change) no SCSI target driver performs a real scsi_abort
11469 	 * of an individual packet: target drivers don't keep packets
11470 	 * after calling scsi_transport(), so the only abort they can
11471 	 * issue is a NULL pkt pointer, which aborts all outstanding
11472 	 * packets.
11473 	 */
11474 	mutex_enter(&plun->lun_mutex);
11475 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11476 		plun->lun_pkt_head->cmd_back = cmd;
11477 	} else {
11478 		plun->lun_pkt_tail = cmd;
11479 	}
11480 	plun->lun_pkt_head = cmd;
11481 	mutex_exit(&plun->lun_mutex);
11482 	return (0);
11483 }
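
/*
 * fcp_pkt_setup() acts as this driver's tran_setup_pkt(9E) entry point,
 * so it runs as part of a target driver's scsi_init_pkt(9F) call.  A
 * hedged sketch of that caller side ("ap" and "bp" are hypothetical;
 * the target driver never calls fcp directly):
 *
 *	struct scsi_pkt	*pkt;
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 *
 * SCSA allocates the scsi_pkt (running fcp_pkt_constructor) and then
 * calls fcp_pkt_setup() above, which links the packet onto the per-LUN
 * list used later by fcp_scsi_abort()/fcp_abort_all().
 */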
11484 
11485 /*
11486  *     Function: fcp_pkt_teardown
11487  *
11488  *  Description: This function releases a scsi_pkt structure and all the
11489  *		 resources attached to it.
11490  *
11491  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11492  *
11493  * Return Value: None
11494  *
11495  *	Context: User, Kernel or Interrupt context.
11496  */
11497 static void
11498 fcp_pkt_teardown(struct scsi_pkt *pkt)
11499 {
11500 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11501 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11502 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11503 
11504 	/*
11505 	 * Remove the packet from the per-lun list
11506 	 */
11507 	mutex_enter(&plun->lun_mutex);
11508 	if (cmd->cmd_back) {
11509 		ASSERT(cmd != plun->lun_pkt_head);
11510 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11511 	} else {
11512 		ASSERT(cmd == plun->lun_pkt_head);
11513 		plun->lun_pkt_head = cmd->cmd_forw;
11514 	}
11515 
11516 	if (cmd->cmd_forw) {
11517 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11518 	} else {
11519 		ASSERT(cmd == plun->lun_pkt_tail);
11520 		plun->lun_pkt_tail = cmd->cmd_back;
11521 	}
11522 
11523 	mutex_exit(&plun->lun_mutex);
11524 
11525 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11526 }
11527 
11528 /*
11529  * Routine for reset notification setup, to register or cancel.
11530  * This function is called by SCSA
11531  */
11532 /*ARGSUSED*/
11533 static int
11534 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11535     void (*callback)(caddr_t), caddr_t arg)
11536 {
11537 	struct fcp_port *pptr = ADDR2FCP(ap);
11538 
11539 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11540 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11541 }
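
/*
 * Target drivers reach this entry point through scsi_reset_notify(9F).
 * A hedged sketch ("ap", my_reset_cb and my_state are hypothetical):
 *
 *	(void) scsi_reset_notify(ap, SCSI_RESET_NOTIFY,
 *	    my_reset_cb, (caddr_t)my_state);
 *
 * scsi_hba_reset_notify_setup() adds the callback to
 * port_reset_notify_listf; fcp_reset_target() later runs the list via
 * scsi_hba_reset_notify_callback() after a successful reset.
 */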
11542 
11543 
11544 static int
11545 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11546     ddi_eventcookie_t *event_cookiep)
11547 {
11548 	struct fcp_port *pptr = fcp_dip2port(dip);
11549 
11550 	if (pptr == NULL) {
11551 		return (DDI_FAILURE);
11552 	}
11553 
11554 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11555 	    event_cookiep, NDI_EVENT_NOPASS));
11556 }
11557 
11558 
11559 static int
11560 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11561     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11562     ddi_callback_id_t *cb_id)
11563 {
11564 	struct fcp_port *pptr = fcp_dip2port(dip);
11565 
11566 	if (pptr == NULL) {
11567 		return (DDI_FAILURE);
11568 	}
11569 
11570 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11571 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11572 }
11573 
11574 
11575 static int
11576 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11577 {
11578 
11579 	struct fcp_port *pptr = fcp_dip2port(dip);
11580 
11581 	if (pptr == NULL) {
11582 		return (DDI_FAILURE);
11583 	}
11584 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11585 }
11586 
11587 
11588 /*
11589  * called by the transport to post an event
11590  */
11591 static int
11592 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11593     ddi_eventcookie_t eventid, void *impldata)
11594 {
11595 	struct fcp_port *pptr = fcp_dip2port(dip);
11596 
11597 	if (pptr == NULL) {
11598 		return (DDI_FAILURE);
11599 	}
11600 
11601 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11602 	    eventid, impldata));
11603 }
11604 
11605 
11606 /*
11607  * A target in Fibre Channel in many cases has a one-to-one relation
11608  * with a port identifier (also known as the D_ID, or as the AL_PA in
11609  * private loop).  On Fibre Channel-to-SCSI bridge boxes a target reset
11610  * will most likely reset all LUNs, which means a reset will occur on
11611  * all the SCSI devices connected at the other end of the bridge.
11612  * That remains a favorite topic for discussion; one can debate it as
11613  * hotly as one likes and come up with what is arguably the best
11614  * solution to one's own satisfaction.
11615  *
11616  * To stay on track and not digress much, here are the problems stated
11617  * briefly:
11618  *
11619  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but the
11620  *	target drivers use RESET_TARGET even if their instance is on a
11621  *	LUN.  Doesn't that sound a bit broken?
11622  *
11623  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11624  *	control fields of an FCP_CMND structure.  It should have been
11625  *	fixed right there, giving the initiators the flexibility to
11626  *	minimize the havoc that could be caused by resetting a target.
11627  */
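
/*
 * For illustration only (a hedged sketch, not driver code): a target
 * driver asks for these resets through scsi_reset(9F), for example
 * ("devp" is a hypothetical scsi_device):
 *
 *	if (scsi_reset(&devp->sd_address, RESET_LUN) == 0) {
 *		(void) scsi_reset(&devp->sd_address, RESET_TARGET);
 *	}
 *
 * That lands in fcp_scsi_reset(), which for RESET_TARGET/RESET_LUN
 * calls fcp_reset_target() below with the corresponding level.
 */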
11628 static int
11629 fcp_reset_target(struct scsi_address *ap, int level)
11630 {
11631 	int			rval = FC_FAILURE;
11632 	char			lun_id[25];
11633 	struct fcp_port		*pptr = ADDR2FCP(ap);
11634 	struct fcp_lun	*plun = ADDR2LUN(ap);
11635 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11636 	struct scsi_pkt		*pkt;
11637 	struct fcp_pkt	*cmd;
11638 	struct fcp_rsp		*rsp;
11639 	uint32_t		tgt_cnt;
11640 	struct fcp_rsp_info	*rsp_info;
11641 	struct fcp_reset_elem	*p;
11642 	int			bval;
11643 
11644 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11645 	    KM_NOSLEEP)) == NULL) {
11646 		return (rval);
11647 	}
11648 
11649 	mutex_enter(&ptgt->tgt_mutex);
11650 	if (level == RESET_TARGET) {
11651 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11652 			mutex_exit(&ptgt->tgt_mutex);
11653 			kmem_free(p, sizeof (struct fcp_reset_elem));
11654 			return (rval);
11655 		}
11656 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11657 		(void) strcpy(lun_id, " ");
11658 	} else {
11659 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11660 			mutex_exit(&ptgt->tgt_mutex);
11661 			kmem_free(p, sizeof (struct fcp_reset_elem));
11662 			return (rval);
11663 		}
11664 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11665 
11666 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11667 	}
11668 	tgt_cnt = ptgt->tgt_change_cnt;
11669 
11670 	mutex_exit(&ptgt->tgt_mutex);
11671 
11672 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11673 	    0, 0, NULL, 0)) == NULL) {
11674 		kmem_free(p, sizeof (struct fcp_reset_elem));
11675 		mutex_enter(&ptgt->tgt_mutex);
11676 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11677 		mutex_exit(&ptgt->tgt_mutex);
11678 		return (rval);
11679 	}
11680 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11681 
11682 	/* fill in cmd part of packet */
11683 	cmd = PKT2CMD(pkt);
11684 	if (level == RESET_TARGET) {
11685 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11686 	} else {
11687 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11688 	}
11689 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11690 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11691 
11692 	/* prepare a packet for transport */
11693 	fcp_prepare_pkt(pptr, cmd, plun);
11694 
11695 	if (cmd->cmd_pkt->pkt_time) {
11696 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11697 	} else {
11698 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11699 	}
11700 
11701 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11702 	bval = fcp_dopoll(pptr, cmd);
11703 	fc_ulp_idle_port(pptr->port_fp_handle);
11704 
11705 	/* examine the FCP response if the polled command was accepted */
11706 	if (bval == TRAN_ACCEPT) {
11707 		int error = 3;
11708 
11709 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11710 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11711 		    sizeof (struct fcp_rsp));
11712 
11713 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11714 			if (fcp_validate_fcp_response(rsp, pptr) ==
11715 			    FC_SUCCESS) {
11716 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11717 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11718 					    sizeof (struct fcp_rsp), rsp_info,
11719 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11720 					    sizeof (struct fcp_rsp_info));
11721 				}
11722 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11723 					rval = FC_SUCCESS;
11724 					error = 0;
11725 				} else {
11726 					error = 1;
11727 				}
11728 			} else {
11729 				error = 2;
11730 			}
11731 		}
11732 
11733 		switch (error) {
11734 		case 0:
11735 			fcp_log(CE_WARN, pptr->port_dip,
11736 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11737 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11738 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11739 			break;
11740 
11741 		case 1:
11742 			fcp_log(CE_WARN, pptr->port_dip,
11743 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11744 			    " response code=%x",
11745 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11746 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11747 			    rsp_info->rsp_code);
11748 			break;
11749 
11750 		case 2:
11751 			fcp_log(CE_WARN, pptr->port_dip,
11752 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11753 			    " Bad FCP response values: rsvd1=%x,"
11754 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11755 			    " rsplen=%x, senselen=%x",
11756 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11757 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11758 			    rsp->reserved_0, rsp->reserved_1,
11759 			    rsp->fcp_u.fcp_status.reserved_0,
11760 			    rsp->fcp_u.fcp_status.reserved_1,
11761 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11762 			break;
11763 
11764 		default:
11765 			fcp_log(CE_WARN, pptr->port_dip,
11766 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11767 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11768 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11769 			break;
11770 		}
11771 	}
11772 	scsi_destroy_pkt(pkt);
11773 
11774 	if (rval == FC_FAILURE) {
11775 		mutex_enter(&ptgt->tgt_mutex);
11776 		if (level == RESET_TARGET) {
11777 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11778 		} else {
11779 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11780 		}
11781 		mutex_exit(&ptgt->tgt_mutex);
11782 		kmem_free(p, sizeof (struct fcp_reset_elem));
11783 		return (rval);
11784 	}
11785 
11786 	mutex_enter(&pptr->port_mutex);
11787 	if (level == RESET_TARGET) {
11788 		p->tgt = ptgt;
11789 		p->lun = NULL;
11790 	} else {
11791 		p->tgt = NULL;
11792 		p->lun = plun;
11793 	}
11795 	p->tgt_cnt = tgt_cnt;
11796 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11797 	p->next = pptr->port_reset_list;
11798 	pptr->port_reset_list = p;
11799 
11800 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11801 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11802 	    "Notify ssd of the reset to reinstate the reservations");
11803 
11804 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11805 	    &pptr->port_reset_notify_listf);
11806 
11807 	mutex_exit(&pptr->port_mutex);
11808 
11809 	return (rval);
11810 }
11811 
11812 
11813 /*
11814  * called by fcp_scsi_getcap() and fcp_scsi_setcap() to get and set
11815  * (respectively) SCSI capabilities
11815  * SCSI capabilities
11816  */
11817 /* ARGSUSED */
11818 static int
11819 fcp_commoncap(struct scsi_address *ap, char *cap,
11820     int val, int tgtonly, int doset)
11821 {
11822 	struct fcp_port		*pptr = ADDR2FCP(ap);
11823 	struct fcp_lun	*plun = ADDR2LUN(ap);
11824 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11825 	int			cidx;
11826 	int			rval = FALSE;
11827 
11828 	if (cap == (char *)0) {
11829 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11830 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11831 		    "fcp_commoncap: invalid arg");
11832 		return (rval);
11833 	}
11834 
11835 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11836 		return (UNDEFINED);
11837 	}
11838 
11839 	/*
11840 	 * Process setcap request.
11841 	 */
11842 	if (doset) {
11843 		/*
11844 		 * At present, we can only set binary (0/1) values
11845 		 */
11846 		switch (cidx) {
11847 		case SCSI_CAP_ARQ:
11848 			if (val == 0) {
11849 				rval = FALSE;
11850 			} else {
11851 				rval = TRUE;
11852 			}
11853 			break;
11854 
11855 		case SCSI_CAP_LUN_RESET:
11856 			if (val) {
11857 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11858 			} else {
11859 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11860 			}
11861 			rval = TRUE;
11862 			break;
11863 
11864 		case SCSI_CAP_SECTOR_SIZE:
11865 			rval = TRUE;
11866 			break;
11867 		default:
11868 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11869 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11870 			    "fcp_setcap: unsupported %d", cidx);
11871 			rval = UNDEFINED;
11872 			break;
11873 		}
11874 
11875 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11876 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11877 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11878 		    "0x%x/0x%x/0x%x/%d",
11879 		    cap, val, tgtonly, doset, rval);
11880 
11881 	} else {
11882 		/*
11883 		 * Process getcap request.
11884 		 */
11885 		switch (cidx) {
11886 		case SCSI_CAP_DMA_MAX:
11887 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11888 
11889 			/*
11890 			 * An adjustment is needed: qlc reports a 64-bit
11891 			 * dma_attr_maxxfer while target drivers such as st
11892 			 * expect an int, so we clamp the value here since
11893 			 * nobody wants to touch this elsewhere.  That still
11894 			 * leaves a max single transfer of 2 GB, which should last.
11895 			 */
11896 
11897 			if (rval == -1) {
11898 				rval = MAX_INT_DMA;
11899 			}
11900 
11901 			break;
11902 
11903 		case SCSI_CAP_INITIATOR_ID:
11904 			rval = pptr->port_id;
11905 			break;
11906 
11907 		case SCSI_CAP_ARQ:
11908 		case SCSI_CAP_RESET_NOTIFICATION:
11909 		case SCSI_CAP_TAGGED_QING:
11910 			rval = TRUE;
11911 			break;
11912 
11913 		case SCSI_CAP_SCSI_VERSION:
11914 			rval = 3;
11915 			break;
11916 
11917 		case SCSI_CAP_INTERCONNECT_TYPE:
11918 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11919 			    (ptgt->tgt_hard_addr == 0)) {
11920 				rval = INTERCONNECT_FABRIC;
11921 			} else {
11922 				rval = INTERCONNECT_FIBRE;
11923 			}
11924 			break;
11925 
11926 		case SCSI_CAP_LUN_RESET:
11927 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11928 			    TRUE : FALSE;
11929 			break;
11930 
11931 		default:
11932 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11933 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11934 			    "fcp_getcap: unsupported %d", cidx);
11935 			rval = UNDEFINED;
11936 			break;
11937 		}
11938 
11939 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11940 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11941 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11942 		    "0x%x/0x%x/0x%x/%d",
11943 		    cap, val, tgtonly, doset, rval);
11944 	}
11945 
11946 	return (rval);
11947 }
11948 
11949 /*
11950  * called by the transport to get the port-wwn and lun
11951  * properties of this device, and to create a "name" based on them
11952  *
11953  * these properties don't exist on sun4m
11954  *
11955  * return 1 for success else return 0
11956  */
11957 /* ARGSUSED */
11958 static int
11959 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11960 {
11961 	int			i;
11962 	int			*lun;
11963 	int			numChars;
11964 	uint_t			nlun;
11965 	uint_t			count;
11966 	uint_t			nbytes;
11967 	uchar_t			*bytes;
11968 	uint16_t		lun_num;
11969 	uint32_t		tgt_id;
11970 	char			**conf_wwn;
11971 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11972 	uchar_t			barray[FC_WWN_SIZE];
11973 	dev_info_t		*tgt_dip;
11974 	struct fcp_tgt	*ptgt;
11975 	struct fcp_port	*pptr;
11976 	struct fcp_lun	*plun;
11977 
11978 	ASSERT(sd != NULL);
11979 	ASSERT(name != NULL);
11980 
11981 	tgt_dip = sd->sd_dev;
11982 	pptr = ddi_get_soft_state(fcp_softstate,
11983 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11984 	if (pptr == NULL) {
11985 		return (0);
11986 	}
11987 
11988 	ASSERT(tgt_dip != NULL);
11989 
11990 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11991 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11992 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11993 		name[0] = '\0';
11994 		return (0);
11995 	}
11996 
11997 	if (nlun == 0) {
11998 		ddi_prop_free(lun);
11999 		return (0);
12000 	}
12001 
12002 	lun_num = lun[0];
12003 	ddi_prop_free(lun);
12004 
12005 	/*
12006 	 * Lookup for .conf WWN property
12007 	 */
12008 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12009 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12010 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12011 		ASSERT(count >= 1);
12012 
12013 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12014 		ddi_prop_free(conf_wwn);
12015 		mutex_enter(&pptr->port_mutex);
12016 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12017 			mutex_exit(&pptr->port_mutex);
12018 			return (0);
12019 		}
12020 		ptgt = plun->lun_tgt;
12021 		mutex_exit(&pptr->port_mutex);
12022 
12023 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12024 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12025 
12026 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12027 		    ptgt->tgt_hard_addr != 0) {
12028 			tgt_id = (uint32_t)fcp_alpa_to_switch[
12029 			    ptgt->tgt_hard_addr];
12030 		} else {
12031 			tgt_id = ptgt->tgt_d_id;
12032 		}
12033 
12034 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12035 		    TARGET_PROP, tgt_id);
12036 	}
12037 
12038 	/* get our port-wwn property */
12039 	bytes = NULL;
12040 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12041 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12042 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12043 		if (bytes != NULL) {
12044 			ddi_prop_free(bytes);
12045 		}
12046 		return (0);
12047 	}
12048 
12049 	for (i = 0; i < FC_WWN_SIZE; i++) {
12050 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12051 	}
12052 
12053 	/* Stick in the address of the form "wWWN,LUN" */
12054 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12055 
12056 	ASSERT(numChars < len);
12057 	if (numChars >= len) {
12058 		fcp_log(CE_WARN, pptr->port_dip,
12059 		    "!fcp_scsi_get_name: "
12060 		    "name parameter length too small, it needs to be %d",
12061 		    numChars+1);
12062 	}
12063 
12064 	ddi_prop_free(bytes);
12065 
12066 	return (1);
12067 }
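
/*
 * Example of the unit address built above, using a hypothetical WWN and
 * LUN (the real values come from the port-wwn and lun properties):
 *
 *	char name[MAXNAMELEN];
 *
 *	(void) snprintf(name, sizeof (name), "w%s,%x",
 *	    "2100001b32901234", 0x2);
 *	... name is now "w2100001b32901234,2" ...
 */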
12068 
12069 
12070 /*
12071  * called by the transport to get the SCSI target id value, returning
12072  * it in "name"
12073  *
12074  * this isn't needed/used on sun4m
12075  *
12076  * return 1 for success else return 0
12077  */
12078 /* ARGSUSED */
12079 static int
12080 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12081 {
12082 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
12083 	struct fcp_tgt	*ptgt;
12084 	int    numChars;
12085 
12086 	if (plun == NULL) {
12087 		return (0);
12088 	}
12089 
12090 	if ((ptgt = plun->lun_tgt) == NULL) {
12091 		return (0);
12092 	}
12093 
12094 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12095 
12096 	ASSERT(numChars < len);
12097 	if (numChars >= len) {
12098 		fcp_log(CE_WARN, NULL,
12099 		    "!fcp_scsi_get_bus_addr: "
12100 		    "name parameter length too small, it needs to be %d",
12101 		    numChars+1);
12102 	}
12103 
12104 	return (1);
12105 }
12106 
12107 
12108 /*
12109  * called internally to reset the link where the specified port lives
12110  */
12111 static int
12112 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12113 {
12114 	la_wwn_t		wwn;
12115 	struct fcp_lun	*plun;
12116 	struct fcp_tgt	*ptgt;
12117 
12118 	/* disable restart of lip if we're suspended */
12119 	mutex_enter(&pptr->port_mutex);
12120 
12121 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12122 	    FCP_STATE_POWER_DOWN)) {
12123 		mutex_exit(&pptr->port_mutex);
12124 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12125 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12126 		    "fcp_linkreset, fcp%d: link reset "
12127 		    "disabled due to DDI_SUSPEND",
12128 		    ddi_get_instance(pptr->port_dip));
12129 		return (FC_FAILURE);
12130 	}
12131 
12132 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12133 		mutex_exit(&pptr->port_mutex);
12134 		return (FC_SUCCESS);
12135 	}
12136 
12137 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12138 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12139 
12140 	/*
12141 	 * If ap == NULL assume local link reset.
12142 	 */
12143 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12144 		plun = ADDR2LUN(ap);
12145 		ptgt = plun->lun_tgt;
12146 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12147 	} else {
12148 		bzero((caddr_t)&wwn, sizeof (wwn));
12149 	}
12150 	mutex_exit(&pptr->port_mutex);
12151 
12152 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12153 }
12154 
12155 
12156 /*
12157  * called from fcp_port_attach() to resume a port
12158  * return DDI_* success/failure status
12159  * acquires and releases the global mutex
12160  * acquires and releases the port mutex
12161  */
12162 /*ARGSUSED*/
12163 
12164 static int
12165 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12166     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12167 {
12168 	int			res = DDI_FAILURE; /* default result */
12169 	struct fcp_port	*pptr;		/* port state ptr */
12170 	uint32_t		alloc_cnt;
12171 	uint32_t		max_cnt;
12172 	fc_portmap_t		*tmp_list = NULL;
12173 
12174 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12175 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12176 	    instance);
12177 
12178 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12179 		cmn_err(CE_WARN, "fcp: bad soft state");
12180 		return (res);
12181 	}
12182 
12183 	mutex_enter(&pptr->port_mutex);
12184 	switch (cmd) {
12185 	case FC_CMD_RESUME:
12186 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12187 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12188 		break;
12189 
12190 	case FC_CMD_POWER_UP:
12191 		/*
12192 		 * If the port is DDI_SUSPENded, defer rediscovery
12193 		 * until DDI_RESUME occurs
12194 		 */
12195 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12196 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12197 			mutex_exit(&pptr->port_mutex);
12198 			return (DDI_SUCCESS);
12199 		}
12200 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12201 	}
12202 	pptr->port_id = s_id;
12203 	pptr->port_state = FCP_STATE_INIT;
12204 	mutex_exit(&pptr->port_mutex);
12205 
12206 	/*
12207 	 * Make a copy of ulp_port_info as fctl allocates
12208 	 * a temp struct.
12209 	 */
12210 	(void) fcp_cp_pinfo(pptr, pinfo);
12211 
12212 	mutex_enter(&fcp_global_mutex);
12213 	if (fcp_watchdog_init++ == 0) {
12214 		fcp_watchdog_tick = fcp_watchdog_timeout *
12215 		    drv_usectohz(1000000);
12216 		fcp_watchdog_id = timeout(fcp_watch,
12217 		    NULL, fcp_watchdog_tick);
12218 	}
12219 	mutex_exit(&fcp_global_mutex);
12220 
12221 	/*
12222 	 * Handle various topologies and link states.
12223 	 */
12224 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12225 	case FC_STATE_OFFLINE:
12226 		/*
12227 		 * Wait for ONLINE, at which time a state
12228 		 * change will cause a statec_callback
12229 		 */
12230 		res = DDI_SUCCESS;
12231 		break;
12232 
12233 	case FC_STATE_ONLINE:
12234 
12235 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12236 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12237 			res = DDI_SUCCESS;
12238 			break;
12239 		}
12240 
12241 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12242 		    !fcp_enable_auto_configuration) {
12243 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12244 			if (tmp_list == NULL) {
12245 				if (!alloc_cnt) {
12246 					res = DDI_SUCCESS;
12247 				}
12248 				break;
12249 			}
12250 			max_cnt = alloc_cnt;
12251 		} else {
12252 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12253 
12254 			alloc_cnt = FCP_MAX_DEVICES;
12255 
12256 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12257 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12258 			    KM_NOSLEEP)) == NULL) {
12259 				fcp_log(CE_WARN, pptr->port_dip,
12260 				    "!fcp%d: failed to allocate portmap",
12261 				    instance);
12262 				break;
12263 			}
12264 
12265 			max_cnt = alloc_cnt;
12266 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12267 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12268 			    FC_SUCCESS) {
12269 				caddr_t msg;
12270 
12271 				(void) fc_ulp_error(res, &msg);
12272 
12273 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12274 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12275 				    "resume failed getportmap: reason=0x%x",
12276 				    res);
12277 
12278 				fcp_log(CE_WARN, pptr->port_dip,
12279 				    "!failed to get port map : %s", msg);
12280 				break;
12281 			}
12282 			if (max_cnt > alloc_cnt) {
12283 				alloc_cnt = max_cnt;
12284 			}
12285 		}
12286 
12287 		/*
12288 		 * do the SCSI device discovery and create
12289 		 * the devinfos
12290 		 */
12291 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12292 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12293 		    max_cnt, pptr->port_id);
12294 
12295 		res = DDI_SUCCESS;
12296 		break;
12297 
12298 	default:
12299 		fcp_log(CE_WARN, pptr->port_dip,
12300 		    "!fcp%d: invalid port state at attach=0x%x",
12301 		    instance, pptr->port_phys_state);
12302 
12303 		mutex_enter(&pptr->port_mutex);
12304 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12305 		mutex_exit(&pptr->port_mutex);
12306 		res = DDI_SUCCESS;
12307 
12308 		break;
12309 	}
12310 
12311 	if (tmp_list != NULL) {
12312 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12313 	}
12314 
12315 	return (res);
12316 }
12317 
12318 
12319 static void
12320 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12321 {
12322 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12323 	pptr->port_dip = pinfo->port_dip;
12324 	pptr->port_fp_handle = pinfo->port_handle;
12325 	if (pinfo->port_acc_attr != NULL) {
12326 		/*
12327 		 * FCA supports DMA
12328 		 */
12329 		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12330 		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12331 		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12332 		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12333 	}
12334 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12335 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12336 	pptr->port_phys_state = pinfo->port_state;
12337 	pptr->port_topology = pinfo->port_flags;
12338 	pptr->port_reset_action = pinfo->port_reset_action;
12339 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12340 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12341 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12342 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12343 
12344 	/* Clear FMA caps to avoid fm-capability ereport */
12345 	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12346 		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12347 	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12348 		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12349 	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12350 		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12351 }
12352 
12353 /*
12354  * If the element's wait field is set to 1 then
12355  * another thread is waiting for the operation to complete.  Once
12356  * it is complete, the waiting thread is signaled and the element is
12357  * freed by the waiting thread.  If the element's wait field is set to 0,
12358  * the element is freed here.
12359  */
12360 static void
12361 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12362 {
12363 	ASSERT(elem != NULL);
12364 	mutex_enter(&elem->mutex);
12365 	elem->result = result;
12366 	if (elem->wait) {
12367 		elem->wait = 0;
12368 		cv_signal(&elem->cv);
12369 		mutex_exit(&elem->mutex);
12370 	} else {
12371 		mutex_exit(&elem->mutex);
12372 		cv_destroy(&elem->cv);
12373 		mutex_destroy(&elem->mutex);
12374 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12375 	}
12376 }
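
/*
 * The waiting side of the protocol described above looks roughly like
 * this (a hedged sketch; the real waiters elsewhere in this file
 * dispatch fcp_hp_task() with the element as the taskq argument):
 *
 *	mutex_enter(&elem->mutex);
 *	while (elem->wait) {
 *		cv_wait(&elem->cv, &elem->mutex);
 *	}
 *	result = elem->result;
 *	mutex_exit(&elem->mutex);
 *	cv_destroy(&elem->cv);
 *	mutex_destroy(&elem->mutex);
 *	kmem_free(elem, sizeof (struct fcp_hp_elem));
 */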
12377 
12378 /*
12379  * This function is invoked from the taskq thread to allocate
12380  * devinfo nodes and to online/offline them.
12381  */
12382 static void
12383 fcp_hp_task(void *arg)
12384 {
12385 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12386 	struct fcp_lun	*plun = elem->lun;
12387 	struct fcp_port		*pptr = elem->port;
12388 	int			result;
12389 
12390 	ASSERT(elem->what == FCP_ONLINE ||
12391 	    elem->what == FCP_OFFLINE ||
12392 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12393 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12394 
12395 	mutex_enter(&pptr->port_mutex);
12396 	mutex_enter(&plun->lun_mutex);
12397 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12398 	    plun->lun_event_count != elem->event_cnt) ||
12399 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12400 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12401 		mutex_exit(&plun->lun_mutex);
12402 		mutex_exit(&pptr->port_mutex);
12403 		fcp_process_elem(elem, NDI_FAILURE);
12404 		return;
12405 	}
12406 	mutex_exit(&plun->lun_mutex);
12407 	mutex_exit(&pptr->port_mutex);
12408 
12409 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12410 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12411 	fcp_process_elem(elem, result);
12412 }
12413 
12414 
12415 static child_info_t *
12416 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12417     int tcount)
12418 {
12419 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12420 
12421 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12422 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12423 
12424 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12425 		/*
12426 		 * The child has not been created yet.  Create the child
12427 		 * device based on the per-LUN flags.
12428 		 */
12429 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12430 			plun->lun_cip =
12431 			    CIP(fcp_create_dip(plun, lcount, tcount));
12432 			plun->lun_mpxio = 0;
12433 		} else {
12434 			plun->lun_cip =
12435 			    CIP(fcp_create_pip(plun, lcount, tcount));
12436 			plun->lun_mpxio = 1;
12437 		}
12438 	} else {
12439 		plun->lun_cip = cip;
12440 	}
12441 
12442 	return (plun->lun_cip);
12443 }
12444 
12445 
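/*
 * Check whether the given child devinfo node is still a child of the
 * port's devinfo node.  Returns FC_SUCCESS if it is found among the
 * port's children and FC_FAILURE otherwise.
 */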
12446 static int
12447 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12448 {
12449 	int		rval = FC_FAILURE;
12450 	dev_info_t	*pdip;
12451 	struct dev_info	*dip;
12452 	int		circular;
12453 
12454 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12455 
12456 	pdip = plun->lun_tgt->tgt_port->port_dip;
12457 
12458 	if (plun->lun_cip == NULL) {
12459 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12460 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12461 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12462 		    "plun: %p lun state: %x num: %d target state: %x",
12463 		    plun, plun->lun_state, plun->lun_num,
12464 		    plun->lun_tgt->tgt_port->port_state);
12465 		return (rval);
12466 	}
12467 	ndi_devi_enter(pdip, &circular);
12468 	dip = DEVI(pdip)->devi_child;
12469 	while (dip) {
12470 		if (dip == DEVI(cdip)) {
12471 			rval = FC_SUCCESS;
12472 			break;
12473 		}
12474 		dip = dip->devi_sibling;
12475 	}
12476 	ndi_devi_exit(pdip, circular);
12477 	return (rval);
12478 }
12479 
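/*
 * Check whether the child (devinfo or pathinfo, depending on whether
 * the LUN is under MPxIO) passed in is still present for this LUN.
 */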
12480 static int
12481 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12482 {
12483 	int		rval = FC_FAILURE;
12484 
12485 	ASSERT(plun != NULL);
12486 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12487 
12488 	if (plun->lun_mpxio == 0) {
12489 		rval = fcp_is_dip_present(plun, DIP(cip));
12490 	} else {
12491 		rval = fcp_is_pip_present(plun, PIP(cip));
12492 	}
12493 
12494 	return (rval);
12495 }
12496 
12497 /*
12498  *     Function: fcp_create_dip
12499  *
12500  *  Description: Creates a dev_info_t structure for the LUN specified by the
12501  *		 caller.
12502  *
12503  *     Argument: plun		Lun structure
12504  *		 link_cnt	Link state count.
12505  *		 tgt_cnt	Target state change count.
12506  *
12507  * Return Value: NULL if it failed
12508  *		 dev_info_t structure address if it succeeded
12509  *
12510  *	Context: Kernel context
12511  */
12512 static dev_info_t *
12513 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12514 {
12515 	int			failure = 0;
12516 	uint32_t		tgt_id;
12517 	uint64_t		sam_lun;
12518 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12519 	struct fcp_port	*pptr = ptgt->tgt_port;
12520 	dev_info_t		*pdip = pptr->port_dip;
12521 	dev_info_t		*cdip = NULL;
12522 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12523 	char			*nname = NULL;
12524 	char			**compatible = NULL;
12525 	int			ncompatible;
12526 	char			*scsi_binding_set;
12527 	char			t_pwwn[17];
12528 
12529 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12530 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12531 
12532 	/* get the 'scsi-binding-set' property */
12533 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12534 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12535 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12536 		scsi_binding_set = NULL;
12537 	}
12538 
12539 	/* determine the node name and compatible */
12540 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12541 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12542 	if (scsi_binding_set) {
12543 		ddi_prop_free(scsi_binding_set);
12544 	}
12545 
12546 	if (nname == NULL) {
12547 #ifdef	DEBUG
12548 		cmn_err(CE_WARN, "%s%d: no driver for "
12549 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12550 		    "	 compatible: %s",
12551 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12552 		    ptgt->tgt_port_wwn.raw_wwn[0],
12553 		    ptgt->tgt_port_wwn.raw_wwn[1],
12554 		    ptgt->tgt_port_wwn.raw_wwn[2],
12555 		    ptgt->tgt_port_wwn.raw_wwn[3],
12556 		    ptgt->tgt_port_wwn.raw_wwn[4],
12557 		    ptgt->tgt_port_wwn.raw_wwn[5],
12558 		    ptgt->tgt_port_wwn.raw_wwn[6],
12559 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12560 		    *compatible);
12561 #endif	/* DEBUG */
12562 		failure++;
12563 		goto end_of_fcp_create_dip;
12564 	}
12565 
12566 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12567 
12568 	/*
12569 	 * If the old_dip does not match the cdip, some property has
12570 	 * changed.  Since we'll be using the cdip, we need to offline
12571 	 * the old_dip.  If the state contains FCP_LUN_CHANGED, the dtype
12572 	 * for the device has been updated; offline the old device and
12573 	 * create a new device with the new device type.
12574 	 * Refer to bug: 4764752
12575 	 */
12576 	if (old_dip && (cdip != old_dip ||
12577 	    plun->lun_state & FCP_LUN_CHANGED)) {
12578 		plun->lun_state &= ~(FCP_LUN_INIT);
12579 		mutex_exit(&plun->lun_mutex);
12580 		mutex_exit(&pptr->port_mutex);
12581 
12582 		mutex_enter(&ptgt->tgt_mutex);
12583 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12584 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12585 		mutex_exit(&ptgt->tgt_mutex);
12586 
12587 #ifdef DEBUG
12588 		if (cdip != NULL) {
12589 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12590 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12591 			    "Old dip=%p; New dip=%p don't match", old_dip,
12592 			    cdip);
12593 		} else {
12594 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12595 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12596 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12597 		}
12598 #endif
12599 
12600 		mutex_enter(&pptr->port_mutex);
12601 		mutex_enter(&plun->lun_mutex);
12602 	}
12603 
12604 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12605 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12606 		if (ndi_devi_alloc(pptr->port_dip, nname,
12607 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12608 			failure++;
12609 			goto end_of_fcp_create_dip;
12610 		}
12611 	}
12612 
12613 	/*
12614 	 * Previously all the properties for the devinfo were destroyed here
12615 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12616 	 * the devid property (and other properties established by the target
12617 	 * driver or framework) which the code does not always recreate, this
12618 	 * call was removed.
12619 	 * This opens a theoretical possibility that we may return with a
12620 	 * stale devid on the node if the scsi entity behind the fibre channel
12621 	 * lun has changed.
12622 	 */
12623 
12624 	/* decorate the node with compatible */
12625 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12626 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12627 		failure++;
12628 		goto end_of_fcp_create_dip;
12629 	}
12630 
12631 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12632 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12633 		failure++;
12634 		goto end_of_fcp_create_dip;
12635 	}
12636 
12637 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12638 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12639 		failure++;
12640 		goto end_of_fcp_create_dip;
12641 	}
12642 
12643 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12644 	t_pwwn[16] = '\0';
12645 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12646 	    != DDI_PROP_SUCCESS) {
12647 		failure++;
12648 		goto end_of_fcp_create_dip;
12649 	}
12650 
12651 	/*
12652 	 * If there is no hard address, we might have to deal with that
12653 	 * by using the WWN.  That said, it is important to recognize
12654 	 * this problem early so that ssd can be informed of the right
12655 	 * interconnect type.
12656 	 */
12657 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12658 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12659 	} else {
12660 		tgt_id = ptgt->tgt_d_id;
12661 	}
12662 
12663 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12664 	    tgt_id) != DDI_PROP_SUCCESS) {
12665 		failure++;
12666 		goto end_of_fcp_create_dip;
12667 	}
12668 
12669 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12670 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12671 		failure++;
12672 		goto end_of_fcp_create_dip;
12673 	}
12674 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12675 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12676 	    sam_lun) != DDI_PROP_SUCCESS) {
12677 		failure++;
12678 		goto end_of_fcp_create_dip;
12679 	}
12680 
12681 end_of_fcp_create_dip:
12682 	scsi_hba_nodename_compatible_free(nname, compatible);
12683 
12684 	if (cdip != NULL && failure) {
12685 		(void) ndi_prop_remove_all(cdip);
12686 		(void) ndi_devi_free(cdip);
12687 		cdip = NULL;
12688 	}
12689 
12690 	return (cdip);
12691 }
12692 
12693 /*
12694  *     Function: fcp_create_pip
12695  *
12696  *  Description: Creates a Path Id for the LUN specified by the caller.
12697  *
12698  *     Argument: plun		Lun structure
12699  *		 link_cnt	Link state count.
12700  *		 tgt_cnt	Target state count.
12701  *
12702  * Return Value: NULL if it failed
12703  *		 mdi_pathinfo_t structure address if it succeeded
12704  *
12705  *	Context: Kernel context
12706  */
12707 static mdi_pathinfo_t *
12708 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12709 {
12710 	int			i;
12711 	char			buf[MAXNAMELEN];
12712 	char			uaddr[MAXNAMELEN];
12713 	int			failure = 0;
12714 	uint32_t		tgt_id;
12715 	uint64_t		sam_lun;
12716 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12717 	struct fcp_port	*pptr = ptgt->tgt_port;
12718 	dev_info_t		*pdip = pptr->port_dip;
12719 	mdi_pathinfo_t		*pip = NULL;
12720 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12721 	char			*nname = NULL;
12722 	char			**compatible = NULL;
12723 	int			ncompatible;
12724 	char			*scsi_binding_set;
12725 	char			t_pwwn[17];
12726 
12727 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12728 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12729 
12730 	scsi_binding_set = "vhci";
12731 
12732 	/* determine the node name and compatible */
12733 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12734 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12735 
12736 	if (nname == NULL) {
12737 #ifdef	DEBUG
12738 		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12739 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12740 		    "	 compatible: %s",
12741 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12742 		    ptgt->tgt_port_wwn.raw_wwn[0],
12743 		    ptgt->tgt_port_wwn.raw_wwn[1],
12744 		    ptgt->tgt_port_wwn.raw_wwn[2],
12745 		    ptgt->tgt_port_wwn.raw_wwn[3],
12746 		    ptgt->tgt_port_wwn.raw_wwn[4],
12747 		    ptgt->tgt_port_wwn.raw_wwn[5],
12748 		    ptgt->tgt_port_wwn.raw_wwn[6],
12749 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12750 		    *compatible);
12751 #endif	/* DEBUG */
12752 		failure++;
12753 		goto end_of_fcp_create_pip;
12754 	}
12755 
12756 	pip = fcp_find_existing_pip(plun, pdip);
12757 
12758 	/*
12759 	 * If the old_pip does not match the new pip, some property has
12760 	 * changed.  Since we'll be using the new pip, we need to offline
12761 	 * the old_pip.  If the state contains FCP_LUN_CHANGED, the dtype
12762 	 * for the device has been updated; offline the old path and
12763 	 * create a new one with the new device type.
12764 	 * Refer to bug: 4764752
12765 	 */
12766 	if (old_pip && (pip != old_pip ||
12767 	    plun->lun_state & FCP_LUN_CHANGED)) {
12768 		plun->lun_state &= ~(FCP_LUN_INIT);
12769 		mutex_exit(&plun->lun_mutex);
12770 		mutex_exit(&pptr->port_mutex);
12771 
12772 		mutex_enter(&ptgt->tgt_mutex);
12773 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12774 		    FCP_OFFLINE, lcount, tcount,
12775 		    NDI_DEVI_REMOVE, 0);
12776 		mutex_exit(&ptgt->tgt_mutex);
12777 
12778 		if (pip != NULL) {
12779 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12780 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12781 			    "Old pip=%p; New pip=%p don't match",
12782 			    old_pip, pip);
12783 		} else {
12784 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12785 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12786 			    "Old pip=%p; New pip=NULL don't match",
12787 			    old_pip);
12788 		}
12789 
12790 		mutex_enter(&pptr->port_mutex);
12791 		mutex_enter(&plun->lun_mutex);
12792 	}
12793 
12794 	/*
12795 	 * Since FC_WWN_SIZE is 8 bytes, and unlike lun_guid_size it
12796 	 * does not depend on the target, the same truncation should
12797 	 * not happen here UNLESS the standards change the FC_WWN_SIZE
12798 	 * value to something larger than MAXNAMELEN (currently 255
12799 	 * bytes).
12800 	 */
12801 
12802 	for (i = 0; i < FC_WWN_SIZE; i++) {
12803 		(void) sprintf(&buf[i << 1], "%02x",
12804 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12805 	}
12806 
12807 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12808 	    buf, plun->lun_num);
12809 
12810 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12811 		/*
12812 		 * Release the locks before calling into
12813 		 * mdi_pi_alloc_compatible() since that can call back
12814 		 * into fcp, which can lead to a deadlock
12815 		 * (see bug # 4870272).
12816 		 *
12817 		 * Basically, what we are trying to avoid is the scenario
12818 		 * where one thread does ndi_devi_enter() and then tries to
12819 		 * grab fcp_mutex while another does it the other way round.
12820 		 *
12821 		 * But before we do that, make sure that nobody releases the
12822 		 * port in the meantime.  We can do this by setting a flag.
12823 		 */
12824 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12825 		pptr->port_state |= FCP_STATE_IN_MDI;
12826 		mutex_exit(&plun->lun_mutex);
12827 		mutex_exit(&pptr->port_mutex);
12828 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12829 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12830 			fcp_log(CE_WARN, pptr->port_dip,
12831 			    "!path alloc failed:0x%p", plun);
12832 			mutex_enter(&pptr->port_mutex);
12833 			mutex_enter(&plun->lun_mutex);
12834 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12835 			failure++;
12836 			goto end_of_fcp_create_pip;
12837 		}
12838 		mutex_enter(&pptr->port_mutex);
12839 		mutex_enter(&plun->lun_mutex);
12840 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12841 	} else {
12842 		(void) mdi_prop_remove(pip, NULL);
12843 	}
12844 
12845 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12846 
12847 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12848 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12849 	    != DDI_PROP_SUCCESS) {
12850 		failure++;
12851 		goto end_of_fcp_create_pip;
12852 	}
12853 
12854 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12855 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12856 	    != DDI_PROP_SUCCESS) {
12857 		failure++;
12858 		goto end_of_fcp_create_pip;
12859 	}
12860 
12861 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12862 	t_pwwn[16] = '\0';
12863 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12864 	    != DDI_PROP_SUCCESS) {
12865 		failure++;
12866 		goto end_of_fcp_create_pip;
12867 	}
12868 
12869 	/*
12870 	 * If there is no hard address, we might have to deal with that
12871 	 * by using the WWN.  That said, it is important to recognize
12872 	 * this problem early so that ssd can be informed of the right
12873 	 * interconnect type.
12874 	 */
12875 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12876 	    ptgt->tgt_hard_addr != 0) {
12877 		tgt_id = (uint32_t)
12878 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12879 	} else {
12880 		tgt_id = ptgt->tgt_d_id;
12881 	}
12882 
12883 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12884 	    != DDI_PROP_SUCCESS) {
12885 		failure++;
12886 		goto end_of_fcp_create_pip;
12887 	}
12888 
12889 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12890 	    != DDI_PROP_SUCCESS) {
12891 		failure++;
12892 		goto end_of_fcp_create_pip;
12893 	}
12894 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12895 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12896 	    != DDI_PROP_SUCCESS) {
12897 		failure++;
12898 		goto end_of_fcp_create_pip;
12899 	}
12900 
12901 end_of_fcp_create_pip:
12902 	scsi_hba_nodename_compatible_free(nname, compatible);
12903 
12904 	if (pip != NULL && failure) {
12905 		(void) mdi_prop_remove(pip, NULL);
12906 		mutex_exit(&plun->lun_mutex);
12907 		mutex_exit(&pptr->port_mutex);
12908 		(void) mdi_pi_free(pip, 0);
12909 		mutex_enter(&pptr->port_mutex);
12910 		mutex_enter(&plun->lun_mutex);
12911 		pip = NULL;
12912 	}
12913 
12914 	return (pip);
12915 }
12916 
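/*
 * Walk the children of the port's devinfo node looking for an existing
 * node whose node name, node WWN, port WWN, target and LUN properties
 * match the LUN passed in.  Returns the matching dev_info_t, or NULL if
 * no such child exists.
 */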
12917 static dev_info_t *
12918 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12919 {
12920 	uint_t			nbytes;
12921 	uchar_t			*bytes;
12922 	uint_t			nwords;
12923 	uint32_t		tgt_id;
12924 	int			*words;
12925 	dev_info_t		*cdip;
12926 	dev_info_t		*ndip;
12927 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12928 	struct fcp_port	*pptr = ptgt->tgt_port;
12929 	int			circular;
12930 
12931 	ndi_devi_enter(pdip, &circular);
12932 
12933 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12934 	while ((cdip = ndip) != NULL) {
12935 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12936 
12937 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12938 			continue;
12939 		}
12940 
12941 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12942 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12943 		    &nbytes) != DDI_PROP_SUCCESS) {
12944 			continue;
12945 		}
12946 
12947 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12948 			if (bytes != NULL) {
12949 				ddi_prop_free(bytes);
12950 			}
12951 			continue;
12952 		}
12953 		ASSERT(bytes != NULL);
12954 
12955 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12956 			ddi_prop_free(bytes);
12957 			continue;
12958 		}
12959 
12960 		ddi_prop_free(bytes);
12961 
12962 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12963 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12964 		    &nbytes) != DDI_PROP_SUCCESS) {
12965 			continue;
12966 		}
12967 
12968 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12969 			if (bytes != NULL) {
12970 				ddi_prop_free(bytes);
12971 			}
12972 			continue;
12973 		}
12974 		ASSERT(bytes != NULL);
12975 
12976 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12977 			ddi_prop_free(bytes);
12978 			continue;
12979 		}
12980 
12981 		ddi_prop_free(bytes);
12982 
12983 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12984 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12985 		    &nwords) != DDI_PROP_SUCCESS) {
12986 			continue;
12987 		}
12988 
12989 		if (nwords != 1 || words == NULL) {
12990 			if (words != NULL) {
12991 				ddi_prop_free(words);
12992 			}
12993 			continue;
12994 		}
12995 		ASSERT(words != NULL);
12996 
12997 		/*
12998 		 * If there is no hard address, we might have to deal with
12999 		 * that by using the WWN.  That said, it is important to
13000 		 * recognize this problem early so that ssd can be informed
13001 		 * of the right interconnect type.
13002 		 */
13003 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
13004 		    ptgt->tgt_hard_addr != 0) {
13005 			tgt_id =
13006 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
13007 		} else {
13008 			tgt_id = ptgt->tgt_d_id;
13009 		}
13010 
13011 		if (tgt_id != (uint32_t)*words) {
13012 			ddi_prop_free(words);
13013 			continue;
13014 		}
13015 		ddi_prop_free(words);
13016 
13017 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13018 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13019 		    &nwords) != DDI_PROP_SUCCESS) {
13020 			continue;
13021 		}
13022 
13023 		if (nwords != 1 || words == NULL) {
13024 			if (words != NULL) {
13025 				ddi_prop_free(words);
13026 			}
13027 			continue;
13028 		}
13029 		ASSERT(words != NULL);
13030 
13031 		if (plun->lun_num == (uint16_t)*words) {
13032 			ddi_prop_free(words);
13033 			break;
13034 		}
13035 		ddi_prop_free(words);
13036 	}
13037 	ndi_devi_exit(pdip, circular);
13038 
13039 	return (cdip);
13040 }
13041 
13042 
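/*
 * Check whether the given pathinfo node is the path MDI knows about for
 * this LUN's unit address (built from the target port WWN and LUN
 * number) and GUID.  Returns FC_SUCCESS on a match and FC_FAILURE
 * otherwise.
 */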
13043 static int
13044 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13045 {
13046 	dev_info_t	*pdip;
13047 	char		buf[MAXNAMELEN];
13048 	char		uaddr[MAXNAMELEN];
13049 	int		rval = FC_FAILURE;
13050 
13051 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13052 
13053 	pdip = plun->lun_tgt->tgt_port->port_dip;
13054 
13055 	/*
13056 	 * Check whether pip (and not plun->lun_cip) is NULL.  plun->lun_cip
13057 	 * can be non-NULL even when the LUN is no longer there, as in the
13058 	 * case where a LUN is configured and then deleted on the device end
13059 	 * (the T3/T4 case).  In such cases, pip will be NULL.
13060 	 *
13061 	 * If the device generates an RSCN, the LUN ends up getting offlined
13062 	 * when it disappears and a new LUN gets created when it is
13063 	 * rediscovered on the device.  If we checked lun_cip here, the LUN
13064 	 * would not end up getting onlined since this function would return
13065 	 * FC_SUCCESS.
13066 	 *
13067 	 * The behavior is different on other devices.  For instance, on an
13068 	 * HDS no RSCN was generated by the device, but the next I/O generated
13069 	 * a check condition and rediscovery got triggered that way.  In such
13070 	 * cases, this path is not exercised.
13071 	 */
13072 	if (pip == NULL) {
13073 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13074 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
13075 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
13076 		    "plun: %p lun state: %x num: %d target state: %x",
13077 		    plun, plun->lun_state, plun->lun_num,
13078 		    plun->lun_tgt->tgt_port->port_state);
13079 		return (rval);
13080 	}
13081 
13082 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13083 
13084 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13085 
13086 	if (plun->lun_old_guid) {
13087 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13088 			rval = FC_SUCCESS;
13089 		}
13090 	} else {
13091 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13092 			rval = FC_SUCCESS;
13093 		}
13094 	}
13095 	return (rval);
13096 }
13097 
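/*
 * Look up an existing pathinfo node for this LUN using its GUID and
 * unit address (built from the target port WWN and LUN number).
 */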
13098 static mdi_pathinfo_t *
13099 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13100 {
13101 	char			buf[MAXNAMELEN];
13102 	char			uaddr[MAXNAMELEN];
13103 	mdi_pathinfo_t		*pip;
13104 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13105 	struct fcp_port	*pptr = ptgt->tgt_port;
13106 
13107 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13108 
13109 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13110 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13111 
13112 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13113 
13114 	return (pip);
13115 }
13116 
13117 
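/*
 * Online the child node (devinfo or pathinfo) of the LUN.  For non-MPxIO
 * LUNs this goes through ndi_devi_online() or ndi_devi_bind_driver();
 * for MPxIO LUNs it goes through mdi_pi_online(), falling back to a
 * devinfo node if MPxIO does not support the device.  Called with the
 * port and LUN mutexes held; they are dropped internally and held again
 * on return.
 */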
13118 static int
13119 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13120     int tcount, int flags, int *circ)
13121 {
13122 	int			rval;
13123 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13124 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13125 	dev_info_t		*cdip = NULL;
13126 
13127 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13128 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13129 
13130 	if (plun->lun_cip == NULL) {
13131 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13132 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13133 		    "fcp_online_child: plun->lun_cip is NULL: "
13134 		    "plun: %p state: %x num: %d target state: %x",
13135 		    plun, plun->lun_state, plun->lun_num,
13136 		    plun->lun_tgt->tgt_port->port_state);
13137 		return (NDI_FAILURE);
13138 	}
13139 again:
13140 	if (plun->lun_mpxio == 0) {
13141 		cdip = DIP(cip);
13142 		mutex_exit(&plun->lun_mutex);
13143 		mutex_exit(&pptr->port_mutex);
13144 
13145 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13146 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13147 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13148 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13149 
13150 		/*
13151 		 * We could check for FCP_LUN_INIT here, but the chances
13152 		 * of getting here when it's already in FCP_LUN_INIT
13153 		 * are rare, and a duplicate ndi_devi_online wouldn't
13154 		 * hurt either (as the node would already have been
13155 		 * in CF2).
13156 		 */
13157 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13158 			rval = ndi_devi_bind_driver(cdip, flags);
13159 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13160 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13161 			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13162 		} else {
13163 			rval = ndi_devi_online(cdip, flags);
13164 		}
13165 
13166 		/*
13167 		 * We log the message into the trace buffer if the device
13168 		 * is "ses" and into syslog for any other device type.
13169 		 * This prevents the ndi_devi_online failure message that
13170 		 * would otherwise appear for V880/A5K ses devices.
13171 		 */
13172 		if (rval == NDI_SUCCESS) {
13173 			mutex_enter(&ptgt->tgt_mutex);
13174 			plun->lun_state |= FCP_LUN_INIT;
13175 			mutex_exit(&ptgt->tgt_mutex);
13176 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13177 			fcp_log(CE_NOTE, pptr->port_dip,
13178 			    "!ndi_devi_online:"
13179 			    " failed for %s: target=%x lun=%x %x",
13180 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13181 			    plun->lun_num, rval);
13182 		} else {
13183 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13184 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13185 			    " !ndi_devi_online:"
13186 			    " failed for %s: target=%x lun=%x %x",
13187 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13188 			    plun->lun_num, rval);
13189 		}
13190 	} else {
13191 		cdip = mdi_pi_get_client(PIP(cip));
13192 		mutex_exit(&plun->lun_mutex);
13193 		mutex_exit(&pptr->port_mutex);
13194 
13195 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13196 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13197 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13198 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13199 
13200 		/*
13201 		 * Hold path and exit phci to avoid deadlock with power
13202 		 * management code during mdi_pi_online.
13203 		 */
13204 		mdi_hold_path(PIP(cip));
13205 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13206 
13207 		rval = mdi_pi_online(PIP(cip), flags);
13208 
13209 		mdi_devi_enter_phci(pptr->port_dip, circ);
13210 		mdi_rele_path(PIP(cip));
13211 
13212 		if (rval == MDI_SUCCESS) {
13213 			mutex_enter(&ptgt->tgt_mutex);
13214 			plun->lun_state |= FCP_LUN_INIT;
13215 			mutex_exit(&ptgt->tgt_mutex);
13216 
13217 			/*
13218 			 * Clear MPxIO path permanent disable in case
13219 			 * fcp hotplug dropped the offline event.
13220 			 */
13221 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13222 
13223 		} else if (rval == MDI_NOT_SUPPORTED) {
13224 			child_info_t	*old_cip = cip;
13225 
13226 			/*
13227 			 * MPxIO does not support this device yet.
13228 			 * Enumerate in legacy mode.
13229 			 */
13230 			mutex_enter(&pptr->port_mutex);
13231 			mutex_enter(&plun->lun_mutex);
13232 			plun->lun_mpxio = 0;
13233 			plun->lun_cip = NULL;
13234 			cdip = fcp_create_dip(plun, lcount, tcount);
13235 			plun->lun_cip = cip = CIP(cdip);
13236 			if (cip == NULL) {
13237 				fcp_log(CE_WARN, pptr->port_dip,
13238 				    "!fcp_online_child: "
13239 				    "Create devinfo failed for LU=%p", plun);
13240 				mutex_exit(&plun->lun_mutex);
13241 
13242 				mutex_enter(&ptgt->tgt_mutex);
13243 				plun->lun_state |= FCP_LUN_OFFLINE;
13244 				mutex_exit(&ptgt->tgt_mutex);
13245 
13246 				mutex_exit(&pptr->port_mutex);
13247 
13248 				/*
13249 				 * free the mdi_pathinfo node
13250 				 */
13251 				(void) mdi_pi_free(PIP(old_cip), 0);
13252 			} else {
13253 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13254 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13255 				    "fcp_online_child: creating devinfo "
13256 				    "node 0x%p for plun 0x%p",
13257 				    cip, plun);
13258 				mutex_exit(&plun->lun_mutex);
13259 				mutex_exit(&pptr->port_mutex);
13260 				/*
13261 				 * free the mdi_pathinfo node
13262 				 */
13263 				(void) mdi_pi_free(PIP(old_cip), 0);
13264 				mutex_enter(&pptr->port_mutex);
13265 				mutex_enter(&plun->lun_mutex);
13266 				goto again;
13267 			}
13268 		} else {
13269 			if (cdip) {
13270 				fcp_log(CE_NOTE, pptr->port_dip,
13271 				    "!fcp_online_child: mdi_pi_online:"
13272 				    " failed for %s: target=%x lun=%x %x",
13273 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13274 				    plun->lun_num, rval);
13275 			}
13276 		}
13277 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13278 	}
13279 
13280 	if (rval == NDI_SUCCESS) {
13281 		if (cdip) {
13282 			(void) ndi_event_retrieve_cookie(
13283 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13284 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13285 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13286 			    cdip, fcp_insert_eid, NULL);
13287 		}
13288 	}
13289 	mutex_enter(&pptr->port_mutex);
13290 	mutex_enter(&plun->lun_mutex);
13291 	return (rval);
13292 }
13293 
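/*
 * Offline the child node (devinfo or pathinfo) of the LUN.  For MPxIO
 * LUNs the path is offlined through mdi_pi_offline() and freed if
 * NDI_DEVI_REMOVE is set.  Called with the port and LUN mutexes held;
 * they are dropped internally and held again on return.
 */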
13294 /* ARGSUSED */
13295 static int
13296 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13297     int tcount, int flags, int *circ)
13298 {
13299 	int rval;
13300 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13301 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13302 	dev_info_t		*cdip;
13303 
13304 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13305 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13306 
13307 	if (plun->lun_cip == NULL) {
13308 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13309 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13310 		    "fcp_offline_child: plun->lun_cip is NULL: "
13311 		    "plun: %p lun state: %x num: %d target state: %x",
13312 		    plun, plun->lun_state, plun->lun_num,
13313 		    plun->lun_tgt->tgt_port->port_state);
13314 		return (NDI_FAILURE);
13315 	}
13316 
13317 	if (plun->lun_mpxio == 0) {
13318 		cdip = DIP(cip);
13319 		mutex_exit(&plun->lun_mutex);
13320 		mutex_exit(&pptr->port_mutex);
13321 		rval = ndi_devi_offline(DIP(cip), flags);
13322 		if (rval != NDI_SUCCESS) {
13323 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13324 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13325 			    "fcp_offline_child: ndi_devi_offline failed "
13326 			    "rval=%x cip=%p", rval, cip);
13327 		}
13328 	} else {
13329 		cdip = mdi_pi_get_client(PIP(cip));
13330 		mutex_exit(&plun->lun_mutex);
13331 		mutex_exit(&pptr->port_mutex);
13332 
13333 		/*
13334 		 * Exit phci to avoid deadlock with power management code
13335 		 * during mdi_pi_offline
13336 		 */
13337 		mdi_hold_path(PIP(cip));
13338 		mdi_devi_exit_phci(pptr->port_dip, *circ);
13339 
13340 		rval = mdi_pi_offline(PIP(cip), flags);
13341 
13342 		mdi_devi_enter_phci(pptr->port_dip, circ);
13343 		mdi_rele_path(PIP(cip));
13344 
13345 		if (rval == MDI_SUCCESS) {
13346 			/*
13347 			 * Clear MPxIO path permanent disable as the path is
13348 			 * already offlined.
13349 			 */
13350 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13351 
13352 			if (flags & NDI_DEVI_REMOVE) {
13353 				(void) mdi_pi_free(PIP(cip), 0);
13354 			}
13355 		} else {
13356 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13357 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13358 			    "fcp_offline_child: mdi_pi_offline failed "
13359 			    "rval=%x cip=%p", rval, cip);
13360 		}
13361 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13362 	}
13363 
13364 	mutex_enter(&ptgt->tgt_mutex);
13365 	plun->lun_state &= ~FCP_LUN_INIT;
13366 	mutex_exit(&ptgt->tgt_mutex);
13367 
13368 	mutex_enter(&pptr->port_mutex);
13369 	mutex_enter(&plun->lun_mutex);
13370 
13371 	if (rval == NDI_SUCCESS) {
13372 		cdip = NULL;
13373 		if (flags & NDI_DEVI_REMOVE) {
13374 			/*
13375 			 * If the guid of the LUN changes, lun_cip will not
13376 			 * equal cip, and after offlining the LUN with the
13377 			 * old guid, we should keep lun_cip since it's the cip
13378 			 * of the LUN with the new guid.
13379 			 * Otherwise remove our reference to the child node.
13380 			 */
13381 			if (plun->lun_cip == cip) {
13382 				plun->lun_cip = NULL;
13383 			}
13384 			if (plun->lun_old_guid) {
13385 				kmem_free(plun->lun_old_guid,
13386 				    plun->lun_old_guid_size);
13387 				plun->lun_old_guid = NULL;
13388 				plun->lun_old_guid_size = 0;
13389 			}
13390 		}
13391 	}
13392 
13393 	if (cdip) {
13394 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13395 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13396 		    " target=%x lun=%x", "ndi_offline",
13397 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13398 	}
13399 
13400 	return (rval);
13401 }
13402 
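/*
 * Remove the child node of the LUN: free the devinfo node for non-MPxIO
 * LUNs, or offline and free the pathinfo node for MPxIO LUNs, then clear
 * lun_cip.  Called with the port, target and LUN mutexes held; they are
 * dropped and reacquired for the MPxIO case.
 */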
13403 static void
13404 fcp_remove_child(struct fcp_lun *plun)
13405 {
13406 	int circ;
13407 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13408 
13409 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13410 		if (plun->lun_mpxio == 0) {
13411 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13412 			(void) ndi_devi_free(DIP(plun->lun_cip));
13413 		} else {
13414 			mutex_exit(&plun->lun_mutex);
13415 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13416 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13417 
13418 			mdi_devi_enter(
13419 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13420 
13421 			/*
13422 			 * Exit phci to avoid deadlock with power management
13423 			 * code during mdi_pi_offline
13424 			 */
13425 			mdi_hold_path(PIP(plun->lun_cip));
13426 			mdi_devi_exit_phci(
13427 			    plun->lun_tgt->tgt_port->port_dip, circ);
13428 			(void) mdi_pi_offline(PIP(plun->lun_cip),
13429 			    NDI_DEVI_REMOVE);
13430 			mdi_devi_enter_phci(
13431 			    plun->lun_tgt->tgt_port->port_dip, &circ);
13432 			mdi_rele_path(PIP(plun->lun_cip));
13433 
13434 			mdi_devi_exit(
13435 			    plun->lun_tgt->tgt_port->port_dip, circ);
13436 
13437 			FCP_TRACE(fcp_logq,
13438 			    plun->lun_tgt->tgt_port->port_instbuf,
13439 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13440 			    "lun=%p pip freed %p", plun, plun->lun_cip);
13441 			(void) mdi_prop_remove(PIP(plun->lun_cip), NULL);
13442 			(void) mdi_pi_free(PIP(plun->lun_cip), 0);
13443 
13444 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13445 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13446 			mutex_enter(&plun->lun_mutex);
13447 		}
13448 	}
13449 
13450 	plun->lun_cip = NULL;
13451 }
13452 
13453 /*
13454  * called when a timeout occurs
13455  *
13456  * can be scheduled during an attach or resume (if not already running)
13457  *
13458  * one timeout is set up for all ports
13459  *
13460  * acquires and releases the global mutex
13461  */
13462 /*ARGSUSED*/
13463 static void
13464 fcp_watch(void *arg)
13465 {
13466 	struct fcp_port	*pptr;
13467 	struct fcp_ipkt	*icmd;
13468 	struct fcp_ipkt	*nicmd;
13469 	struct fcp_pkt	*cmd;
13470 	struct fcp_pkt	*ncmd;
13471 	struct fcp_pkt	*tail;
13472 	struct fcp_pkt	*pcmd;
13473 	struct fcp_pkt	*save_head;
13474 	struct fcp_port	*save_port;
13475 
13476 	/* increment global watchdog time */
13477 	fcp_watchdog_time += fcp_watchdog_timeout;
13478 
13479 	mutex_enter(&fcp_global_mutex);
13480 
13481 	/* scan each port in our list */
13482 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13483 		save_port = fcp_port_head;
13484 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13485 		mutex_exit(&fcp_global_mutex);
13486 
13487 		mutex_enter(&pptr->port_mutex);
13488 		if (pptr->port_ipkt_list == NULL &&
13489 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13490 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13491 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13492 			mutex_exit(&pptr->port_mutex);
13493 			mutex_enter(&fcp_global_mutex);
13494 			goto end_of_watchdog;
13495 		}
13496 
13497 		/*
13498 		 * Check whether any targets need to be offlined.
13499 		 */
13500 		if (pptr->port_offline_tgts) {
13501 			fcp_scan_offline_tgts(pptr);
13502 		}
13503 
13504 		/*
13505 		 * Check whether any LUNs need to be offlined.
13506 		 */
13507 		if (pptr->port_offline_luns) {
13508 			fcp_scan_offline_luns(pptr);
13509 		}
13510 
13511 		/*
13512 		 * Check whether any targets or LUNs need to be reset.
13513 		 */
13514 		if (pptr->port_reset_list) {
13515 			fcp_check_reset_delay(pptr);
13516 		}
13517 
13518 		mutex_exit(&pptr->port_mutex);
13519 
13520 		/*
13521 		 * This is where the pending commands (pkt) are checked for
13522 		 * timeout.
13523 		 */
13524 		mutex_enter(&pptr->port_pkt_mutex);
13525 		tail = pptr->port_pkt_tail;
13526 
13527 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13528 		    cmd != NULL; cmd = ncmd) {
13529 			ncmd = cmd->cmd_next;
13530 			/*
13531 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13532 			 * must be set.
13533 			 */
13534 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13535 			/*
13536 			 * FCP_INVALID_TIMEOUT is set for those commands
13537 			 * that need to be failed, mostly cmds that could
13538 			 * not be queued down within the "timeout" value.
13539 			 * cmd->cmd_timeout is used to try to requeue the
13540 			 * command regularly.
13541 			 */
13542 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13543 				/*
13544 				 * This command hasn't timed out yet.  Let's
13545 				 * go to the next one.
13546 				 */
13547 				pcmd = cmd;
13548 				goto end_of_loop;
13549 			}
13550 
13551 			if (cmd == pptr->port_pkt_head) {
13552 				ASSERT(pcmd == NULL);
13553 				pptr->port_pkt_head = cmd->cmd_next;
13554 			} else {
13555 				ASSERT(pcmd != NULL);
13556 				pcmd->cmd_next = cmd->cmd_next;
13557 			}
13558 
13559 			if (cmd == pptr->port_pkt_tail) {
13560 				ASSERT(cmd->cmd_next == NULL);
13561 				pptr->port_pkt_tail = pcmd;
13562 				if (pcmd) {
13563 					pcmd->cmd_next = NULL;
13564 				}
13565 			}
13566 			cmd->cmd_next = NULL;
13567 
13568 			/*
13569 			 * Save the current head before dropping the
13570 			 * mutex.  If the head doesn't remain the
13571 			 * same after reacquiring the mutex, just
13572 			 * bail out and revisit on the next tick.
13573 			 *
13574 			 * Note: the tail pointer can change as commands
13575 			 * get requeued after a failure to retransport.
13576 			 */
13577 			save_head = pptr->port_pkt_head;
13578 			mutex_exit(&pptr->port_pkt_mutex);
13579 
13580 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13581 			    FCP_INVALID_TIMEOUT) {
13582 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13583 				struct fcp_lun	*plun;
13584 				struct fcp_tgt	*ptgt;
13585 
13586 				plun = ADDR2LUN(&pkt->pkt_address);
13587 				ptgt = plun->lun_tgt;
13588 
13589 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13590 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13591 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13592 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13593 
13594 				cmd->cmd_state == FCP_PKT_ABORTING ?
13595 				    fcp_fail_cmd(cmd, CMD_RESET,
13596 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13597 				    CMD_TIMEOUT, STAT_ABORTED);
13598 			} else {
13599 				fcp_retransport_cmd(pptr, cmd);
13600 			}
13601 			mutex_enter(&pptr->port_pkt_mutex);
13602 			if (save_head && save_head != pptr->port_pkt_head) {
13603 				/*
13604 				 * Looks like the linked list got changed
13605 				 * (this mostly happens when the OFFLINE LUN
13606 				 * code starts returning overflow queue
13607 				 * commands in parallel).  So bail out and
13608 				 * revisit during the next tick.
13609 				 */
13610 				break;
13611 			}
13612 		end_of_loop:
13613 			/*
13614 			 * Scan only up to the previously known tail pointer
13615 			 * to avoid excessive processing; lots of new packets
13616 			 * could have been added to the tail or the old ones
13617 			 * re-queued.
13618 			 */
13619 			if (cmd == tail) {
13620 				break;
13621 			}
13622 		}
13623 		mutex_exit(&pptr->port_pkt_mutex);
13624 
13625 		mutex_enter(&pptr->port_mutex);
13626 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13627 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13628 
13629 			nicmd = icmd->ipkt_next;
13630 			if ((icmd->ipkt_restart != 0) &&
13631 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13632 				/* packet has not timed out */
13633 				continue;
13634 			}
13635 
13636 			/* time for packet re-transport */
13637 			if (icmd == pptr->port_ipkt_list) {
13638 				pptr->port_ipkt_list = icmd->ipkt_next;
13639 				if (pptr->port_ipkt_list) {
13640 					pptr->port_ipkt_list->ipkt_prev =
13641 					    NULL;
13642 				}
13643 			} else {
13644 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13645 				if (icmd->ipkt_next) {
13646 					icmd->ipkt_next->ipkt_prev =
13647 					    icmd->ipkt_prev;
13648 				}
13649 			}
13650 			icmd->ipkt_next = NULL;
13651 			icmd->ipkt_prev = NULL;
13652 			mutex_exit(&pptr->port_mutex);
13653 
13654 			if (fcp_is_retryable(icmd)) {
13655 				fc_ulp_rscn_info_t *rscnp =
13656 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13657 				    pkt_ulp_rscn_infop;
13658 
13659 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13660 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13661 				    "%x to D_ID=%x Retrying..",
13662 				    icmd->ipkt_opcode,
13663 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13664 
13665 				/*
13666 				 * Update the RSCN count in the packet
13667 				 * before resending.
13668 				 */
13669 
13670 				if (rscnp != NULL) {
13671 					rscnp->ulp_rscn_count =
13672 					    fc_ulp_get_rscn_count(pptr->
13673 					    port_fp_handle);
13674 				}
13675 
13676 				mutex_enter(&pptr->port_mutex);
13677 				mutex_enter(&ptgt->tgt_mutex);
13678 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13679 					mutex_exit(&ptgt->tgt_mutex);
13680 					mutex_exit(&pptr->port_mutex);
13681 					switch (icmd->ipkt_opcode) {
13682 						int rval;
13683 					case LA_ELS_PLOGI:
13684 						if ((rval = fc_ulp_login(
13685 						    pptr->port_fp_handle,
13686 						    &icmd->ipkt_fpkt, 1)) ==
13687 						    FC_SUCCESS) {
13688 							mutex_enter(
13689 							    &pptr->port_mutex);
13690 							continue;
13691 						}
13692 						if (fcp_handle_ipkt_errors(
13693 						    pptr, ptgt, icmd, rval,
13694 						    "PLOGI") == DDI_SUCCESS) {
13695 							mutex_enter(
13696 							    &pptr->port_mutex);
13697 							continue;
13698 						}
13699 						break;
13700 
13701 					case LA_ELS_PRLI:
13702 						if ((rval = fc_ulp_issue_els(
13703 						    pptr->port_fp_handle,
13704 						    icmd->ipkt_fpkt)) ==
13705 						    FC_SUCCESS) {
13706 							mutex_enter(
13707 							    &pptr->port_mutex);
13708 							continue;
13709 						}
13710 						if (fcp_handle_ipkt_errors(
13711 						    pptr, ptgt, icmd, rval,
13712 						    "PRLI") == DDI_SUCCESS) {
13713 							mutex_enter(
13714 							    &pptr->port_mutex);
13715 							continue;
13716 						}
13717 						break;
13718 
13719 					default:
13720 						if ((rval = fcp_transport(
13721 						    pptr->port_fp_handle,
13722 						    icmd->ipkt_fpkt, 1)) ==
13723 						    FC_SUCCESS) {
13724 							mutex_enter(
13725 							    &pptr->port_mutex);
13726 							continue;
13727 						}
13728 						if (fcp_handle_ipkt_errors(
13729 						    pptr, ptgt, icmd, rval,
13730 						    "PRLI") == DDI_SUCCESS) {
13731 							mutex_enter(
13732 							    &pptr->port_mutex);
13733 							continue;
13734 						}
13735 						break;
13736 					}
13737 				} else {
13738 					mutex_exit(&ptgt->tgt_mutex);
13739 					mutex_exit(&pptr->port_mutex);
13740 				}
13741 			} else {
13742 				fcp_print_error(icmd->ipkt_fpkt);
13743 			}
13744 
13745 			(void) fcp_call_finish_init(pptr, ptgt,
13746 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13747 			    icmd->ipkt_cause);
13748 			fcp_icmd_free(pptr, icmd);
13749 			mutex_enter(&pptr->port_mutex);
13750 		}
13751 
13752 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13753 		mutex_exit(&pptr->port_mutex);
13754 		mutex_enter(&fcp_global_mutex);
13755 
13756 	end_of_watchdog:
13757 		/*
13758 		 * Bail out early before getting into trouble
13759 		 */
13760 		if (save_port != fcp_port_head) {
13761 			break;
13762 		}
13763 	}
13764 
13765 	if (fcp_watchdog_init > 0) {
13766 		/* reschedule timeout to go again */
13767 		fcp_watchdog_id =
13768 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13769 	}
13770 	mutex_exit(&fcp_global_mutex);
13771 }
13772 
13773 
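/*
 * Walk the port's reset delay list.  For each entry taken off the list,
 * clear the FCP_LUN_BUSY state that was set on the target or LUN at
 * reset time (provided the target change count still matches) and abort
 * any outstanding commands via fcp_abort_all().  Called with the port
 * mutex held.
 */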
13774 static void
13775 fcp_check_reset_delay(struct fcp_port *pptr)
13776 {
13777 	uint32_t		tgt_cnt;
13778 	int			level;
13779 	struct fcp_tgt	*ptgt;
13780 	struct fcp_lun	*plun;
13781 	struct fcp_reset_elem *cur = NULL;
13782 	struct fcp_reset_elem *next = NULL;
13783 	struct fcp_reset_elem *prev = NULL;
13784 
13785 	ASSERT(mutex_owned(&pptr->port_mutex));
13786 
13787 	next = pptr->port_reset_list;
13788 	while ((cur = next) != NULL) {
13789 		next = cur->next;
13790 
13791 		if (cur->timeout < fcp_watchdog_time) {
13792 			prev = cur;
13793 			continue;
13794 		}
13795 
13796 		ptgt = cur->tgt;
13797 		plun = cur->lun;
13798 		tgt_cnt = cur->tgt_cnt;
13799 
13800 		if (ptgt) {
13801 			level = RESET_TARGET;
13802 		} else {
13803 			ASSERT(plun != NULL);
13804 			level = RESET_LUN;
13805 			ptgt = plun->lun_tgt;
13806 		}
13807 		if (prev) {
13808 			prev->next = next;
13809 		} else {
13810 			/*
13811 			 * Because we drop the port mutex while doing aborts
13812 			 * for packets, we can't rely on reset_list pointing
13813 			 * to our head.
13814 			 */
13815 			if (cur == pptr->port_reset_list) {
13816 				pptr->port_reset_list = next;
13817 			} else {
13818 				struct fcp_reset_elem *which;
13819 
13820 				which = pptr->port_reset_list;
13821 				while (which && which->next != cur) {
13822 					which = which->next;
13823 				}
13824 				ASSERT(which != NULL);
13825 
13826 				which->next = next;
13827 				prev = which;
13828 			}
13829 		}
13830 
13831 		kmem_free(cur, sizeof (*cur));
13832 
13833 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13834 			mutex_enter(&ptgt->tgt_mutex);
13835 			if (level == RESET_TARGET) {
13836 				fcp_update_tgt_state(ptgt,
13837 				    FCP_RESET, FCP_LUN_BUSY);
13838 			} else {
13839 				fcp_update_lun_state(plun,
13840 				    FCP_RESET, FCP_LUN_BUSY);
13841 			}
13842 			mutex_exit(&ptgt->tgt_mutex);
13843 
13844 			mutex_exit(&pptr->port_mutex);
13845 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13846 			mutex_enter(&pptr->port_mutex);
13847 		}
13848 	}
13849 }
13850 
13851 
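/*
 * Complete or abort all commands queued or outstanding for the given
 * target (or single LUN, for a LUN-level reset).  Commands still on the
 * port overflow queue are completed with CMD_RESET; issued commands are
 * aborted through fc_ulp_abort(), and those that cannot be aborted are
 * placed back on the overflow queue to be timed out by fcp_watch().
 */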
13852 static void
13853 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13854     struct fcp_lun *rlun, int tgt_cnt)
13855 {
13856 	int			rval;
13857 	struct fcp_lun	*tlun, *nlun;
13858 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13859 	    *cmd = NULL, *head = NULL,
13860 	    *tail = NULL;
13861 
13862 	mutex_enter(&pptr->port_pkt_mutex);
13863 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13864 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13865 		struct fcp_tgt *ptgt = plun->lun_tgt;
13866 
13867 		ncmd = cmd->cmd_next;
13868 
13869 		if (ptgt != ttgt && plun != rlun) {
13870 			pcmd = cmd;
13871 			continue;
13872 		}
13873 
13874 		if (pcmd != NULL) {
13875 			ASSERT(pptr->port_pkt_head != cmd);
13876 			pcmd->cmd_next = ncmd;
13877 		} else {
13878 			ASSERT(cmd == pptr->port_pkt_head);
13879 			pptr->port_pkt_head = ncmd;
13880 		}
13881 		if (pptr->port_pkt_tail == cmd) {
13882 			ASSERT(cmd->cmd_next == NULL);
13883 			pptr->port_pkt_tail = pcmd;
13884 			if (pcmd != NULL) {
13885 				pcmd->cmd_next = NULL;
13886 			}
13887 		}
13888 
13889 		if (head == NULL) {
13890 			head = tail = cmd;
13891 		} else {
13892 			ASSERT(tail != NULL);
13893 			tail->cmd_next = cmd;
13894 			tail = cmd;
13895 		}
13896 		cmd->cmd_next = NULL;
13897 	}
13898 	mutex_exit(&pptr->port_pkt_mutex);
13899 
13900 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13901 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13902 
13903 		ncmd = cmd->cmd_next;
13904 		ASSERT(pkt != NULL);
13905 
13906 		mutex_enter(&pptr->port_mutex);
13907 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13908 			mutex_exit(&pptr->port_mutex);
13909 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13910 			pkt->pkt_reason = CMD_RESET;
13911 			pkt->pkt_statistics |= STAT_DEV_RESET;
13912 			cmd->cmd_state = FCP_PKT_IDLE;
13913 			fcp_post_callback(cmd);
13914 		} else {
13915 			mutex_exit(&pptr->port_mutex);
13916 		}
13917 	}
13918 
13919 	/*
13920 	 * If the FCA will return all the commands in its queue, our work
13921 	 * is easy; just return.
13922 	 */
13923 
13924 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13925 		return;
13926 	}
13927 
13928 	/*
13929 	 * For RESET_LUN get hold of target pointer
13930 	 */
13931 	if (ttgt == NULL) {
13932 		ASSERT(rlun != NULL);
13933 
13934 		ttgt = rlun->lun_tgt;
13935 
13936 		ASSERT(ttgt != NULL);
13937 	}
13938 
13939 	/*
13940 	 * There are some severe race conditions here.
13941 	 * While we are trying to abort the pkt, it might be completing,
13942 	 * so mark it as aborting; if the abort does not succeed, it is
13943 	 * handled in the watch thread.
13944 	 */
13945 	mutex_enter(&ttgt->tgt_mutex);
13946 	nlun = ttgt->tgt_lun;
13947 	mutex_exit(&ttgt->tgt_mutex);
13948 	while ((tlun = nlun) != NULL) {
13949 		int restart = 0;
13950 		if (rlun && rlun != tlun) {
13951 			mutex_enter(&ttgt->tgt_mutex);
13952 			nlun = tlun->lun_next;
13953 			mutex_exit(&ttgt->tgt_mutex);
13954 			continue;
13955 		}
13956 		mutex_enter(&tlun->lun_mutex);
13957 		cmd = tlun->lun_pkt_head;
13958 		while (cmd != NULL) {
13959 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13960 				struct scsi_pkt *pkt;
13961 
13962 				restart = 1;
13963 				cmd->cmd_state = FCP_PKT_ABORTING;
13964 				mutex_exit(&tlun->lun_mutex);
13965 				rval = fc_ulp_abort(pptr->port_fp_handle,
13966 				    cmd->cmd_fp_pkt, KM_SLEEP);
13967 				if (rval == FC_SUCCESS) {
13968 					pkt = cmd->cmd_pkt;
13969 					pkt->pkt_reason = CMD_RESET;
13970 					pkt->pkt_statistics |= STAT_DEV_RESET;
13971 					cmd->cmd_state = FCP_PKT_IDLE;
13972 					fcp_post_callback(cmd);
13973 				} else {
13974 					caddr_t msg;
13975 
13976 					(void) fc_ulp_error(rval, &msg);
13977 
13978 					/*
13979 					 * This part is tricky.  The abort
13980 					 * failed and now the command could
13981 					 * be completing.  The cmd_state ==
13982 					 * FCP_PKT_ABORTING should save
13983 					 * us in fcp_cmd_callback: if we
13984 					 * are already aborting, the
13985 					 * command is ignored there.
13986 					 * Here we leave this packet for 20
13987 					 * sec to be aborted in the
13988 					 * fcp_watch thread.
13989 					 */
13990 					fcp_log(CE_WARN, pptr->port_dip,
13991 					    "!Abort failed after reset %s",
13992 					    msg);
13993 
13994 					cmd->cmd_timeout =
13995 					    fcp_watchdog_time +
13996 					    cmd->cmd_pkt->pkt_time +
13997 					    FCP_FAILED_DELAY;
13998 
13999 					cmd->cmd_fp_pkt->pkt_timeout =
14000 					    FCP_INVALID_TIMEOUT;
14001 					/*
14002 					 * This is a hack: the cmd is put in
14003 					 * the overflow queue so that it can
14004 					 * eventually be timed out.
14005 					 */
14006 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
14007 
14008 					mutex_enter(&pptr->port_pkt_mutex);
14009 					if (pptr->port_pkt_head) {
14010 						ASSERT(pptr->port_pkt_tail
14011 						    != NULL);
14012 						pptr->port_pkt_tail->cmd_next
14013 						    = cmd;
14014 						pptr->port_pkt_tail = cmd;
14015 					} else {
14016 						ASSERT(pptr->port_pkt_tail
14017 						    == NULL);
14018 						pptr->port_pkt_head =
14019 						    pptr->port_pkt_tail
14020 						    = cmd;
14021 					}
14022 					cmd->cmd_next = NULL;
14023 					mutex_exit(&pptr->port_pkt_mutex);
14024 				}
14025 				mutex_enter(&tlun->lun_mutex);
14026 				cmd = tlun->lun_pkt_head;
14027 			} else {
14028 				cmd = cmd->cmd_forw;
14029 			}
14030 		}
14031 		mutex_exit(&tlun->lun_mutex);
14032 
14033 		mutex_enter(&ttgt->tgt_mutex);
14034 		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
14035 		mutex_exit(&ttgt->tgt_mutex);
14036 
14037 		mutex_enter(&pptr->port_mutex);
14038 		if (tgt_cnt != ttgt->tgt_change_cnt) {
14039 			mutex_exit(&pptr->port_mutex);
14040 			return;
14041 		} else {
14042 			mutex_exit(&pptr->port_mutex);
14043 		}
14044 	}
14045 }
14046 
14047 
14048 /*
14049  * unlink the soft state, returning the soft state found (if any)
14050  *
14051  * acquires and releases the global mutex
14052  */
14053 struct fcp_port *
14054 fcp_soft_state_unlink(struct fcp_port *pptr)
14055 {
14056 	struct fcp_port	*hptr;		/* ptr index */
14057 	struct fcp_port	*tptr;		/* prev hptr */
14058 
14059 	mutex_enter(&fcp_global_mutex);
14060 	for (hptr = fcp_port_head, tptr = NULL;
14061 	    hptr != NULL;
14062 	    tptr = hptr, hptr = hptr->port_next) {
14063 		if (hptr == pptr) {
14064 			/* we found a match -- remove this item */
14065 			if (tptr == NULL) {
14066 				/* we're at the head of the list */
14067 				fcp_port_head = hptr->port_next;
14068 			} else {
14069 				tptr->port_next = hptr->port_next;
14070 			}
14071 			break;			/* success */
14072 		}
14073 	}
14074 	if (fcp_port_head == NULL) {
14075 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
14076 	}
14077 	mutex_exit(&fcp_global_mutex);
14078 	return (hptr);
14079 }
14080 
14081 
14082 /*
14083  * called by fcp_scsi_hba_tgt_init to find a LUN given a
14084  * WWN and a LUN number
14085  */
14086 /* ARGSUSED */
14087 static struct fcp_lun *
14088 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14089 {
14090 	int hash;
14091 	struct fcp_tgt *ptgt;
14092 	struct fcp_lun *plun;
14093 
14094 	ASSERT(mutex_owned(&pptr->port_mutex));
14095 
14096 	hash = FCP_HASH(wwn);
14097 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14098 	    ptgt = ptgt->tgt_next) {
14099 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14100 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
14101 			mutex_enter(&ptgt->tgt_mutex);
14102 			for (plun = ptgt->tgt_lun;
14103 			    plun != NULL;
14104 			    plun = plun->lun_next) {
14105 				if (plun->lun_num == lun) {
14106 					mutex_exit(&ptgt->tgt_mutex);
14107 					return (plun);
14108 				}
14109 			}
14110 			mutex_exit(&ptgt->tgt_mutex);
14111 			return (NULL);
14112 		}
14113 	}
14114 	return (NULL);
14115 }
14116 
14117 /*
14118  *     Function: fcp_prepare_pkt
14119  *
14120  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
14121  *		 for fcp_start(). It binds the data or partially maps it.
14122  *		 Builds the FCP header and starts the initialization of the
14123  *		 Fibre Channel header.
14124  *
14125  *     Argument: *pptr		FCP port.
14126  *		 *cmd		FCP packet.
14127  *		 *plun		LUN the command will be sent to.
14128  *
14129  *	Context: User, Kernel and Interrupt context.
14130  */
14131 static void
14132 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14133     struct fcp_lun *plun)
14134 {
14135 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
14136 	struct fcp_tgt		*ptgt = plun->lun_tgt;
14137 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
14138 
14139 	ASSERT(cmd->cmd_pkt->pkt_comp ||
14140 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14141 
14142 	if (cmd->cmd_pkt->pkt_numcookies) {
14143 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14144 			fcmd->fcp_cntl.cntl_read_data = 1;
14145 			fcmd->fcp_cntl.cntl_write_data = 0;
14146 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14147 		} else {
14148 			fcmd->fcp_cntl.cntl_read_data = 0;
14149 			fcmd->fcp_cntl.cntl_write_data = 1;
14150 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14151 		}
14152 
14153 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14154 
14155 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14156 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14157 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14158 
14159 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14160 
14161 		/* FCA needs pkt_datalen to be set */
14162 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14163 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14164 	} else {
14165 		fcmd->fcp_cntl.cntl_read_data = 0;
14166 		fcmd->fcp_cntl.cntl_write_data = 0;
14167 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14168 		fpkt->pkt_datalen = 0;
14169 		fcmd->fcp_data_len = 0;
14170 	}
14171 
14172 	/* set up the Tagged Queuing type */
14173 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14174 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14175 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14176 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14177 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14178 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14179 	} else {
14180 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14181 	}
14182 
14183 	fcmd->fcp_ent_addr = plun->lun_addr;
14184 
14185 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14186 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14187 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14188 	} else {
14189 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14190 	}
14191 
14192 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14193 	cmd->cmd_pkt->pkt_state = 0;
14194 	cmd->cmd_pkt->pkt_statistics = 0;
14195 	cmd->cmd_pkt->pkt_resid = 0;
14196 
14197 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14198 
14199 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14200 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14201 		fpkt->pkt_comp = NULL;
14202 	} else {
14203 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14204 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14205 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14206 		}
14207 		fpkt->pkt_comp = fcp_cmd_callback;
14208 	}
14209 
14210 	mutex_enter(&pptr->port_mutex);
14211 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14212 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14213 	}
14214 	mutex_exit(&pptr->port_mutex);
14215 
14216 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14217 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14218 
14219 	/*
14220 	 * Save a few kernel cycles here
14221 	 */
14222 #ifndef	__lock_lint
14223 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14224 #endif /* __lock_lint */
14225 }
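
/*
 * Illustrative sketch (not part of the driver): how a target driver's
 * scsi_pkt settings end up in the FCP command built by fcp_prepare_pkt()
 * above.  The mapping shown is the one implemented in the routine.
 *
 *	pkt_flags & FLAG_HTAG	-> fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q
 *	pkt_flags & FLAG_OTAG	-> fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED
 *	pkt_flags & FLAG_STAG	-> fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE
 *	otherwise		-> fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED
 *
 *	pkt_numcookies != 0 and (pkt_dma_flags & DDI_DMA_READ)
 *				-> cntl_read_data = 1, FC_PKT_FCP_READ
 *	pkt_numcookies != 0 and no DDI_DMA_READ
 *				-> cntl_write_data = 1, FC_PKT_FCP_WRITE
 *	pkt_numcookies == 0	-> no data phase, FC_PKT_EXCHANGE
 */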
14226 
14227 static void
14228 fcp_post_callback(struct fcp_pkt *cmd)
14229 {
14230 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14231 }
14232 
14233 
14234 /*
14235  * called to do polled I/O by fcp_start()
14236  *
 * return a transport status value, i.e. TRAN_ACCEPT for success
14238  */
14239 static int
14240 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14241 {
14242 	int	rval;
14243 
14244 #ifdef	DEBUG
14245 	mutex_enter(&pptr->port_pkt_mutex);
14246 	pptr->port_npkts++;
14247 	mutex_exit(&pptr->port_pkt_mutex);
14248 #endif /* DEBUG */
14249 
14250 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14251 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14252 	} else {
14253 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14254 	}
14255 
14256 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14257 
14258 	cmd->cmd_state = FCP_PKT_ISSUED;
14259 
14260 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14261 
14262 #ifdef	DEBUG
14263 	mutex_enter(&pptr->port_pkt_mutex);
14264 	pptr->port_npkts--;
14265 	mutex_exit(&pptr->port_pkt_mutex);
14266 #endif /* DEBUG */
14267 
14268 	cmd->cmd_state = FCP_PKT_IDLE;
14269 
14270 	switch (rval) {
14271 	case FC_SUCCESS:
14272 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14273 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14274 			rval = TRAN_ACCEPT;
14275 		} else {
14276 			rval = TRAN_FATAL_ERROR;
14277 		}
14278 		break;
14279 
14280 	case FC_TRAN_BUSY:
14281 		rval = TRAN_BUSY;
14282 		cmd->cmd_pkt->pkt_resid = 0;
14283 		break;
14284 
14285 	case FC_BADPACKET:
14286 		rval = TRAN_BADPKT;
14287 		break;
14288 
14289 	default:
14290 		rval = TRAN_FATAL_ERROR;
14291 		break;
14292 	}
14293 
14294 	return (rval);
14295 }
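
/*
 * Summary sketch (not part of the driver): how fcp_dopoll() above maps the
 * fc_ulp_transport() result onto the transport code handed back to the
 * target driver.
 *
 *	FC_SUCCESS, pkt_state == FC_PKT_SUCCESS	-> TRAN_ACCEPT
 *	FC_SUCCESS, pkt_state != FC_PKT_SUCCESS	-> TRAN_FATAL_ERROR
 *	FC_TRAN_BUSY				-> TRAN_BUSY
 *	FC_BADPACKET				-> TRAN_BADPKT
 *	any other value				-> TRAN_FATAL_ERROR
 */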
14296 
14297 
14298 /*
14299  * called by some of the following transport-called routines to convert
14300  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14301  */
14302 static struct fcp_port *
14303 fcp_dip2port(dev_info_t *dip)
14304 {
14305 	int	instance;
14306 
14307 	instance = ddi_get_instance(dip);
14308 	return (ddi_get_soft_state(fcp_softstate, instance));
14309 }
14310 
14311 
14312 /*
14313  * called internally to return a LUN given a dip
14314  */
14315 struct fcp_lun *
14316 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14317 {
14318 	struct fcp_tgt *ptgt;
14319 	struct fcp_lun *plun;
14320 	int i;
14321 
14322 
14323 	ASSERT(mutex_owned(&pptr->port_mutex));
14324 
14325 	for (i = 0; i < FCP_NUM_HASH; i++) {
14326 		for (ptgt = pptr->port_tgt_hash_table[i];
14327 		    ptgt != NULL;
14328 		    ptgt = ptgt->tgt_next) {
14329 			mutex_enter(&ptgt->tgt_mutex);
14330 			for (plun = ptgt->tgt_lun; plun != NULL;
14331 			    plun = plun->lun_next) {
14332 				mutex_enter(&plun->lun_mutex);
14333 				if (plun->lun_cip == cip) {
14334 					mutex_exit(&plun->lun_mutex);
14335 					mutex_exit(&ptgt->tgt_mutex);
14336 					return (plun); /* match found */
14337 				}
14338 				mutex_exit(&plun->lun_mutex);
14339 			}
14340 			mutex_exit(&ptgt->tgt_mutex);
14341 		}
14342 	}
14343 	return (NULL);				/* no LUN found */
14344 }
14345 
14346 /*
 * pass an element to the hotplug list, kick the hotplug thread,
 * and wait for the element to get processed by the hotplug thread.
 * on return the element has been freed.
 *
 * return zero on success and non-zero on failure
14352  *
14353  * acquires/releases the target mutex
14354  *
14355  */
14356 static int
14357 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14358     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14359 {
14360 	struct fcp_hp_elem	*elem;
14361 	int			rval;
14362 
14363 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14364 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14365 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14366 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14367 		fcp_log(CE_CONT, pptr->port_dip,
14368 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14369 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14370 		return (NDI_FAILURE);
14371 	}
14372 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14373 	mutex_enter(&elem->mutex);
	while (elem->wait) {
		cv_wait(&elem->cv, &elem->mutex);
	}
14379 	rval = (elem->result);
14380 	mutex_exit(&elem->mutex);
14381 	mutex_destroy(&elem->mutex);
14382 	cv_destroy(&elem->cv);
14383 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14384 	return (rval);
14385 }
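
/*
 * Illustrative sketch (not part of the driver): the hand-off protocol the
 * routine above relies on.  The hotplug daemon side (fcp_hp_task(), not
 * shown in this section) is assumed to complete a waited-on element
 * roughly as follows before the waiter wakes up and frees it:
 *
 *	mutex_enter(&elem->mutex);
 *	elem->result = rval;		(NDI_SUCCESS or NDI_FAILURE)
 *	elem->wait = 0;
 *	cv_signal(&elem->cv);
 *	mutex_exit(&elem->mutex);
 */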
14386 
14387 /*
14388  * pass an element to the hotplug list, and then
14389  * kick the hotplug thread
14390  *
 * return a pointer to the queued element if all goes well, else NULL on error
14392  *
14393  * acquires/releases the hotplug mutex
14394  *
14395  * called with the target mutex owned
14396  *
14397  * memory acquired in NOSLEEP mode
 * NOTE: if wait is set to 1 then the caller is responsible for waiting
 *	 for the hp daemon to process the request and is responsible for
 *	 freeing the element
14401  */
14402 static struct fcp_hp_elem *
14403 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14404     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14405 {
14406 	struct fcp_hp_elem	*elem;
14407 	dev_info_t *pdip;
14408 
14409 	ASSERT(pptr != NULL);
14410 	ASSERT(plun != NULL);
14411 	ASSERT(plun->lun_tgt != NULL);
14412 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14413 
14414 	/* create space for a hotplug element */
14415 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14416 	    == NULL) {
14417 		fcp_log(CE_WARN, NULL,
14418 		    "!can't allocate memory for hotplug element");
14419 		return (NULL);
14420 	}
14421 
14422 	/* fill in hotplug element */
14423 	elem->port = pptr;
14424 	elem->lun = plun;
14425 	elem->cip = cip;
14426 	elem->old_lun_mpxio = plun->lun_mpxio;
14427 	elem->what = what;
14428 	elem->flags = flags;
14429 	elem->link_cnt = link_cnt;
14430 	elem->tgt_cnt = tgt_cnt;
14431 	elem->wait = wait;
14432 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14433 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14434 
14435 	/* schedule the hotplug task */
14436 	pdip = pptr->port_dip;
14437 	mutex_enter(&plun->lun_mutex);
14438 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14439 		plun->lun_event_count++;
14440 		elem->event_cnt = plun->lun_event_count;
14441 	}
14442 	mutex_exit(&plun->lun_mutex);
14443 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14444 	    (void *)elem, KM_NOSLEEP) == NULL) {
14445 		mutex_enter(&plun->lun_mutex);
14446 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14447 			plun->lun_event_count--;
14448 		}
		mutex_exit(&plun->lun_mutex);
		mutex_destroy(&elem->mutex);
		cv_destroy(&elem->cv);
		kmem_free(elem, sizeof (*elem));
		return (NULL);
14452 	}
14453 
14454 	return (elem);
14455 }
14456 
14457 
14458 static void
14459 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14460 {
14461 	int			rval;
14462 	struct scsi_address	*ap;
14463 	struct fcp_lun	*plun;
14464 	struct fcp_tgt	*ptgt;
14465 	fc_packet_t	*fpkt;
14466 
14467 	ap = &cmd->cmd_pkt->pkt_address;
14468 	plun = ADDR2LUN(ap);
14469 	ptgt = plun->lun_tgt;
14470 
14471 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14472 
14473 	cmd->cmd_state = FCP_PKT_IDLE;
14474 
14475 	mutex_enter(&pptr->port_mutex);
14476 	mutex_enter(&ptgt->tgt_mutex);
14477 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14478 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14479 		fc_ulp_rscn_info_t *rscnp;
14480 
14481 		cmd->cmd_state = FCP_PKT_ISSUED;
14482 
14483 		/*
14484 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14485 		 * originally NULL, hence we try to set it to the pd pointed
14486 		 * to by the SCSI device we're trying to get to.
14487 		 */
14488 
14489 		fpkt = cmd->cmd_fp_pkt;
14490 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14491 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14492 			/*
14493 			 * We need to notify the transport that we now have a
14494 			 * reference to the remote port handle.
14495 			 */
14496 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14497 		}
14498 
14499 		mutex_exit(&ptgt->tgt_mutex);
14500 		mutex_exit(&pptr->port_mutex);
14501 
14502 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14503 
14504 		/* prepare the packet */
14505 
14506 		fcp_prepare_pkt(pptr, cmd, plun);
14507 
14508 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14509 		    pkt_ulp_rscn_infop;
14510 
14511 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14512 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14513 
14514 		if (rscnp != NULL) {
14515 			rscnp->ulp_rscn_count =
14516 			    fc_ulp_get_rscn_count(pptr->
14517 			    port_fp_handle);
14518 		}
14519 
14520 		rval = fcp_transport(pptr->port_fp_handle,
14521 		    cmd->cmd_fp_pkt, 0);
14522 
14523 		if (rval == FC_SUCCESS) {
14524 			return;
14525 		}
14526 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14527 	} else {
14528 		mutex_exit(&ptgt->tgt_mutex);
14529 		mutex_exit(&pptr->port_mutex);
14530 	}
14531 
14532 	fcp_queue_pkt(pptr, cmd);
14533 }
14534 
14535 
14536 static void
14537 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14538 {
14539 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14540 
14541 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14542 	cmd->cmd_state = FCP_PKT_IDLE;
14543 
14544 	cmd->cmd_pkt->pkt_reason = reason;
14545 	cmd->cmd_pkt->pkt_state = 0;
14546 	cmd->cmd_pkt->pkt_statistics = statistics;
14547 
14548 	fcp_post_callback(cmd);
14549 }
14550 
14551 /*
14552  *     Function: fcp_queue_pkt
14553  *
14554  *  Description: This function queues the packet passed by the caller into
14555  *		 the list of packets of the FCP port.
14556  *
14557  *     Argument: *pptr		FCP port.
14558  *		 *cmd		FCP packet to queue.
14559  *
14560  * Return Value: None
14561  *
14562  *	Context: User, Kernel and Interrupt context.
14563  */
14564 static void
14565 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14566 {
	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14568 
14569 	mutex_enter(&pptr->port_pkt_mutex);
14570 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14571 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14572 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14573 
14574 	/*
	 * zero pkt_time means hang around forever
14576 	 */
14577 	if (cmd->cmd_pkt->pkt_time) {
14578 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14579 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14580 		} else {
14581 			/*
			 * Tell the watch thread to fail the command by
			 * setting the timeout to its highest value
14584 			 */
14585 			cmd->cmd_timeout = fcp_watchdog_time;
14586 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14587 		}
14588 	}
14589 
14590 	if (pptr->port_pkt_head) {
14591 		ASSERT(pptr->port_pkt_tail != NULL);
14592 
14593 		pptr->port_pkt_tail->cmd_next = cmd;
14594 		pptr->port_pkt_tail = cmd;
14595 	} else {
14596 		ASSERT(pptr->port_pkt_tail == NULL);
14597 
14598 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14599 	}
14600 	cmd->cmd_next = NULL;
14601 	mutex_exit(&pptr->port_pkt_mutex);
14602 }
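
/*
 * Worked example (not part of the driver) of the timeout bookkeeping done
 * by fcp_queue_pkt() above when pkt_time is non-zero:
 *
 *	- cmd_timeout is set to fcp_watchdog_time + FCP_QUEUE_DELAY, so the
 *	  queued command only becomes due for the watch thread once the
 *	  queue delay has elapsed.
 *	- If the remaining FCA timeout (cmd_fp_pkt->pkt_timeout) is larger
 *	  than FCP_QUEUE_DELAY, the delay is simply charged against it.
 *	- Otherwise the command can no longer complete within its original
 *	  pkt_time budget, so pkt_timeout is set to FCP_INVALID_TIMEOUT and
 *	  cmd_timeout to fcp_watchdog_time, which makes the watch thread
 *	  fail the command on its next pass.
 */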
14603 
14604 /*
14605  *     Function: fcp_update_targets
14606  *
14607  *  Description: This function applies the specified change of state to all
14608  *		 the targets listed.  The operation applied is 'set'.
14609  *
14610  *     Argument: *pptr		FCP port.
14611  *		 *dev_list	Array of fc_portmap_t structures.
14612  *		 count		Length of dev_list.
14613  *		 state		State bits to update.
14614  *		 cause		Reason for the update.
14615  *
14616  * Return Value: None
14617  *
14618  *	Context: User, Kernel and Interrupt context.
14619  *		 The mutex pptr->port_mutex must be held.
14620  */
14621 static void
14622 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14623     uint32_t count, uint32_t state, int cause)
14624 {
14625 	fc_portmap_t		*map_entry;
14626 	struct fcp_tgt	*ptgt;
14627 
14628 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14629 
14630 	while (count--) {
14631 		map_entry = &(dev_list[count]);
14632 		ptgt = fcp_lookup_target(pptr,
14633 		    (uchar_t *)&(map_entry->map_pwwn));
14634 		if (ptgt == NULL) {
14635 			continue;
14636 		}
14637 
14638 		mutex_enter(&ptgt->tgt_mutex);
14639 		ptgt->tgt_trace = 0;
14640 		ptgt->tgt_change_cnt++;
14641 		ptgt->tgt_statec_cause = cause;
14642 		ptgt->tgt_tmp_cnt = 1;
14643 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14644 		mutex_exit(&ptgt->tgt_mutex);
14645 	}
14646 }
14647 
14648 static int
14649 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14650     int lcount, int tcount, int cause)
14651 {
14652 	int rval;
14653 
14654 	mutex_enter(&pptr->port_mutex);
14655 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14656 	mutex_exit(&pptr->port_mutex);
14657 
14658 	return (rval);
14659 }
14660 
14661 
14662 static int
14663 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14664     int lcount, int tcount, int cause)
14665 {
14666 	int	finish_init = 0;
14667 	int	finish_tgt = 0;
14668 	int	do_finish_init = 0;
14669 	int	rval = FCP_NO_CHANGE;
14670 
14671 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14672 	    cause == FCP_CAUSE_LINK_DOWN) {
14673 		do_finish_init = 1;
14674 	}
14675 
14676 	if (ptgt != NULL) {
14677 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14678 		    FCP_BUF_LEVEL_2, 0,
14679 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14680 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14681 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14682 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14683 		    ptgt->tgt_d_id, ptgt->tgt_done);
14684 
14685 		mutex_enter(&ptgt->tgt_mutex);
14686 
14687 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14688 			rval = FCP_DEV_CHANGE;
14689 			if (do_finish_init && ptgt->tgt_done == 0) {
14690 				ptgt->tgt_done++;
14691 				finish_init = 1;
14692 			}
14693 		} else {
14694 			if (--ptgt->tgt_tmp_cnt <= 0) {
14695 				ptgt->tgt_tmp_cnt = 0;
14696 				finish_tgt = 1;
14697 
14698 				if (do_finish_init) {
14699 					finish_init = 1;
14700 				}
14701 			}
14702 		}
14703 		mutex_exit(&ptgt->tgt_mutex);
14704 	} else {
14705 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14706 		    FCP_BUF_LEVEL_2, 0,
14707 		    "Call Finish Init for NO target");
14708 
14709 		if (do_finish_init) {
14710 			finish_init = 1;
14711 		}
14712 	}
14713 
14714 	if (finish_tgt) {
14715 		ASSERT(ptgt != NULL);
14716 
14717 		mutex_enter(&ptgt->tgt_mutex);
14718 #ifdef	DEBUG
14719 		bzero(ptgt->tgt_tmp_cnt_stack,
14720 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14721 
14722 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14723 		    FCP_STACK_DEPTH);
14724 #endif /* DEBUG */
14725 		mutex_exit(&ptgt->tgt_mutex);
14726 
14727 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14728 	}
14729 
14730 	if (finish_init && lcount == pptr->port_link_cnt) {
14731 		ASSERT(pptr->port_tmp_cnt > 0);
14732 		if (--pptr->port_tmp_cnt == 0) {
14733 			fcp_finish_init(pptr);
14734 		}
14735 	} else if (lcount != pptr->port_link_cnt) {
14736 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14737 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14738 		    "fcp_call_finish_init_held,1: state change occured"
14739 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14740 	}
14741 
14742 	return (rval);
14743 }
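
/*
 * Summary sketch (not part of the driver) of the decisions made by
 * fcp_call_finish_init_held() above:
 *
 *	- If the target's change count no longer matches tcount, the target
 *	  changed under us: FCP_DEV_CHANGE is returned and, on a link
 *	  change/down, the target is marked done so the port-level count
 *	  can still be decremented.
 *	- Otherwise tgt_tmp_cnt is decremented; when it reaches zero the
 *	  target is finished via fcp_finish_tgt().
 *	- When finish_init is set, lcount still matches port_link_cnt and
 *	  port_tmp_cnt drops to zero, fcp_finish_init() completes the
 *	  discovery pass.
 */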
14744 
14745 static void
14746 fcp_reconfigure_luns(void * tgt_handle)
14747 {
14748 	uint32_t		dev_cnt;
14749 	fc_portmap_t		*devlist;
14750 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14751 	struct fcp_port		*pptr = ptgt->tgt_port;
14752 
14753 	/*
14754 	 * If the timer that fires this off got canceled too late, the
14755 	 * target could have been destroyed.
14756 	 */
14757 
14758 	if (ptgt->tgt_tid == NULL) {
14759 		return;
14760 	}
14761 
14762 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14763 	if (devlist == NULL) {
14764 		fcp_log(CE_WARN, pptr->port_dip,
14765 		    "!fcp%d: failed to allocate for portmap",
14766 		    pptr->port_instance);
14767 		return;
14768 	}
14769 
14770 	dev_cnt = 1;
14771 	devlist->map_pd = ptgt->tgt_pd_handle;
14772 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14773 	devlist->map_did.port_id = ptgt->tgt_d_id;
14774 
14775 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14776 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14777 
14778 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14779 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14780 	devlist->map_flags = 0;
14781 
14782 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14783 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14784 
14785 	/*
	 * Clear the tgt_tid now that there are no more references
	 * to the fcp_tgt
14788 	 */
14789 	mutex_enter(&ptgt->tgt_mutex);
14790 	ptgt->tgt_tid = NULL;
14791 	mutex_exit(&ptgt->tgt_mutex);
14792 
14793 	kmem_free(devlist, sizeof (*devlist));
14794 }
14795 
14796 
14797 static void
14798 fcp_free_targets(struct fcp_port *pptr)
14799 {
14800 	int			i;
14801 	struct fcp_tgt	*ptgt;
14802 
14803 	mutex_enter(&pptr->port_mutex);
14804 	for (i = 0; i < FCP_NUM_HASH; i++) {
14805 		ptgt = pptr->port_tgt_hash_table[i];
14806 		while (ptgt != NULL) {
14807 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14808 
14809 			fcp_free_target(ptgt);
14810 			ptgt = next_tgt;
14811 		}
14812 	}
14813 	mutex_exit(&pptr->port_mutex);
14814 }
14815 
14816 
14817 static void
14818 fcp_free_target(struct fcp_tgt *ptgt)
14819 {
14820 	struct fcp_lun	*plun;
14821 	timeout_id_t		tid;
14822 
14823 	mutex_enter(&ptgt->tgt_mutex);
14824 	tid = ptgt->tgt_tid;
14825 
14826 	/*
14827 	 * Cancel any pending timeouts for this target.
14828 	 */
14829 
14830 	if (tid != NULL) {
14831 		/*
14832 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14833 		 * If tgt_tid is NULL, the callback will simply return.
14834 		 */
14835 		ptgt->tgt_tid = NULL;
14836 		mutex_exit(&ptgt->tgt_mutex);
14837 		(void) untimeout(tid);
14838 		mutex_enter(&ptgt->tgt_mutex);
14839 	}
14840 
14841 	plun = ptgt->tgt_lun;
14842 	while (plun != NULL) {
14843 		struct fcp_lun *next_lun = plun->lun_next;
14844 
14845 		fcp_dealloc_lun(plun);
14846 		plun = next_lun;
14847 	}
14848 
14849 	mutex_exit(&ptgt->tgt_mutex);
14850 	fcp_dealloc_tgt(ptgt);
14851 }
14852 
14853 /*
14854  *     Function: fcp_is_retryable
14855  *
14856  *  Description: Indicates if the internal packet is retryable.
14857  *
14858  *     Argument: *icmd		FCP internal packet.
14859  *
14860  * Return Value: 0	Not retryable
14861  *		 1	Retryable
14862  *
14863  *	Context: User, Kernel and Interrupt context
14864  */
14865 static int
14866 fcp_is_retryable(struct fcp_ipkt *icmd)
14867 {
14868 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14869 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14870 		return (0);
14871 	}
14872 
14873 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14874 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14875 }
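
/*
 * Worked example (not part of the driver), with made-up numbers: given
 * that port_deadline is set to fcp_watchdog_time + FCP_ICMD_DEADLINE when
 * discovery starts, an internal packet with pkt_timeout = 10 is retryable
 * only while
 *
 *	fcp_watchdog_time + 10 < port_deadline
 *
 * i.e. while a retry could still complete before the discovery deadline,
 * and never while the port is suspended, powering down or detaching.
 */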
14876 
14877 /*
14878  *     Function: fcp_create_on_demand
14879  *
14880  *     Argument: *pptr		FCP port.
14881  *		 *pwwn		Port WWN.
14882  *
14883  * Return Value: 0	Success
14884  *		 EIO
14885  *		 ENOMEM
14886  *		 EBUSY
14887  *		 EINVAL
14888  *
14889  *	Context: User and Kernel context
14890  */
14891 static int
14892 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14893 {
14894 	int			wait_ms;
14895 	int			tcount;
14896 	int			lcount;
14897 	int			ret;
14898 	int			error;
14899 	int			rval = EIO;
14900 	int			ntries;
14901 	fc_portmap_t		*devlist;
14902 	opaque_t		pd;
14903 	struct fcp_lun		*plun;
14904 	struct fcp_tgt		*ptgt;
14905 	int			old_manual = 0;
14906 
14907 	/* Allocates the fc_portmap_t structure. */
14908 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14909 
14910 	/*
14911 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14912 	 * in the commented statement below:
14913 	 *
14914 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14915 	 *
14916 	 * Below, the deadline for the discovery process is set.
14917 	 */
14918 	mutex_enter(&pptr->port_mutex);
14919 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14920 	mutex_exit(&pptr->port_mutex);
14921 
14922 	/*
14923 	 * We try to find the remote port based on the WWN provided by the
14924 	 * caller.  We actually ask fp/fctl if it has it.
14925 	 */
14926 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14927 	    (la_wwn_t *)pwwn, &error, 1);
14928 
14929 	if (pd == NULL) {
14930 		kmem_free(devlist, sizeof (*devlist));
14931 		return (rval);
14932 	}
14933 
14934 	/*
14935 	 * The remote port was found.  We ask fp/fctl to update our
14936 	 * fc_portmap_t structure.
14937 	 */
14938 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14939 	    (la_wwn_t *)pwwn, devlist);
14940 	if (ret != FC_SUCCESS) {
14941 		kmem_free(devlist, sizeof (*devlist));
14942 		return (rval);
14943 	}
14944 
14945 	/*
	 * The map type field is set to indicate that the creation is being
	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14948 	 */
14949 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14950 
14951 	mutex_enter(&pptr->port_mutex);
14952 
14953 	/*
14954 	 * We check to see if fcp already has a target that describes the
	 * device being created.  If not, one is created.
14956 	 */
14957 	ptgt = fcp_lookup_target(pptr, pwwn);
14958 	if (ptgt == NULL) {
14959 		lcount = pptr->port_link_cnt;
14960 		mutex_exit(&pptr->port_mutex);
14961 
14962 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14963 		if (ptgt == NULL) {
14964 			fcp_log(CE_WARN, pptr->port_dip,
14965 			    "!FC target allocation failed");
14966 			return (ENOMEM);
14967 		}
14968 
14969 		mutex_enter(&pptr->port_mutex);
14970 	}
14971 
14972 	mutex_enter(&ptgt->tgt_mutex);
14973 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14974 	ptgt->tgt_tmp_cnt = 1;
14975 	ptgt->tgt_device_created = 0;
14976 	/*
14977 	 * If fabric and auto config is set but the target was
	 * manually unconfigured, then reset manual_config_only to
14979 	 * 0 so the device will get configured.
14980 	 */
14981 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14982 	    fcp_enable_auto_configuration &&
14983 	    ptgt->tgt_manual_config_only == 1) {
14984 		old_manual = 1;
14985 		ptgt->tgt_manual_config_only = 0;
14986 	}
14987 	mutex_exit(&ptgt->tgt_mutex);
14988 
14989 	fcp_update_targets(pptr, devlist, 1,
14990 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
14991 
14992 	lcount = pptr->port_link_cnt;
14993 	tcount = ptgt->tgt_change_cnt;
14994 
14995 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
14996 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
14997 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14998 		    fcp_enable_auto_configuration && old_manual) {
14999 			mutex_enter(&ptgt->tgt_mutex);
15000 			ptgt->tgt_manual_config_only = 1;
15001 			mutex_exit(&ptgt->tgt_mutex);
15002 		}
15003 
15004 		if (pptr->port_link_cnt != lcount ||
15005 		    ptgt->tgt_change_cnt != tcount) {
15006 			rval = EBUSY;
15007 		}
15008 		mutex_exit(&pptr->port_mutex);
15009 
15010 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15011 		    FCP_BUF_LEVEL_3, 0,
15012 		    "fcp_create_on_demand: mapflags ptgt=%x, "
15013 		    "lcount=%x::port_link_cnt=%x, "
15014 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
15015 		    ptgt, lcount, pptr->port_link_cnt,
15016 		    tcount, ptgt->tgt_change_cnt, rval);
15017 		return (rval);
15018 	}
15019 
15020 	/*
	 * Due to the lack of a synchronization mechanism, we perform
	 * periodic monitoring of our request; because requests
	 * get dropped when another one supersedes them (either because
	 * of a link change or a target change), it is difficult to
	 * provide a clean synchronization mechanism (such as a
	 * semaphore or a condition variable) without exhaustively
	 * rewriting the mainline discovery code of this driver.
15028 	 */
15029 	wait_ms = 500;
15030 
15031 	ntries = fcp_max_target_retries;
15032 
15033 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15034 	    FCP_BUF_LEVEL_3, 0,
15035 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15036 	    "lcount=%x::port_link_cnt=%x, "
15037 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15038 	    "tgt_tmp_cnt =%x",
15039 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15040 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15041 	    ptgt->tgt_tmp_cnt);
15042 
15043 	mutex_enter(&ptgt->tgt_mutex);
15044 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15045 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15046 		mutex_exit(&ptgt->tgt_mutex);
15047 		mutex_exit(&pptr->port_mutex);
15048 
15049 		delay(drv_usectohz(wait_ms * 1000));
15050 
15051 		mutex_enter(&pptr->port_mutex);
15052 		mutex_enter(&ptgt->tgt_mutex);
15053 	}
15054 
15055 
15056 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15057 		rval = EBUSY;
15058 	} else {
15059 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15060 		    FCP_TGT_NODE_PRESENT) {
15061 			rval = 0;
15062 		}
15063 	}
15064 
15065 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15066 	    FCP_BUF_LEVEL_3, 0,
15067 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15068 	    "lcount=%x::port_link_cnt=%x, "
15069 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15070 	    "tgt_tmp_cnt =%x",
15071 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15072 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15073 	    ptgt->tgt_tmp_cnt);
15074 
15075 	if (rval) {
15076 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15077 		    fcp_enable_auto_configuration && old_manual) {
15078 			ptgt->tgt_manual_config_only = 1;
15079 		}
15080 		mutex_exit(&ptgt->tgt_mutex);
15081 		mutex_exit(&pptr->port_mutex);
15082 		kmem_free(devlist, sizeof (*devlist));
15083 
15084 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15085 		    FCP_BUF_LEVEL_3, 0,
15086 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15087 		    "lcount=%x::port_link_cnt=%x, "
15088 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15089 		    "tgt_device_created=%x, tgt D_ID=%x",
15090 		    ntries, ptgt, lcount, pptr->port_link_cnt,
15091 		    tcount, ptgt->tgt_change_cnt, rval,
15092 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
15093 		return (rval);
15094 	}
15095 
15096 	if ((plun = ptgt->tgt_lun) != NULL) {
15097 		tcount = plun->lun_tgt->tgt_change_cnt;
15098 	} else {
15099 		rval = EINVAL;
15100 	}
15101 	lcount = pptr->port_link_cnt;
15102 
15103 	/*
15104 	 * Configuring the target with no LUNs will fail. We
15105 	 * should reset the node state so that it is not
15106 	 * automatically configured when the LUNs are added
15107 	 * to this target.
15108 	 */
15109 	if (ptgt->tgt_lun_cnt == 0) {
15110 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15111 	}
15112 	mutex_exit(&ptgt->tgt_mutex);
15113 	mutex_exit(&pptr->port_mutex);
15114 
15115 	while (plun) {
15116 		child_info_t	*cip;
15117 
15118 		mutex_enter(&plun->lun_mutex);
15119 		cip = plun->lun_cip;
15120 		mutex_exit(&plun->lun_mutex);
15121 
15122 		mutex_enter(&ptgt->tgt_mutex);
15123 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15124 			mutex_exit(&ptgt->tgt_mutex);
15125 
15126 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15127 			    FCP_ONLINE, lcount, tcount,
15128 			    NDI_ONLINE_ATTACH);
15129 			if (rval != NDI_SUCCESS) {
15130 				FCP_TRACE(fcp_logq,
15131 				    pptr->port_instbuf, fcp_trace,
15132 				    FCP_BUF_LEVEL_3, 0,
15133 				    "fcp_create_on_demand: "
15134 				    "pass_to_hp_and_wait failed "
15135 				    "rval=%x", rval);
15136 				rval = EIO;
15137 			} else {
15138 				mutex_enter(&LUN_TGT->tgt_mutex);
15139 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
15140 				    FCP_LUN_BUSY);
15141 				mutex_exit(&LUN_TGT->tgt_mutex);
15142 			}
15143 			mutex_enter(&ptgt->tgt_mutex);
15144 		}
15145 
15146 		plun = plun->lun_next;
15147 		mutex_exit(&ptgt->tgt_mutex);
15148 	}
15149 
15150 	kmem_free(devlist, sizeof (*devlist));
15151 
15152 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15153 	    fcp_enable_auto_configuration && old_manual) {
15154 		mutex_enter(&ptgt->tgt_mutex);
15155 		/* if successful then set manual to 0 */
15156 		if (rval == 0) {
15157 			ptgt->tgt_manual_config_only = 0;
15158 		} else {
15159 			/* reset to 1 so the user has to do the config */
15160 			ptgt->tgt_manual_config_only = 1;
15161 		}
15162 		mutex_exit(&ptgt->tgt_mutex);
15163 	}
15164 
15165 	return (rval);
15166 }
15167 
15168 
15169 static void
15170 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15171 {
15172 	int		count;
15173 	uchar_t		byte;
15174 
15175 	count = 0;
15176 	while (*string) {
15177 		byte = FCP_ATOB(*string); string++;
15178 		byte = byte << 4 | FCP_ATOB(*string); string++;
15179 		bytes[count++] = byte;
15180 
15181 		if (count >= byte_len) {
15182 			break;
15183 		}
15184 	}
15185 }
15186 
15187 static void
15188 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15189 {
15190 	int		i;
15191 
15192 	for (i = 0; i < FC_WWN_SIZE; i++) {
15193 		(void) sprintf(string + (i * 2),
15194 		    "%02x", wwn[i]);
15195 	}
15196 
15197 }
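
/*
 * Example (not part of the driver) of the two conversions above, using an
 * arbitrary WWN:
 *
 *	fcp_ascii_to_wwn("508002000011aabb", bytes, FC_WWN_SIZE)
 *		-> bytes[] = 0x50 0x80 0x02 0x00 0x00 0x11 0xaa 0xbb
 *	fcp_wwn_to_ascii(bytes, string)
 *		-> string = "508002000011aabb"
 *
 * The caller of fcp_wwn_to_ascii() must supply a buffer of at least
 * 2 * FC_WWN_SIZE + 1 bytes for the NUL-terminated string.
 */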
15198 
15199 static void
15200 fcp_print_error(fc_packet_t *fpkt)
15201 {
15202 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15203 	    fpkt->pkt_ulp_private;
15204 	struct fcp_port	*pptr;
15205 	struct fcp_tgt	*ptgt;
15206 	struct fcp_lun	*plun;
15207 	caddr_t			buf;
15208 	int			scsi_cmd = 0;
15209 
15210 	ptgt = icmd->ipkt_tgt;
15211 	plun = icmd->ipkt_lun;
15212 	pptr = ptgt->tgt_port;
15213 
15214 	buf = kmem_zalloc(256, KM_NOSLEEP);
15215 	if (buf == NULL) {
15216 		return;
15217 	}
15218 
15219 	switch (icmd->ipkt_opcode) {
15220 	case SCMD_REPORT_LUN:
15221 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15222 		    " lun=0x%%x failed");
15223 		scsi_cmd++;
15224 		break;
15225 
15226 	case SCMD_INQUIRY_PAGE83:
15227 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15228 		    " lun=0x%%x failed");
15229 		scsi_cmd++;
15230 		break;
15231 
15232 	case SCMD_INQUIRY:
15233 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15234 		    " lun=0x%%x failed");
15235 		scsi_cmd++;
15236 		break;
15237 
15238 	case LA_ELS_PLOGI:
15239 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15240 		break;
15241 
15242 	case LA_ELS_PRLI:
15243 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15244 		break;
15245 	}
15246 
15247 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15248 		struct fcp_rsp		response, *rsp;
15249 		uchar_t			asc, ascq;
15250 		caddr_t			sense_key = NULL;
15251 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15252 
15253 		if (icmd->ipkt_nodma) {
15254 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15255 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15256 			    sizeof (struct fcp_rsp));
15257 		} else {
15258 			rsp = &response;
15259 			bep = &fcp_rsp_err;
15260 
15261 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15262 			    sizeof (struct fcp_rsp));
15263 
15264 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15265 			    bep, fpkt->pkt_resp_acc,
15266 			    sizeof (struct fcp_rsp_info));
15267 		}
15268 
15269 
15270 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15271 			(void) sprintf(buf + strlen(buf),
15272 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15273 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15274 			    " senselen=%%x. Giving up");
15275 
15276 			fcp_log(CE_WARN, pptr->port_dip, buf,
15277 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15278 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15279 			    rsp->fcp_u.fcp_status.reserved_1,
15280 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15281 
15282 			kmem_free(buf, 256);
15283 			return;
15284 		}
15285 
15286 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15287 		    bep->rsp_code != FCP_NO_FAILURE) {
15288 			(void) sprintf(buf + strlen(buf),
15289 			    " FCP Response code = 0x%x", bep->rsp_code);
15290 		}
15291 
15292 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15293 			struct scsi_extended_sense sense_info, *sense_ptr;
15294 
15295 			if (icmd->ipkt_nodma) {
15296 				sense_ptr = (struct scsi_extended_sense *)
15297 				    ((caddr_t)fpkt->pkt_resp +
15298 				    sizeof (struct fcp_rsp) +
15299 				    rsp->fcp_response_len);
15300 			} else {
15301 				sense_ptr = &sense_info;
15302 
15303 				FCP_CP_IN(fpkt->pkt_resp +
15304 				    sizeof (struct fcp_rsp) +
15305 				    rsp->fcp_response_len, &sense_info,
15306 				    fpkt->pkt_resp_acc,
15307 				    sizeof (struct scsi_extended_sense));
15308 			}
15309 
15310 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15311 			    NUM_IMPL_SENSE_KEYS) {
15312 				sense_key = sense_keys[sense_ptr->es_key];
15313 			} else {
15314 				sense_key = "Undefined";
15315 			}
15316 
15317 			asc = sense_ptr->es_add_code;
15318 			ascq = sense_ptr->es_qual_code;
15319 
15320 			(void) sprintf(buf + strlen(buf),
15321 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15322 			    " Giving up");
15323 
15324 			fcp_log(CE_WARN, pptr->port_dip, buf,
15325 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15326 			    asc, ascq);
15327 		} else {
15328 			(void) sprintf(buf + strlen(buf),
15329 			    " : SCSI status=%%x. Giving up");
15330 
15331 			fcp_log(CE_WARN, pptr->port_dip, buf,
15332 			    ptgt->tgt_d_id, plun->lun_num,
15333 			    rsp->fcp_u.fcp_status.scsi_status);
15334 		}
15335 	} else {
15336 		caddr_t state, reason, action, expln;
15337 
15338 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15339 		    &action, &expln);
15340 
15341 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15342 		    " Reason:%%s. Giving up");
15343 
15344 		if (scsi_cmd) {
15345 			fcp_log(CE_WARN, pptr->port_dip, buf,
15346 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15347 		} else {
15348 			fcp_log(CE_WARN, pptr->port_dip, buf,
15349 			    ptgt->tgt_d_id, state, reason);
15350 		}
15351 	}
15352 
15353 	kmem_free(buf, 256);
15354 }
15355 
15356 
15357 static int
15358 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15359     struct fcp_ipkt *icmd, int rval, caddr_t op)
15360 {
15361 	int	ret = DDI_FAILURE;
15362 	char	*error;
15363 
15364 	switch (rval) {
15365 	case FC_DEVICE_BUSY_NEW_RSCN:
15366 		/*
15367 		 * This means that there was a new RSCN that the transport
15368 		 * knows about (which the ULP *may* know about too) but the
15369 		 * pkt that was sent down was related to an older RSCN. So, we
15370 		 * are just going to reset the retry count and deadline and
15371 		 * continue to retry. The idea is that transport is currently
15372 		 * working on the new RSCN and will soon let the ULPs know
15373 		 * about it and when it does the existing logic will kick in
15374 		 * where it will change the tcount to indicate that something
15375 		 * changed on the target. So, rediscovery will start and there
15376 		 * will not be an infinite retry.
15377 		 *
15378 		 * For a full flow of how the RSCN info is transferred back and
15379 		 * forth, see fp.c
15380 		 */
15381 		icmd->ipkt_retries = 0;
15382 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15383 		    FCP_ICMD_DEADLINE;
15384 
15385 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15386 		    FCP_BUF_LEVEL_3, 0,
15387 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15388 		    rval, ptgt->tgt_d_id);
15389 		/* FALLTHROUGH */
15390 
15391 	case FC_STATEC_BUSY:
15392 	case FC_DEVICE_BUSY:
15393 	case FC_PBUSY:
15394 	case FC_FBUSY:
15395 	case FC_TRAN_BUSY:
15396 	case FC_OFFLINE:
15397 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15398 		    FCP_BUF_LEVEL_3, 0,
15399 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15400 		    rval, ptgt->tgt_d_id);
15401 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15402 		    fcp_is_retryable(icmd)) {
15403 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15404 			ret = DDI_SUCCESS;
15405 		}
15406 		break;
15407 
15408 	case FC_LOGINREQ:
15409 		/*
15410 		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed so that a PRLI that fails
		 * with FC_LOGINREQ is handled differently from other ipkts
		 * that fail with FC_LOGINREQ. If a PRLI fails with
		 * FC_LOGINREQ it is a simple matter to turn it into a PLOGI
		 * instead, so that's exactly what we do here.
15416 		 */
15417 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15418 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15419 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15420 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15421 		} else {
15422 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15423 			    FCP_BUF_LEVEL_3, 0,
15424 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15425 			    rval, ptgt->tgt_d_id);
15426 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15427 			    fcp_is_retryable(icmd)) {
15428 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15429 				ret = DDI_SUCCESS;
15430 			}
15431 		}
15432 		break;
15433 
15434 	default:
15435 		mutex_enter(&pptr->port_mutex);
15436 		mutex_enter(&ptgt->tgt_mutex);
15437 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15438 			mutex_exit(&ptgt->tgt_mutex);
15439 			mutex_exit(&pptr->port_mutex);
15440 
15441 			(void) fc_ulp_error(rval, &error);
15442 			fcp_log(CE_WARN, pptr->port_dip,
15443 			    "!Failed to send %s to D_ID=%x error=%s",
15444 			    op, ptgt->tgt_d_id, error);
15445 		} else {
15446 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15447 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15448 			    "fcp_handle_ipkt_errors,1: state change occured"
15449 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15450 			mutex_exit(&ptgt->tgt_mutex);
15451 			mutex_exit(&pptr->port_mutex);
15452 		}
15453 		break;
15454 	}
15455 
15456 	return (ret);
15457 }
15458 
15459 
15460 /*
 * Check for outstanding commands on any LUN of this target
15462  */
15463 static int
15464 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15465 {
15466 	struct	fcp_lun	*plun;
15467 	struct	fcp_pkt	*cmd;
15468 
15469 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15470 		mutex_enter(&plun->lun_mutex);
15471 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15472 		    cmd = cmd->cmd_forw) {
15473 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15474 				mutex_exit(&plun->lun_mutex);
15475 				return (FC_SUCCESS);
15476 			}
15477 		}
15478 		mutex_exit(&plun->lun_mutex);
15479 	}
15480 
15481 	return (FC_FAILURE);
15482 }
15483 
15484 static fc_portmap_t *
15485 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15486 {
15487 	int			i;
15488 	fc_portmap_t		*devlist;
15489 	fc_portmap_t		*devptr = NULL;
15490 	struct fcp_tgt	*ptgt;
15491 
15492 	mutex_enter(&pptr->port_mutex);
15493 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15494 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15495 		    ptgt = ptgt->tgt_next) {
15496 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15497 				++*dev_cnt;
15498 			}
15499 		}
15500 	}
15501 
15502 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15503 	    KM_NOSLEEP);
15504 	if (devlist == NULL) {
15505 		mutex_exit(&pptr->port_mutex);
15506 		fcp_log(CE_WARN, pptr->port_dip,
15507 		    "!fcp%d: failed to allocate for portmap for construct map",
15508 		    pptr->port_instance);
15509 		return (devptr);
15510 	}
15511 
15512 	for (i = 0; i < FCP_NUM_HASH; i++) {
15513 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15514 		    ptgt = ptgt->tgt_next) {
15515 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15516 				int ret;
15517 
15518 				ret = fc_ulp_pwwn_to_portmap(
15519 				    pptr->port_fp_handle,
15520 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15521 				    devlist);
15522 
15523 				if (ret == FC_SUCCESS) {
15524 					devlist++;
15525 					continue;
15526 				}
15527 
15528 				devlist->map_pd = NULL;
15529 				devlist->map_did.port_id = ptgt->tgt_d_id;
15530 				devlist->map_hard_addr.hard_addr =
15531 				    ptgt->tgt_hard_addr;
15532 
15533 				devlist->map_state = PORT_DEVICE_INVALID;
15534 				devlist->map_type = PORT_DEVICE_OLD;
15535 
15536 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15537 				    &devlist->map_nwwn, FC_WWN_SIZE);
15538 
15539 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15540 				    &devlist->map_pwwn, FC_WWN_SIZE);
15541 
15542 				devlist++;
15543 			}
15544 		}
15545 	}
15546 
15547 	mutex_exit(&pptr->port_mutex);
15548 
15549 	return (devptr);
15550 }
15551 /*
 * Inform MPxIO that the LUN is busy and cannot accept regular IO
15553  */
15554 static void
15555 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15556 {
15557 	int i;
15558 	struct fcp_tgt	*ptgt;
15559 	struct fcp_lun	*plun;
15560 
15561 	for (i = 0; i < FCP_NUM_HASH; i++) {
15562 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15563 		    ptgt = ptgt->tgt_next) {
15564 			mutex_enter(&ptgt->tgt_mutex);
15565 			for (plun = ptgt->tgt_lun; plun != NULL;
15566 			    plun = plun->lun_next) {
15567 				if (plun->lun_mpxio &&
15568 				    plun->lun_state & FCP_LUN_BUSY) {
15569 					if (!fcp_pass_to_hp(pptr, plun,
15570 					    plun->lun_cip,
15571 					    FCP_MPXIO_PATH_SET_BUSY,
15572 					    pptr->port_link_cnt,
15573 					    ptgt->tgt_change_cnt, 0, 0)) {
15574 						FCP_TRACE(fcp_logq,
15575 						    pptr->port_instbuf,
15576 						    fcp_trace,
15577 						    FCP_BUF_LEVEL_2, 0,
15578 						    "path_verifybusy: "
15579 						    "disable lun %p failed!",
15580 						    plun);
15581 					}
15582 				}
15583 			}
15584 			mutex_exit(&ptgt->tgt_mutex);
15585 		}
15586 	}
15587 }
15588 
15589 static int
15590 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15591 {
15592 	dev_info_t		*cdip = NULL;
15593 	dev_info_t		*pdip = NULL;
15594 
15595 	ASSERT(plun);
15596 
15597 	mutex_enter(&plun->lun_mutex);
15598 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15599 		mutex_exit(&plun->lun_mutex);
15600 		return (NDI_FAILURE);
15601 	}
15602 	mutex_exit(&plun->lun_mutex);
15603 	cdip = mdi_pi_get_client(PIP(cip));
15604 	pdip = mdi_pi_get_phci(PIP(cip));
15605 
15606 	ASSERT(cdip != NULL);
15607 	ASSERT(pdip != NULL);
15608 
15609 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15610 		/* LUN ready for IO */
15611 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15612 	} else {
		/* LUN is busy and cannot accept IO */
15614 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15615 	}
15616 	return (NDI_SUCCESS);
15617 }
15618 
15619 /*
 * Caller must free the returned string (a buffer of MAXPATHLEN bytes).
 * If the device is offline (instance number of -1), NULL
 * will be returned.
15623  */
15624 static char *
15625 fcp_get_lun_path(struct fcp_lun *plun) {
15626 	dev_info_t	*dip = NULL;
15627 	char	*path = NULL;
15628 	if (plun == NULL) {
15629 		return (NULL);
15630 	}
15631 	if (plun->lun_mpxio == 0) {
15632 		dip = DIP(plun->lun_cip);
15633 	} else {
15634 		dip = mdi_pi_get_client(PIP(plun->lun_cip));
15635 	}
15636 	if (dip == NULL) {
15637 		return (NULL);
15638 	}
15639 	if (ddi_get_instance(dip) < 0) {
15640 		return (NULL);
15641 	}
15642 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15643 	if (path == NULL) {
15644 		return (NULL);
15645 	}
15646 
15647 	(void) ddi_pathname(dip, path);
15648 	/*
15649 	 * In reality, the user wants a fully valid path (one they can open)
15650 	 * but this string is lacking the mount point, and the minor node.
15651 	 * It would be nice if we could "figure these out" somehow
15652 	 * and fill them in.  Otherwise, the userland code has to understand
15653 	 * driver specific details of which minor node is the "best" or
15654 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15655 	 * which tape doesn't rewind)
15656 	 */
15657 	return (path);
15658 }
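
/*
 * Usage sketch (not part of the driver; the caller shown is hypothetical):
 *
 *	char *path = fcp_get_lun_path(plun);
 *	if (path != NULL) {
 *		fcp_log(CE_NOTE, NULL, "!lun path: %s", path);
 *		kmem_free(path, MAXPATHLEN);
 *	}
 */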
15659 
15660 static int
15661 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15662     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15663 {
15664 	int64_t reset_delay;
15665 	int rval, retry = 0;
15666 	struct fcp_port *pptr = fcp_dip2port(parent);
15667 
15668 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15669 	    (ddi_get_lbolt64() - pptr->port_attach_time);
15670 	if (reset_delay < 0) {
15671 		reset_delay = 0;
15672 	}
15673 
15674 	if (fcp_bus_config_debug) {
15675 		flag |= NDI_DEVI_DEBUG;
15676 	}
15677 
15678 	switch (op) {
15679 	case BUS_CONFIG_ONE:
15680 		/*
15681 		 * Retry the command since we need to ensure
15682 		 * the fabric devices are available for root
15683 		 */
15684 		while (retry++ < fcp_max_bus_config_retries) {
15685 			rval =	(ndi_busop_bus_config(parent,
15686 			    flag | NDI_MDI_FALLBACK, op,
15687 			    arg, childp, (clock_t)reset_delay));
15688 			if (rval == 0) {
15689 				return (rval);
15690 			}
15691 		}
15692 
15693 		/*
15694 		 * drain taskq to make sure nodes are created and then
15695 		 * try again.
15696 		 */
15697 		taskq_wait(DEVI(parent)->devi_taskq);
15698 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15699 		    op, arg, childp, 0));
15700 
15701 	case BUS_CONFIG_DRIVER:
15702 	case BUS_CONFIG_ALL: {
15703 		/*
15704 		 * delay till all devices report in (port_tmp_cnt == 0)
15705 		 * or FCP_INIT_WAIT_TIMEOUT
15706 		 */
15707 		mutex_enter(&pptr->port_mutex);
15708 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15709 			(void) cv_timedwait(&pptr->port_config_cv,
15710 			    &pptr->port_mutex,
15711 			    ddi_get_lbolt() + (clock_t)reset_delay);
15712 			reset_delay =
15713 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15714 			    (ddi_get_lbolt64() - pptr->port_attach_time);
15715 		}
15716 		mutex_exit(&pptr->port_mutex);
15717 		/* drain taskq to make sure nodes are created */
15718 		taskq_wait(DEVI(parent)->devi_taskq);
15719 		return (ndi_busop_bus_config(parent, flag, op,
15720 		    arg, childp, 0));
15721 	}
15722 
15723 	default:
15724 		return (NDI_FAILURE);
15725 	}
15726 	/*NOTREACHED*/
15727 }
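
/*
 * Note (not part of the driver): reset_delay above is the portion of
 * FCP_INIT_WAIT_TIMEOUT that has not yet elapsed since the port attached.
 * For instance, if FCP_INIT_WAIT_TIMEOUT corresponds to 60 seconds and
 * the port attached 10 seconds ago, BUS_CONFIG_ONE passes roughly 50
 * seconds worth of ticks to ndi_busop_bus_config() on each retry, and
 * BUS_CONFIG_DRIVER/ALL waits up to the same amount for port_tmp_cnt to
 * reach zero before draining the taskq and configuring.
 */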
15728 
15729 static int
15730 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15731     ddi_bus_config_op_t op, void *arg)
15732 {
15733 	if (fcp_bus_config_debug) {
15734 		flag |= NDI_DEVI_DEBUG;
15735 	}
15736 
15737 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15738 }
15739 
15740 
15741 /*
15742  * Routine to copy GUID into the lun structure.
 * returns 0 if the copy was successful and 1 if it encountered a
 * failure and did not copy the GUID.
15745  */
15746 static int
15747 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15748 {
15749 
	int retval = 0;
	unsigned int len;

	if ((guidp == NULL) || (plun == NULL)) {
		return (1);
	}

	/* add one for the null terminator */
	len = strlen(guidp) + 1;
15758 
15759 	/*
	 * if plun->lun_guid has already been allocated,
	 * then check the size. if the size is exact, reuse
	 * it....if not, free it and allocate the required size.
	 * The reallocation should NOT typically happen
	 * unless the GUID reported changes between passes.
	 * We free up and alloc again even if the
	 * size was more than required. This is due to the
	 * fact that the field lun_guid_size serves the
	 * dual role of indicating both the size of the GUID
	 * and the allocation size.
15770 	 */
15771 	if (plun->lun_guid) {
15772 		if (plun->lun_guid_size != len) {
15773 			/*
15774 			 * free the allocated memory and
15775 			 * initialize the field
15776 			 * lun_guid_size to 0.
15777 			 */
15778 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15779 			plun->lun_guid = NULL;
15780 			plun->lun_guid_size = 0;
15781 		}
15782 	}
15783 	/*
15784 	 * alloc only if not already done.
15785 	 */
15786 	if (plun->lun_guid == NULL) {
15787 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15788 		if (plun->lun_guid == NULL) {
			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
			    "Unable to allocate "
			    "memory for GUID, size %d", len);
15792 			retval = 1;
15793 		} else {
15794 			plun->lun_guid_size = len;
15795 		}
15796 	}
15797 	if (plun->lun_guid) {
15798 		/*
15799 		 * now copy the GUID
15800 		 */
15801 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15802 	}
15803 	return (retval);
15804 }
15805 
15806 /*
15807  * fcp_reconfig_wait
15808  *
15809  * Wait for a rediscovery/reconfiguration to complete before continuing.
15810  */
15811 
15812 static void
15813 fcp_reconfig_wait(struct fcp_port *pptr)
15814 {
15815 	clock_t		reconfig_start, wait_timeout;
15816 
15817 	/*
15818 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15819 	 * reconfiguration in progress.
15820 	 */
15821 
15822 	mutex_enter(&pptr->port_mutex);
15823 	if (pptr->port_tmp_cnt == 0) {
15824 		mutex_exit(&pptr->port_mutex);
15825 		return;
15826 	}
15827 	mutex_exit(&pptr->port_mutex);
15828 
15829 	/*
15830 	 * If we cause a reconfig by raising power, delay until all devices
15831 	 * report in (port_tmp_cnt returns to 0)
15832 	 */
15833 
15834 	reconfig_start = ddi_get_lbolt();
15835 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15836 
15837 	mutex_enter(&pptr->port_mutex);
15838 
15839 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15840 	    pptr->port_tmp_cnt) {
15841 
15842 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15843 		    reconfig_start + wait_timeout);
15844 	}
15845 
15846 	mutex_exit(&pptr->port_mutex);
15847 
15848 	/*
	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
	 * we want may still be ok.  If not, it will error out later.
15851 	 */
15852 }
15853 
15854 /*
15855  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15856  * We rely on the fcp_global_mutex to provide protection against changes to
15857  * the fcp_lun_blacklist.
15858  *
15859  * You can describe a list of target port WWNs and LUN numbers which will
15860  * not be configured. LUN numbers will be interpreted as decimal. White
15861  * spaces and ',' can be used in the list of LUN numbers.
15862  *
15863  * To prevent LUNs 1 and 2 from being configured for target
15864  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15865  *
15866  * pwwn-lun-blacklist=
15867  * "510000f010fd92a1,1,2",
15868  * "510000e012079df1,1,2";
15869  */
15870 static void
15871 fcp_read_blacklist(dev_info_t *dip,
15872     struct fcp_black_list_entry **pplun_blacklist) {
15873 	char **prop_array	= NULL;
15874 	char *curr_pwwn		= NULL;
15875 	char *curr_lun		= NULL;
15876 	uint32_t prop_item	= 0;
15877 	int idx			= 0;
15878 	int len			= 0;
15879 
15880 	ASSERT(mutex_owned(&fcp_global_mutex));
15881 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15882 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15883 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15884 		return;
15885 	}
15886 
15887 	for (idx = 0; idx < prop_item; idx++) {
15888 
15889 		curr_pwwn = prop_array[idx];
15890 		while (*curr_pwwn == ' ') {
15891 			curr_pwwn++;
15892 		}
15893 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15894 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15895 			    ", please check.", curr_pwwn);
15896 			continue;
15897 		}
15898 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15899 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15900 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15901 			    ", please check.", curr_pwwn);
15902 			continue;
15903 		}
15904 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15905 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15906 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15907 				    "blacklist, please check.", curr_pwwn);
15908 				break;
15909 			}
15910 		}
15911 		if (len != sizeof (la_wwn_t) * 2) {
15912 			continue;
15913 		}
15914 
15915 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15916 		*(curr_lun - 1) = '\0';
15917 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15918 	}
15919 
15920 	ddi_prop_free(prop_array);
15921 }
15922 
15923 /*
15924  * Get the masking info about one remote target port designated by wwn.
 * LUN ids can be separated by ',' or white space.
15926  */
15927 static void
15928 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15929     struct fcp_black_list_entry **pplun_blacklist) {
15930 	int		idx			= 0;
15931 	uint32_t	offset			= 0;
15932 	unsigned long	lun_id			= 0;
15933 	char		lunid_buf[16];
15934 	char		*pend			= NULL;
15935 	int		illegal_digit		= 0;
15936 
15937 	while (offset < strlen(curr_lun)) {
15938 		while ((curr_lun[offset + idx] != ',') &&
15939 		    (curr_lun[offset + idx] != '\0') &&
15940 		    (curr_lun[offset + idx] != ' ')) {
15941 			if (isdigit(curr_lun[offset + idx]) == 0) {
15942 				illegal_digit++;
15943 			}
15944 			idx++;
15945 		}
15946 		if (illegal_digit > 0) {
15947 			offset += (idx+1);	/* To the start of next lun */
15948 			idx = 0;
15949 			illegal_digit = 0;
15950 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15951 			    "the blacklist, please check digits.",
15952 			    curr_lun, curr_pwwn);
15953 			continue;
15954 		}
15955 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
15956 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15957 			    "the blacklist, please check the length of LUN#.",
15958 			    curr_lun, curr_pwwn);
15959 			break;
15960 		}
15961 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
15964 		}
15965 
15966 		bcopy(curr_lun + offset, lunid_buf, idx);
15967 		lunid_buf[idx] = '\0';
15968 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
15969 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
15970 		} else {
15971 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15972 			    "the blacklist, please check %s.",
15973 			    curr_lun, curr_pwwn, lunid_buf);
15974 		}
15975 		offset += (idx+1);	/* To the start of next lun */
15976 		idx = 0;
15977 	}
15978 }
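
/*
 * Example (not part of the driver): given the fp.conf entry
 * "510000f010fd92a1,1, 2,10" (the WWN from the pwwn-lun-blacklist example
 * earlier in this file), fcp_read_blacklist() splits off the 16-character
 * WWN, and fcp_mask_pwwn_lun() above then walks the remainder "1, 2,10",
 * calling fcp_add_one_mask() for the decimal LUN numbers 1, 2 and 10
 * while skipping the ',' and ' ' separators.
 */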
15979 
15980 /*
15981  * Add one masking record
15982  */
15983 static void
15984 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
15985     struct fcp_black_list_entry **pplun_blacklist) {
15986 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
15987 	struct fcp_black_list_entry	*new_entry	= NULL;
15988 	la_wwn_t			wwn;
15989 
15990 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
15991 	while (tmp_entry) {
15992 		if ((bcmp(&tmp_entry->wwn, &wwn,
15993 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
15994 			return;
15995 		}
15996 
15997 		tmp_entry = tmp_entry->next;
15998 	}
15999 
16000 	/* add to black list */
16001 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16002 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16003 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16004 	new_entry->lun = lun_id;
16005 	new_entry->masked = 0;
16006 	new_entry->next = *pplun_blacklist;
16007 	*pplun_blacklist = new_entry;
16008 }
16009 
16010 /*
16011  * Check if we should mask the specified lun of this fcp_tgt
16012  */
16013 static int
16014 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16015 	struct fcp_black_list_entry *remote_port;
16016 
16017 	remote_port = fcp_lun_blacklist;
16018 	while (remote_port != NULL) {
16019 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16020 			if (remote_port->lun == lun_id) {
16021 				remote_port->masked++;
16022 				if (remote_port->masked == 1) {
16023 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
16024 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
16025 					    "is masked due to black listing.\n",
16026 					    lun_id, wwn->raw_wwn[0],
16027 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
16028 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
16029 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
16030 					    wwn->raw_wwn[7]);
16031 				}
16032 				return (TRUE);
16033 			}
16034 		}
16035 		remote_port = remote_port->next;
16036 	}
16037 	return (FALSE);
16038 }
16039 
16040 /*
16041  * Release all allocated resources
16042  */
16043 static void
16044 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16045 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16046 	struct fcp_black_list_entry	*current_entry	= NULL;
16047 
16048 	ASSERT(mutex_owned(&fcp_global_mutex));
16049 	/*
16050 	 * Walk the list and free every entry.
16051 	 */
16052 	while (tmp_entry) {
16053 		current_entry = tmp_entry;
16054 		tmp_entry = tmp_entry->next;
16055 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16056 	}
16057 	*pplun_blacklist = NULL;
16058 }
16059 
16060 /*
16061  * Naming convention used in the fcp module:
16062  *   pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16063  */
16064 static struct scsi_pkt *
16065 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16066     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16067     int flags, int (*callback)(), caddr_t arg)
16068 {
16069 	fcp_port_t	*pptr = ADDR2FCP(ap);
16070 	fcp_pkt_t	*cmd  = NULL;
16071 	fc_frame_hdr_t	*hp;
16072 
16073 	/*
16074 	 * First step: get the packet
16075 	 */
16076 	if (pkt == NULL) {
16077 		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16078 		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16079 		    callback, arg);
16080 		if (pkt == NULL) {
16081 			return (NULL);
16082 		}
16083 
16084 		/*
16085 		 * scsi_hba_pkt_alloc() initializes or zeroes every field of
16086 		 * the scsi_pkt, so nothing more is needed for it here.
16087 		 */
16088 		/*
16089 		 * It is our responsibility, however, to link the related
16090 		 * data structures together; they are fully initialized just
16091 		 * before the scsi_pkt is handed to the FCA.
16092 		 */
16093 		cmd		= PKT2CMD(pkt);
16094 		cmd->cmd_pkt	= pkt;
16095 		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16096 		/*
16097 		 * fc_packet_t
16098 		 */
16099 		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
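		/*
		 * The FCA private area lives immediately after the fcp_pkt
		 * in the single chunk allocated by scsi_hba_pkt_alloc()
		 * above (sizeof (fcp_pkt_t) + port_priv_pkt_len).
		 */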
16100 		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16101 		    sizeof (struct fcp_pkt));
16102 		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16103 		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16104 		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16105 		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16106 		/*
16107 		 * Fill in the Fibre Channel frame header.
16108 		 */
16109 		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16110 		hp->r_ctl = R_CTL_COMMAND;
16111 		hp->rsvd = 0;
16112 		hp->type = FC_TYPE_SCSI_FCP;
16113 		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16114 		hp->seq_id = 0;
16115 		hp->df_ctl  = 0;
16116 		hp->seq_cnt = 0;
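		/*
		 * 0xffff marks the originator and responder exchange IDs
		 * as unassigned; real values are assigned once the
		 * exchange is opened.
		 */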
16117 		hp->ox_id = 0xffff;
16118 		hp->rx_id = 0xffff;
16119 		hp->ro = 0;
16120 	} else {
16121 		/*
16122 		 * Consider whether any fields in the related data
16123 		 * structures need to be reset when a packet is reused.
16124 		 */
16125 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
16126 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
16127 		    "reusing pkt, flags %d", flags);
16128 		cmd = PKT2CMD(pkt);
16129 		if (cmd->cmd_fp_pkt->pkt_pd) {
16130 			cmd->cmd_fp_pkt->pkt_pd = NULL;
16131 		}
16132 	}
16133 
16134 	/*
16135 	 * Second step: set up the data buffer
16136 	 */
16137 	if (bp && bp->b_bcount != 0) {
16138 		/*
16139 		 * Mark if it's read or write
16140 		 */
16141 		if (bp->b_flags & B_READ) {
16142 			cmd->cmd_flags |= CFLAG_IS_READ;
16143 		} else {
16144 			cmd->cmd_flags &= ~CFLAG_IS_READ;
16145 		}
16146 
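		/*
		 * Map the buffer into kernel virtual space; the packet is
		 * handed a kva/length pair and no DMA resources are bound
		 * here.
		 */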
16147 		bp_mapin(bp);
16148 		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16149 		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16150 		cmd->cmd_fp_pkt->pkt_data_resid = 0;
16151 	} else {
16152 		/*
16153 		 * This seldom happens, except when CLUSTER or SCSI_VHCI
16154 		 * issues a zero-length read or write.
16155 		 */
16156 		cmd->cmd_fp_pkt->pkt_data = NULL;
16157 		cmd->cmd_fp_pkt->pkt_datalen = 0;
16158 	}
16159 
16160 	return (pkt);
16161 }
16162 
16163 static void
16164 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16165 {
16166 	fcp_port_t	*pptr = ADDR2FCP(ap);
16167 
16168 	/*
16169 	 * First, let the FCA uninitialize its private part.
16170 	 */
16171 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16172 	    PKT2CMD(pkt)->cmd_fp_pkt);
16173 
16174 	/*
16175 	 * Then we uninitialize the fc_packet (nothing to do explicitly).
16176 	 */
16177 
16178 	/*
16179 	 * Third, we uninitialize the fcp_pkt (again, nothing to do).
16180 	 */
16181 
16182 	/*
16183 	 * Finally, free the scsi_pkt.
16184 	 */
16185 	scsi_hba_pkt_free(ap, pkt);
16186 }
16187 
16188 static int
16189 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16190 {
16191 	fcp_port_t	*pptr = ADDR2FCP(ap);
16192 	fcp_lun_t	*plun = ADDR2LUN(ap);
16193 	fcp_tgt_t	*ptgt = plun->lun_tgt;
16194 	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
16195 	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
16196 	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
16197 	int		 rval;
16198 
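	/*
	 * Bind the packet to the remote port device before letting the
	 * FCA initialize its private part.
	 */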
16199 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
16200 	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16201 
16202 	/*
16203 	 * First, initialize the fcp_pkt_t; second, initialize the
16204 	 * fcp_cmd_t.
16205 	 */
16206 	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16207 	fcmd->fcp_data_len = fpkt->pkt_datalen;
16208 	fcmd->fcp_ent_addr = plun->lun_addr;
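	/*
	 * Map the SCSA tag-queueing flags onto the FCP task attribute.
	 */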
16209 	if (pkt->pkt_flags & FLAG_HTAG) {
16210 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16211 	} else if (pkt->pkt_flags & FLAG_OTAG) {
16212 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16213 	} else if (pkt->pkt_flags & FLAG_STAG) {
16214 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16215 	} else {
16216 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16217 	}
16218 
16219 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16220 		fcmd->fcp_cntl.cntl_read_data = 1;
16221 		fcmd->fcp_cntl.cntl_write_data = 0;
16222 	} else {
16223 		fcmd->fcp_cntl.cntl_read_data = 0;
16224 		fcmd->fcp_cntl.cntl_write_data = 1;
16225 	}
16226 
16227 	/*
16228 	 * Then the fc_packet_t needs to be initialized as well.
16229 	 */
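	/*
	 * Give the FCA a little slack beyond the target driver's own
	 * pkt_time before the command is timed out.
	 */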
16230 	fpkt->pkt_timeout = pkt->pkt_time + 2;
16231 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16232 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16233 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16234 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16235 	} else {
16236 		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16237 	}
16238 
16239 	if (pkt->pkt_flags & FLAG_NOINTR) {
16240 		fpkt->pkt_comp = NULL;
16241 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16242 	} else {
16243 		fpkt->pkt_comp = fcp_cmd_callback;
16244 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16245 		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16246 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16247 		}
16248 	}
16249 
16250 	/*
16251 	 * Lastly, initialize the scsi_pkt.
16252 	 */
16253 	pkt->pkt_reason = CMD_CMPLT;
16254 	pkt->pkt_state = 0;
16255 	pkt->pkt_statistics = 0;
16256 	pkt->pkt_resid = 0;
16257 
16258 	/*
16259 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
16260 	 * have to do polled I/O
16261 	 */
16262 	if (pkt->pkt_flags & FLAG_NOINTR) {
16263 		return (fcp_dopoll(pptr, cmd));
16264 	}
16265 
16266 	cmd->cmd_state = FCP_PKT_ISSUED;
16267 	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16268 	if (rval == FC_SUCCESS) {
16269 		return (TRAN_ACCEPT);
16270 	}
16271 
16272 	/*
16273 	 * This path needs further consideration:
16274 	 *
16275 	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other packets.
16276 	 */
16277 	cmd->cmd_state = FCP_PKT_IDLE;
16278 	if (rval == FC_TRAN_BUSY) {
16279 		return (TRAN_BUSY);
16280 	} else {
16281 		return (TRAN_FATAL_ERROR);
16282 	}
16283 }
16284 
16285 /*
16286  * scsi_poll() always calls tran_sync_pkt() for pseudo FC-HBAs;
16287  * SCSA initializes it to scsi_sync_cache_pkt() for physical FC-HBAs.
16288  */
16289 static void
16290 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16291 {
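	/*
	 * Nothing to sync: fcp_pseudo_init_pkt() maps the data with
	 * bp_mapin() and binds no DMA resources.
	 */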
16292 	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16293 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16294 }
16295 
16296 /*
16297  * scsi_dmafree() always calls tran_dmafree() when STATE_ARQ_DONE is set.
16298  */
16299 static void
16300 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16301 {
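	/*
	 * Likewise a no-op: no DMA resources were allocated for this
	 * packet, so there is nothing to free.
	 */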
16302 	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16303 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16304 }
16305