1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Fibre Channel SCSI ULP Mapping driver
25  */
26 
27 /*
28  * Copyright 2023 Oxide Computer Company
29  */
30 
31 #include <sys/scsi/scsi.h>
32 #include <sys/types.h>
33 #include <sys/varargs.h>
34 #include <sys/devctl.h>
35 #include <sys/thread.h>
37 #include <sys/open.h>
38 #include <sys/file.h>
39 #include <sys/sunndi.h>
40 #include <sys/console.h>
41 #include <sys/proc.h>
42 #include <sys/time.h>
43 #include <sys/utsname.h>
44 #include <sys/scsi/impl/scsi_reset_notify.h>
45 #include <sys/ndi_impldefs.h>
46 #include <sys/byteorder.h>
47 #include <sys/fs/dv_node.h>
48 #include <sys/ctype.h>
49 #include <sys/sunmdi.h>
50 
51 #include <sys/fibre-channel/fc.h>
52 #include <sys/fibre-channel/impl/fc_ulpif.h>
53 #include <sys/fibre-channel/ulp/fcpvar.h>
54 
55 /*
56  * Discovery Process
57  * =================
58  *
59  *    The discovery process is a major function of FCP.	 In order to help
60  * understand that function a flow diagram is given here.  This diagram
61  * doesn't claim to cover all the cases and the events that can occur during
62  * the discovery process nor the subtleties of the code.  The code paths shown
63  * are simplified.  Its purpose is to help the reader (and potentially bug
64  * fixer) have an overall view of the logic of the code.  For that reason the
65  * diagram covers the simple case of the line coming up cleanly or of a new
66  * port attaching to FCP while the link is up.  The reader must keep in mind
67  * that:
68  *
69  *	- There are special cases where bringing devices online and offline
70  *	  is driven by Ioctl.
71  *
72  *	- The behavior of the discovery process can be modified through the
73  *	  .conf file.
74  *
75  *	- The line can go down and come back up at any time during the
76  *	  discovery process which explains some of the complexity of the code.
77  *
78  * ............................................................................
79  *
80  * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
81  *
82  *
83  *			+-------------------------+
84  *   fp/fctl module --->|    fcp_port_attach	  |
85  *			+-------------------------+
86  *	   |			     |
87  *	   |			     |
88  *	   |			     v
89  *	   |		+-------------------------+
90  *	   |		| fcp_handle_port_attach  |
91  *	   |		+-------------------------+
92  *	   |				|
93  *	   |				|
94  *	   +--------------------+	|
95  *				|	|
96  *				v	v
97  *			+-------------------------+
98  *			|   fcp_statec_callback	  |
99  *			+-------------------------+
100  *				    |
101  *				    |
102  *				    v
103  *			+-------------------------+
104  *			|    fcp_handle_devices	  |
105  *			+-------------------------+
106  *				    |
107  *				    |
108  *				    v
109  *			+-------------------------+
110  *			|   fcp_handle_mapflags	  |
111  *			+-------------------------+
112  *				    |
113  *				    |
114  *				    v
115  *			+-------------------------+
116  *			|     fcp_send_els	  |
117  *			|			  |
118  *			| PLOGI or PRLI To all the|
119  *			| reachable devices.	  |
120  *			+-------------------------+
121  *
122  *
123  * ............................................................................
124  *
125  * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
126  *	   STEP 1 are called (it is actually the same function).
127  *
128  *
129  *			+-------------------------+
130  *			|    fcp_icmd_callback	  |
131  *   fp/fctl module --->|			  |
132  *			| callback for PLOGI and  |
133  *			| PRLI.			  |
134  *			+-------------------------+
135  *				     |
136  *				     |
137  *	    Received PLOGI Accept   /-\	  Received PRLI Accept
138  *		       _ _ _ _ _ _ /   \_ _ _ _ _ _
139  *		      |		   \   /	   |
140  *		      |		    \-/		   |
141  *		      |				   |
142  *		      v				   v
143  *	+-------------------------+	+-------------------------+
144  *	|     fcp_send_els	  |	|     fcp_send_scsi	  |
145  *	|			  |	|			  |
146  *	|	  PRLI		  |	|	REPORT_LUN	  |
147  *	+-------------------------+	+-------------------------+
148  *
149  * ............................................................................
150  *
151  * STEP 3: The callback functions of the SCSI commands issued by FCP are called
152  *	   (It is actually the same function).
153  *
154  *
155  *			    +-------------------------+
156  *   fp/fctl module ------->|	 fcp_scsi_callback    |
157  *			    +-------------------------+
158  *					|
159  *					|
160  *					|
161  *	Receive REPORT_LUN reply       /-\	Receive INQUIRY PAGE83 reply
162  *		  _ _ _ _ _ _ _ _ _ _ /	  \_ _ _ _ _ _ _ _ _ _ _ _
163  *		 |		      \	  /			  |
164  *		 |		       \-/			  |
165  *		 |			|			  |
166  *		 | Receive INQUIRY reply|			  |
167  *		 |			|			  |
168  *		 v			v			  v
169  * +------------------------+ +----------------------+ +----------------------+
170  * |  fcp_handle_reportlun  | |	 fcp_handle_inquiry  | |  fcp_handle_page83   |
171  * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
172  * +------------------------+ +----------------------+ +----------------------+
173  *		 |			|			  |
174  *		 |			|			  |
175  *		 |			|			  |
176  *		 v			v			  |
177  *     +-----------------+	+-----------------+		  |
178  *     |  fcp_send_scsi	 |	|  fcp_send_scsi  |		  |
179  *     |		 |	|		  |		  |
180  *     |     INQUIRY	 |	| INQUIRY PAGE83  |		  |
181  *     |  (To each LUN)	 |	+-----------------+		  |
182  *     +-----------------+					  |
183  *								  |
184  *								  v
185  *						      +------------------------+
186  *						      |	 fcp_call_finish_init  |
187  *						      +------------------------+
188  *								  |
189  *								  v
190  *						 +-----------------------------+
191  *						 |  fcp_call_finish_init_held  |
192  *						 +-----------------------------+
193  *								  |
194  *								  |
195  *			   All LUNs scanned			 /-\
196  *			       _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ /   \
197  *			      |					\   /
198  *			      |					 \-/
199  *			      v					  |
200  *		     +------------------+			  |
201  *		     |	fcp_finish_tgt	|			  |
202  *		     +------------------+			  |
203  *			      |	  Target Not Offline and	  |
204  *  Target Not Offline and    |	  not marked and tgt_node_state	  |
205  *  marked		     /-\  not FCP_TGT_NODE_ON_DEMAND	  |
206  *		_ _ _ _ _ _ /	\_ _ _ _ _ _ _ _		  |
207  *	       |	    \	/		|		  |
208  *	       |	     \-/		|		  |
209  *	       v				v		  |
210  * +----------------------------+     +-------------------+	  |
211  * |	 fcp_offline_target	|     |	 fcp_create_luns  |	  |
212  * |				|     +-------------------+	  |
213  * | A structure fcp_tgt_elem	|		|		  |
214  * | is created and queued in	|		v		  |
215  * | the FCP port list		|     +-------------------+	  |
216  * | port_offline_tgts.	 It	|     |	 fcp_pass_to_hp	  |	  |
217  * | will be unqueued by the	|     |			  |	  |
218  * | watchdog timer.		|     | Called for each	  |	  |
219  * +----------------------------+     | LUN. Dispatches	  |	  |
220  *		  |		      | fcp_hp_task	  |	  |
221  *		  |		      +-------------------+	  |
222  *		  |				|		  |
223  *		  |				|		  |
224  *		  |				|		  |
225  *		  |				+---------------->|
226  *		  |						  |
227  *		  +---------------------------------------------->|
228  *								  |
229  *								  |
230  *		All the targets (devices) have been scanned	 /-\
231  *				_ _ _ _	_ _ _ _	_ _ _ _ _ _ _ _ /   \
232  *			       |				\   /
233  *			       |				 \-/
234  *	    +-------------------------------------+		  |
235  *	    |		fcp_finish_init		  |		  |
236  *	    |					  |		  |
237  *	    | Signal broadcasts the condition	  |		  |
238  *	    | variable port_config_cv of the FCP  |		  |
239  *	    | port.  One potential code sequence  |		  |
240  *	    | waiting on the condition variable	  |		  |
241  *	    | the code sequence handling	  |		  |
242  *	    | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER|		  |
243  *	    | The other is in the function	  |		  |
244  *	    | fcp_reconfig_wait which is called	  |		  |
245  *	    | in the transmit path preventing IOs |		  |
246  *	    | from going through till the disco-  |		  |
247  *	    | very process is over.		  |		  |
248  *	    +-------------------------------------+		  |
249  *			       |				  |
250  *			       |				  |
251  *			       +--------------------------------->|
252  *								  |
253  *								  v
254  *								Return
255  *
256  * ............................................................................
257  *
258  * STEP 4: The hot plug task is called (for each fcp_hp_elem).
259  *
260  *
261  *			+-------------------------+
262  *			|      fcp_hp_task	  |
263  *			+-------------------------+
264  *				     |
265  *				     |
266  *				     v
267  *			+-------------------------+
268  *			|     fcp_trigger_lun	  |
269  *			+-------------------------+
270  *				     |
271  *				     |
272  *				     v
273  *		   Bring offline    /-\	 Bring online
274  *		  _ _ _ _ _ _ _ _ _/   \_ _ _ _ _ _ _ _ _ _
275  *		 |		   \   /		   |
276  *		 |		    \-/			   |
277  *		 v					   v
278  *    +---------------------+			+-----------------------+
279  *    |	 fcp_offline_child  |			|      fcp_get_cip	|
280  *    +---------------------+			|			|
281  *						| Creates a dev_info_t	|
282  *						| or a mdi_pathinfo_t	|
283  *						| depending on whether	|
284  *						| mpxio is on or off.	|
285  *						+-----------------------+
286  *							   |
287  *							   |
288  *							   v
289  *						+-----------------------+
290  *						|  fcp_online_child	|
291  *						|			|
292  *						| Set device online	|
293  *						| using NDI or MDI.	|
294  *						+-----------------------+
295  *
296  * ............................................................................
297  *
298  * STEP 5: The watchdog timer expires.  The watchdog timer does much more than
299  *	   what is described here.  We only show the target offline path.
300  *
301  *
302  *			 +--------------------------+
303  *			 |	  fcp_watch	    |
304  *			 +--------------------------+
305  *				       |
306  *				       |
307  *				       v
308  *			 +--------------------------+
309  *			 |  fcp_scan_offline_tgts   |
310  *			 +--------------------------+
311  *				       |
312  *				       |
313  *				       v
314  *			 +--------------------------+
315  *			 |  fcp_offline_target_now  |
316  *			 +--------------------------+
317  *				       |
318  *				       |
319  *				       v
320  *			 +--------------------------+
321  *			 |   fcp_offline_tgt_luns   |
322  *			 +--------------------------+
323  *				       |
324  *				       |
325  *				       v
326  *			 +--------------------------+
327  *			 |     fcp_offline_lun	    |
328  *			 +--------------------------+
329  *				       |
330  *				       |
331  *				       v
332  *		     +----------------------------------+
333  *		     |	     fcp_offline_lun_now	|
334  *		     |					|
335  *		     | A request (or two if mpxio) is	|
336  *		     | sent to the hot plug task using	|
337  *		     | a fcp_hp_elem structure.		|
338  *		     +----------------------------------+
339  */
340 
341 /*
342  * Functions registered with DDI framework
343  */
344 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
345 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
346 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
347 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
348 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
349     cred_t *credp, int *rval);
350 
351 /*
352  * Functions registered with FC Transport framework
353  */
354 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
355     fc_attach_cmd_t cmd,  uint32_t s_id);
356 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
357     fc_detach_cmd_t cmd);
358 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
359     int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
360     uint32_t claimed);
361 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
362     fc_unsol_buf_t *buf, uint32_t claimed);
363 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
364     fc_unsol_buf_t *buf, uint32_t claimed);
365 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
366     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
367     uint32_t  dev_cnt, uint32_t port_sid);
368 
369 /*
370  * Functions registered with SCSA framework
371  */
372 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
375     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
376 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
377     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
378 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
379 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
380 static int fcp_scsi_reset(struct scsi_address *ap, int level);
381 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
382 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
383     int whom);
384 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
385 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
386     void (*callback)(caddr_t), caddr_t arg);
387 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
388     char *name, ddi_eventcookie_t *event_cookiep);
389 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
390     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
391     ddi_callback_id_t *cb_id);
392 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
393     ddi_callback_id_t cb_id);
394 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
395     ddi_eventcookie_t eventid, void *impldata);
396 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
397     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
398 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
399     ddi_bus_config_op_t op, void *arg);
400 
401 /*
402  * Internal functions
403  */
404 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
405     int mode, int *rval);
406 
407 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
408     int mode, int *rval);
409 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
410     struct fcp_scsi_cmd *fscsi, int mode);
411 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
412     caddr_t base_addr, int mode);
413 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
414 
415 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
416     la_wwn_t *pwwn, int	*ret_val, int *fc_status, int *fc_pkt_state,
417     int *fc_pkt_reason, int *fc_pkt_action);
418 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
419     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
420 static int fcp_tgt_send_prli(struct fcp_tgt	*ptgt, int *fc_status,
421     int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
422 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
423 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
424 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
425 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
426 
427 static void fcp_handle_devices(struct fcp_port *pptr,
428     fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
429     fcp_map_tag_t *map_tag, int cause);
430 static int fcp_handle_mapflags(struct fcp_port *pptr,
431     struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
432     int tgt_cnt, int cause);
433 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
434 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
435     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
436 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
437     int cause);
438 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
439     uint32_t state);
440 static struct fcp_port *fcp_get_port(opaque_t port_handle);
441 static void fcp_unsol_callback(fc_packet_t *fpkt);
442 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
443     uchar_t r_ctl, uchar_t type);
444 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
445 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
446     struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
447     int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
448 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
449 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
450     int nodma, int flags);
451 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
452 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
453     uchar_t *wwn);
454 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
455     uint32_t d_id);
456 static void fcp_icmd_callback(fc_packet_t *fpkt);
457 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
458     int len, int lcount, int tcount, int cause, uint32_t rscn_count);
459 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
460 static void fcp_scsi_callback(fc_packet_t *fpkt);
461 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
462 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
463 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
464 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
465     uint16_t lun_num);
466 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
467     int link_cnt, int tgt_cnt, int cause);
468 static void fcp_finish_init(struct fcp_port *pptr);
469 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
470     int tgt_cnt, int cause);
471 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
472     int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
473 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
474     int link_cnt, int tgt_cnt, int nowait, int flags);
475 static void fcp_offline_target_now(struct fcp_port *pptr,
476     struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
477 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
478     int tgt_cnt, int flags);
479 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
480     int nowait, int flags);
481 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
482     int tgt_cnt);
483 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
484     int tgt_cnt, int flags);
485 static void fcp_scan_offline_luns(struct fcp_port *pptr);
486 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
487 static void fcp_update_offline_flags(struct fcp_lun *plun);
488 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
489 static void fcp_abort_commands(struct fcp_pkt *head, struct
490     fcp_port *pptr);
491 static void fcp_cmd_callback(fc_packet_t *fpkt);
492 static void fcp_complete_pkt(fc_packet_t *fpkt);
493 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
494     struct fcp_port *pptr);
495 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
496     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
497 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
498 static void fcp_dealloc_lun(struct fcp_lun *plun);
499 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
500     fc_portmap_t *map_entry, int link_cnt);
501 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
502 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
503 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
504     int internal);
505 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
506 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
507     uint32_t s_id, int instance);
508 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
509     int instance);
510 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
511 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
512     int);
513 static void fcp_kmem_cache_destructor(struct  scsi_pkt *, scsi_hba_tran_t *);
514 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
515 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
516     int flags);
517 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
518 static int fcp_reset_target(struct scsi_address *ap, int level);
519 static int fcp_commoncap(struct scsi_address *ap, char *cap,
520     int val, int tgtonly, int doset);
521 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
522 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
523 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
524     int sleep);
525 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
526     uint32_t s_id, fc_attach_cmd_t cmd, int instance);
527 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
528 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
529 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
530     int lcount, int tcount);
531 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
532 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
533 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
534     int tgt_cnt);
535 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
536     dev_info_t *pdip, caddr_t name);
537 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
538     int lcount, int tcount, int flags);
539 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
540     int lcount, int tcount, int flags);
541 static void fcp_remove_child(struct fcp_lun *plun);
542 static void fcp_watch(void *arg);
543 static void fcp_check_reset_delay(struct fcp_port *pptr);
544 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
545     struct fcp_lun *rlun, int tgt_cnt);
546 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
547 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
548     uchar_t *wwn, uint16_t lun);
549 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
550     struct fcp_lun *plun);
551 static void fcp_post_callback(struct fcp_pkt *cmd);
552 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
553 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
554 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
555     child_info_t *cip);
556 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
557     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
558     int tgt_cnt, int flags);
559 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
560     struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
561     int tgt_cnt, int flags, int wait);
562 static void fcp_retransport_cmd(struct fcp_port *pptr,
563     struct fcp_pkt *cmd);
564 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
565     uint_t statistics);
566 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
567 static void fcp_update_targets(struct fcp_port *pptr,
568     fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
569 static int fcp_call_finish_init(struct fcp_port *pptr,
570     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
571 static int fcp_call_finish_init_held(struct fcp_port *pptr,
572     struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
573 static void fcp_reconfigure_luns(void * tgt_handle);
574 static void fcp_free_targets(struct fcp_port *pptr);
575 static void fcp_free_target(struct fcp_tgt *ptgt);
576 static int fcp_is_retryable(struct fcp_ipkt *icmd);
577 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
578 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
579 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
580 static void fcp_print_error(fc_packet_t *fpkt);
581 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
582     struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
583 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
584 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
585     uint32_t *dev_cnt);
586 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
587 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
588 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
589     struct fcp_ioctl *, struct fcp_port **);
590 static char *fcp_get_lun_path(struct fcp_lun *plun);
591 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
592     int *rval);
593 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
594 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
598 static void fcp_reconfig_wait(struct fcp_port *pptr);
599 
600 /*
601  * New functions added for mpxio support
602  */
603 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
604     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
605 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
606     int tcount);
607 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
608     dev_info_t *pdip);
609 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
610 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
611 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
612 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
613 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
614     int what);
615 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
616     fc_packet_t *fpkt);
617 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
618 
619 /*
620  * New functions added for lun masking support
621  */
622 static void fcp_read_blacklist(dev_info_t *dip,
623     struct fcp_black_list_entry **pplun_blacklist);
624 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
625     struct fcp_black_list_entry **pplun_blacklist);
626 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
627     struct fcp_black_list_entry **pplun_blacklist);
628 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
629 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
630 
631 /*
632  * New functions to support software FCA (like fcoei)
633  */
634 static struct scsi_pkt *fcp_pseudo_init_pkt(
635 	struct scsi_address *ap, struct scsi_pkt *pkt,
636 	struct buf *bp, int cmdlen, int statuslen,
637 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
638 static void fcp_pseudo_destroy_pkt(
639 	struct scsi_address *ap, struct scsi_pkt *pkt);
640 static void fcp_pseudo_sync_pkt(
641 	struct scsi_address *ap, struct scsi_pkt *pkt);
642 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
643 static void fcp_pseudo_dmafree(
644 	struct scsi_address *ap, struct scsi_pkt *pkt);
645 
646 extern struct mod_ops	mod_driverops;
647 /*
648  * This variable is defined in modctl.c and set to '1' after the root driver
649  * and fs are loaded.  It serves as an indication that the root filesystem can
650  * be used.
651  */
652 extern int		modrootloaded;
653 /*
654  * This table contains strings associated with the SCSI sense key codes.  It
655  * is used by FCP to print a clear explanation of the code returned in the
656  * sense information by a device.
657  */
658 extern char		*sense_keys[];
659 /*
660  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).	It is
661  * under this device that the paths to a physical device are created when
662  * MPxIO is used.
663  */
664 extern dev_info_t	*scsi_vhci_dip;
665 
666 /*
667  * Report lun processing
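 * These are the SAM addressing-method values carried in the first byte of
 * each 8-byte LUN entry of a REPORT LUNS response (an inference from their
 * use in this file, not a full description of the SAM encoding).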
668  */
669 #define	FCP_LUN_ADDRESSING		0x80
670 #define	FCP_PD_ADDRESSING		0x00
671 #define	FCP_VOLUME_ADDRESSING		0x40
672 
673 #define	FCP_SVE_THROTTLE		0x28 /* Vicom */
674 #define	MAX_INT_DMA			0x7fffffff
675 /*
676  * Property definitions
677  */
678 #define	NODE_WWN_PROP	(char *)fcp_node_wwn_prop
679 #define	PORT_WWN_PROP	(char *)fcp_port_wwn_prop
680 #define	TARGET_PROP	(char *)fcp_target_prop
681 #define	LUN_PROP	(char *)fcp_lun_prop
682 #define	SAM_LUN_PROP	(char *)fcp_sam_lun_prop
683 #define	CONF_WWN_PROP	(char *)fcp_conf_wwn_prop
684 #define	OBP_BOOT_WWN	(char *)fcp_obp_boot_wwn
685 #define	MANUAL_CFG_ONLY	(char *)fcp_manual_config_only
686 #define	INIT_PORT_PROP	(char *)fcp_init_port_prop
687 #define	TGT_PORT_PROP	(char *)fcp_tgt_port_prop
688 #define	LUN_BLACKLIST_PROP	(char *)fcp_lun_blacklist_prop
689 /*
690  * Short hand macros.
691  */
692 #define	LUN_PORT	(plun->lun_tgt->tgt_port)
693 #define	LUN_TGT		(plun->lun_tgt)
694 
695 /*
696  * Driver private macros
697  */
698 #define	FCP_ATOB(x)	(((x) >= '0' && (x) <= '9') ? ((x) - '0') :	\
699 			((x) >= 'a' && (x) <= 'f') ?			\
700 			((x) - 'a' + 10) : ((x) - 'A' + 10))
701 
702 #define	FCP_MAX(a, b)	((a) > (b) ? (a) : (b))
703 
704 #define	FCP_N_NDI_EVENTS						\
705 	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
706 
707 #define	FCP_LINK_STATE_CHANGED(p, c)			\
708 	((p)->port_link_cnt != (c)->ipkt_link_cnt)
709 
710 #define	FCP_TGT_STATE_CHANGED(t, c)			\
711 	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
712 
713 #define	FCP_STATE_CHANGED(p, t, c)		\
714 	(FCP_TGT_STATE_CHANGED(t, c))
715 
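/*
 * Transient transport conditions (local/remote busy, offline, etc.) under
 * which an internal packet is presumably worth reissuing.
 */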
716 #define	FCP_MUST_RETRY(fpkt)				\
717 	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY ||	\
718 	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT ||	\
719 	(fpkt)->pkt_state == FC_PKT_TRAN_BSY ||	\
720 	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS ||	\
721 	(fpkt)->pkt_state == FC_PKT_NPORT_BSY ||	\
722 	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY ||	\
723 	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE ||	\
724 	(fpkt)->pkt_reason == FC_REASON_OFFLINE)
725 
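/*
 * Sense-data decodes: UNIT ATTENTION with ASC/ASCQ 0x3f/0x0e is the standard
 * SCSI "reported LUNs data has changed" indication; ILLEGAL REQUEST with
 * ASC 0x25 is "logical unit not supported".
 */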
726 #define	FCP_SENSE_REPORTLUN_CHANGED(es)		\
727 	((es)->es_key == KEY_UNIT_ATTENTION &&	\
728 	(es)->es_add_code == 0x3f &&		\
729 	(es)->es_qual_code == 0x0e)
730 
731 #define	FCP_SENSE_NO_LUN(es)			\
732 	((es)->es_key == KEY_ILLEGAL_REQUEST &&	\
733 	(es)->es_add_code == 0x25 &&		\
734 	(es)->es_qual_code == 0x0)
735 
736 #define	FCP_VERSION		"20091208-1.192"
737 #define	FCP_NAME_VERSION	"SunFC FCP v" FCP_VERSION
738 
739 #define	FCP_NUM_ELEMENTS(array)			\
740 	(sizeof (array) / sizeof ((array)[0]))
741 
742 /*
743  * Debugging, Error reporting, and tracing
744  */
745 #define	FCP_LOG_SIZE		1024 * 1024
746 
747 #define	FCP_LEVEL_1		0x00001		/* attach/detach PM CPR */
748 #define	FCP_LEVEL_2		0x00002		/* failures/Invalid data */
749 #define	FCP_LEVEL_3		0x00004		/* state change, discovery */
750 #define	FCP_LEVEL_4		0x00008		/* ULP messages */
751 #define	FCP_LEVEL_5		0x00010		/* ELS/SCSI cmds */
752 #define	FCP_LEVEL_6		0x00020		/* Transport failures */
753 #define	FCP_LEVEL_7		0x00040
754 #define	FCP_LEVEL_8		0x00080		/* I/O tracing */
755 #define	FCP_LEVEL_9		0x00100		/* I/O tracing */
756 
757 
758 
759 /*
760  * Log contents to system messages file
761  */
762 #define	FCP_MSG_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
763 #define	FCP_MSG_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
764 #define	FCP_MSG_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
765 #define	FCP_MSG_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
766 #define	FCP_MSG_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
767 #define	FCP_MSG_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
768 #define	FCP_MSG_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
769 #define	FCP_MSG_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
770 #define	FCP_MSG_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
771 
772 
773 /*
774  * Log contents to trace buffer
775  */
776 #define	FCP_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
777 #define	FCP_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
778 #define	FCP_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
779 #define	FCP_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
780 #define	FCP_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
781 #define	FCP_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
782 #define	FCP_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
783 #define	FCP_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
784 #define	FCP_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
785 
786 
787 /*
788  * Log contents to both system messages file and trace buffer
789  */
790 #define	FCP_MSG_BUF_LEVEL_1	(FCP_LEVEL_1 | FC_TRACE_LOG_BUF |	\
791 				FC_TRACE_LOG_MSG)
792 #define	FCP_MSG_BUF_LEVEL_2	(FCP_LEVEL_2 | FC_TRACE_LOG_BUF |	\
793 				FC_TRACE_LOG_MSG)
794 #define	FCP_MSG_BUF_LEVEL_3	(FCP_LEVEL_3 | FC_TRACE_LOG_BUF |	\
795 				FC_TRACE_LOG_MSG)
796 #define	FCP_MSG_BUF_LEVEL_4	(FCP_LEVEL_4 | FC_TRACE_LOG_BUF |	\
797 				FC_TRACE_LOG_MSG)
798 #define	FCP_MSG_BUF_LEVEL_5	(FCP_LEVEL_5 | FC_TRACE_LOG_BUF |	\
799 				FC_TRACE_LOG_MSG)
800 #define	FCP_MSG_BUF_LEVEL_6	(FCP_LEVEL_6 | FC_TRACE_LOG_BUF |	\
801 				FC_TRACE_LOG_MSG)
802 #define	FCP_MSG_BUF_LEVEL_7	(FCP_LEVEL_7 | FC_TRACE_LOG_BUF |	\
803 				FC_TRACE_LOG_MSG)
804 #define	FCP_MSG_BUF_LEVEL_8	(FCP_LEVEL_8 | FC_TRACE_LOG_BUF |	\
805 				FC_TRACE_LOG_MSG)
806 #define	FCP_MSG_BUF_LEVEL_9	(FCP_LEVEL_9 | FC_TRACE_LOG_BUF |	\
807 				FC_TRACE_LOG_MSG)
808 #ifdef DEBUG
809 #define	FCP_DTRACE	fc_trace_debug
810 #else
811 #define	FCP_DTRACE
812 #endif
813 
814 #define	FCP_TRACE	fc_trace_debug
815 
816 static struct cb_ops fcp_cb_ops = {
817 	fcp_open,			/* open */
818 	fcp_close,			/* close */
819 	nodev,				/* strategy */
820 	nodev,				/* print */
821 	nodev,				/* dump */
822 	nodev,				/* read */
823 	nodev,				/* write */
824 	fcp_ioctl,			/* ioctl */
825 	nodev,				/* devmap */
826 	nodev,				/* mmap */
827 	nodev,				/* segmap */
828 	nochpoll,			/* chpoll */
829 	ddi_prop_op,			/* cb_prop_op */
830 	0,				/* streamtab */
831 	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
832 	CB_REV,				/* rev */
833 	nodev,				/* aread */
834 	nodev				/* awrite */
835 };
836 
837 
838 static struct dev_ops fcp_ops = {
839 	DEVO_REV,
840 	0,
841 	ddi_getinfo_1to1,
842 	nulldev,		/* identify */
843 	nulldev,		/* probe */
844 	fcp_attach,		/* attach and detach are mandatory */
845 	fcp_detach,
846 	nodev,			/* reset */
847 	&fcp_cb_ops,		/* cb_ops */
848 	NULL,			/* bus_ops */
849 	NULL,			/* power */
850 };
851 
852 
853 char *fcp_version = FCP_NAME_VERSION;
854 
855 static struct modldrv modldrv = {
856 	&mod_driverops,
857 	FCP_NAME_VERSION,
858 	&fcp_ops
859 };
860 
861 
862 static struct modlinkage modlinkage = {
863 	MODREV_1,
864 	&modldrv,
865 	NULL
866 };
867 
868 
869 static fc_ulp_modinfo_t fcp_modinfo = {
870 	&fcp_modinfo,			/* ulp_handle */
871 	FCTL_ULP_MODREV_4,		/* ulp_rev */
872 	FC4_SCSI_FCP,			/* ulp_type */
873 	"fcp",				/* ulp_name */
874 	FCP_STATEC_MASK,		/* ulp_statec_mask */
875 	fcp_port_attach,		/* ulp_port_attach */
876 	fcp_port_detach,		/* ulp_port_detach */
877 	fcp_port_ioctl,			/* ulp_port_ioctl */
878 	fcp_els_callback,		/* ulp_els_callback */
879 	fcp_data_callback,		/* ulp_data_callback */
880 	fcp_statec_callback		/* ulp_statec_callback */
881 };
882 
883 #ifdef	DEBUG
884 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
885 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
886 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
887 				FCP_LEVEL_6 | FCP_LEVEL_7)
888 #else
889 #define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 |	\
890 				FCP_LEVEL_2 | FCP_LEVEL_3 |		\
891 				FCP_LEVEL_4 | FCP_LEVEL_5 |		\
892 				FCP_LEVEL_6 | FCP_LEVEL_7)
893 #endif
894 
895 /* FCP global variables */
896 int			fcp_bus_config_debug = 0;
897 static int		fcp_log_size = FCP_LOG_SIZE;
898 static int		fcp_trace = FCP_TRACE_DEFAULT;
899 static fc_trace_logq_t	*fcp_logq = NULL;
900 static struct fcp_black_list_entry	*fcp_lun_blacklist = NULL;
901 /*
902  * The auto-configuration is set by default.  The only way of disabling it is
903  * through the property MANUAL_CFG_ONLY in the fcp.conf file.
904  */
905 static int		fcp_enable_auto_configuration = 1;
906 static int		fcp_max_bus_config_retries	= 4;
907 static int		fcp_lun_ready_retry = 300;
908 /*
909  * The value assigned to the following variable has changed several times due
910  * to a problem with the data underruns reporting of some firmware(s).	The
911  * current value of 50 gives a timeout value of 25 seconds for a max number
912  * of 256 LUNs.
913  */
914 static int		fcp_max_target_retries = 50;
915 /*
916  * Watchdog variables
917  * ------------------
918  *
919  * fcp_watchdog_init
920  *
921  *	Indicates if the watchdog timer is running or not.  This is actually
922  *	a counter of the number of Fibre Channel ports that attached.  When
923  *	the first port attaches the watchdog is started.  When the last port
924  *	detaches the watchdog timer is stopped.
925  *
926  * fcp_watchdog_time
927  *
928  *	This is the watchdog clock counter.  It is incremented by
929  *	fcp_watchdog_timeout each time the watchdog timer expires.
930  *
931  * fcp_watchdog_timeout
932  *
933  *	Increment value of the variable fcp_watchdog_time as well as the
934  *	timeout value of the watchdog timer.  The unit is 1 second.  It
935  *	is strange that this is not a #define	but a variable since the code
936  *	never changes this value.  The reason why it can be said that the
937  *	unit is 1 second is because the number of ticks for the watchdog
938  *	timer is determined like this:
939  *
940  *	    fcp_watchdog_tick = fcp_watchdog_timeout *
941  *				  drv_usectohz(1000000);
942  *
943  *	The value 1000000 is hard coded in the code.
944  *
945  * fcp_watchdog_tick
946  *
947  *	Watchdog timer value in ticks.
948  */
949 static int		fcp_watchdog_init = 0;
950 static int		fcp_watchdog_time = 0;
951 static int		fcp_watchdog_timeout = 1;
952 static int		fcp_watchdog_tick;
953 
954 /*
955  * fcp_offline_delay is a global variable to enable customisation of
956  * the timeout on link offlines or RSCNs. The default value is set
957  * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
958  * specified in FCP4 Chapter 11 (see www.t10.org).
959  *
960  * The variable fcp_offline_delay is specified in SECONDS.
961  *
962  * If we made this a static var then the user would not be able to
963  * change it. This variable is set in fcp_attach().
964  */
965 unsigned int		fcp_offline_delay = FCP_OFFLINE_DELAY;
966 
967 static void		*fcp_softstate = NULL; /* for soft state */
968 static uchar_t		fcp_oflag = FCP_IDLE; /* open flag */
969 static kmutex_t		fcp_global_mutex;
970 static kmutex_t		fcp_ioctl_mutex;
971 static dev_info_t	*fcp_global_dip = NULL;
972 static timeout_id_t	fcp_watchdog_id;
973 const char		*fcp_lun_prop = "lun";
974 const char		*fcp_sam_lun_prop = "sam-lun";
975 const char		*fcp_target_prop = "target";
976 /*
977  * NOTE: consumers of "node-wwn" property include stmsboot in ON
978  * consolidation.
979  */
980 const char		*fcp_node_wwn_prop = "node-wwn";
981 const char		*fcp_port_wwn_prop = "port-wwn";
982 const char		*fcp_conf_wwn_prop = "fc-port-wwn";
983 const char		*fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
984 const char		*fcp_manual_config_only = "manual_configuration_only";
985 const char		*fcp_init_port_prop = "initiator-port";
986 const char		*fcp_tgt_port_prop = "target-port";
987 const char		*fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
988 
989 static struct fcp_port	*fcp_port_head = NULL;
990 static ddi_eventcookie_t	fcp_insert_eid;
991 static ddi_eventcookie_t	fcp_remove_eid;
992 
993 static ndi_event_definition_t	fcp_ndi_event_defs[] = {
994 	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
995 	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
996 };
997 
998 /*
999  * List of valid commands for the scsi_ioctl call
1000  */
1001 static uint8_t scsi_ioctl_list[] = {
1002 	SCMD_INQUIRY,
1003 	SCMD_REPORT_LUN,
1004 	SCMD_READ_CAPACITY
1005 };
1006 
1007 /*
1008  * this is used to dummy up a report lun response for cases
1009  * where the target doesn't support it
1010  */
1011 static uchar_t fcp_dummy_lun[] = {
1012 	0x00,		/* MSB length (length = no of luns * 8) */
1013 	0x00,
1014 	0x00,
1015 	0x08,		/* LSB length */
1016 	0x00,		/* MSB reserved */
1017 	0x00,
1018 	0x00,
1019 	0x00,		/* LSB reserved */
1020 	FCP_PD_ADDRESSING,
1021 	0x00,		/* LUN is ZERO at the first level */
1022 	0x00,
1023 	0x00,		/* second level is zero */
1024 	0x00,
1025 	0x00,		/* third level is zero */
1026 	0x00,
1027 	0x00		/* fourth level is zero */
1028 };
1029 
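/*
 * Table indexed by an arbitrated-loop physical address (AL_PA).  It appears
 * to map each valid AL_PA to its loop index ("switch"/target number), with
 * zero entries for values that are not valid AL_PAs.
 */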
1030 static uchar_t fcp_alpa_to_switch[] = {
1031 	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1032 	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1033 	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1034 	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1035 	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1036 	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1037 	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1038 	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1039 	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1040 	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1041 	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1042 	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1043 	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1044 	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1045 	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1046 	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1047 	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1048 	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1049 	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1050 	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1051 	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1052 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1053 	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1054 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1055 };
1056 
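/*
 * INQUIRY product-id string (blank padded), presumably used to recognize the
 * VICOM SVE box listed in fcp_symmetric_disk_table below.
 */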
1057 static caddr_t pid = "SESS01	      ";
1058 
1059 #if	!defined(lint)
1060 
1061 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1062     fcp_port::fcp_next fcp_watchdog_id))
1063 
1064 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1065 
1066 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1067     fcp_insert_eid
1068     fcp_remove_eid
1069     fcp_watchdog_time))
1070 
1071 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1072     fcp_cb_ops
1073     fcp_ops
1074     callb_cpr))
1075 
1076 #endif /* lint */
1077 
1078 /*
1079  * This table is used to determine whether or not it's safe to copy in
1080  * the target node name for a lun.  Since all luns behind the same target
1081  * have the same wwnn, only targets that do not support multiple luns are
1082  * eligible to be enumerated under mpxio if they aren't page83 compliant.
1083  */
1084 
1085 char *fcp_symmetric_disk_table[] = {
1086 	"SEAGATE ST",
1087 	"IBM	 DDYFT",
1088 	"SUNW	 SUNWGS",	/* Daktari enclosure */
1089 	"SUN	 SENA",		/* SES device */
1090 	"SUN	 SESS01"	/* VICOM SVE box */
1091 };
1092 
1093 int fcp_symmetric_disk_table_size =
1094 	sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1095 
1096 /*
1097  * This structure is bogus. scsi_hba_attach_setup() requires this information
1098  * (the kernel will panic if it is not passed to the routine).
1099  * Need to determine what the actual impact to the system is of providing
1100  * this information, if any. Since dma allocation is done in pkt_init it may
1101  * not have any impact. These values are straight from the Writing Device
1102  * Drivers manual.
1103  */
1104 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1105 	DMA_ATTR_V0,	/* ddi_dma_attr version */
1106 	0,		/* low address */
1107 	0xffffffff,	/* high address */
1108 	0x00ffffff,	/* counter upper bound */
1109 	1,		/* alignment requirements */
1110 	0x3f,		/* burst sizes */
1111 	1,		/* minimum DMA access */
1112 	0xffffffff,	/* maximum DMA access */
1113 	(1 << 24) - 1,	/* segment boundary restrictions */
1114 	1,		/* scatter/gather list length */
1115 	512,		/* device granularity */
1116 	0		/* DMA flags */
1117 };
1118 
1119 /*
1120  * The _init(9e) return value should be that of mod_install(9f). Under
1121  * some circumstances, a failure may not be related to mod_install(9f) and
1122  * one would then require a return value to indicate the failure. Looking
1123  * at mod_install(9f), it is expected to return 0 for success and non-zero
1124  * for failure. mod_install(9f) for device drivers further goes down the
1125  * calling chain and ends up in ddi_installdrv(), whose return values are
1126  * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1127  * calling chain of mod_install(9f) which return values like EINVAL and
1128  * in some cases even return -1.
1129  *
1130  * To work around the vagaries of the mod_install() calling chain, return
1131  * either 0 or ENODEV depending on the success or failure of mod_install()
1132  */
1133 int
1134 _init(void)
1135 {
1136 	int rval;
1137 
1138 	/*
1139 	 * Set up the soft state (preparing for later ddi_soft_state_zalloc())
1140 	 * before registering with the transport.
1141 	 */
1142 	if (ddi_soft_state_init(&fcp_softstate,
1143 	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1144 		return (EINVAL);
1145 	}
1146 
1147 	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1148 	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1149 
1150 	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1151 		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1152 		mutex_destroy(&fcp_global_mutex);
1153 		mutex_destroy(&fcp_ioctl_mutex);
1154 		ddi_soft_state_fini(&fcp_softstate);
1155 		return (ENODEV);
1156 	}
1157 
1158 	fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1159 
1160 	if ((rval = mod_install(&modlinkage)) != 0) {
1161 		fc_trace_free_logq(fcp_logq);
1162 		(void) fc_ulp_remove(&fcp_modinfo);
1163 		mutex_destroy(&fcp_global_mutex);
1164 		mutex_destroy(&fcp_ioctl_mutex);
1165 		ddi_soft_state_fini(&fcp_softstate);
1166 		rval = ENODEV;
1167 	}
1168 
1169 	return (rval);
1170 }
1171 
1172 
1173 /*
1174  * the system is done with us as a driver, so clean up
1175  */
1176 int
1177 _fini(void)
1178 {
1179 	int rval;
1180 
1181 	/*
1182 	 * don't start cleaning up until we know that the module remove
1183 	 * has worked  -- if this works, then we know that each instance
1184 	 * has successfully been DDI_DETACHed
1185 	 */
1186 	if ((rval = mod_remove(&modlinkage)) != 0) {
1187 		return (rval);
1188 	}
1189 
1190 	(void) fc_ulp_remove(&fcp_modinfo);
1191 
1192 	ddi_soft_state_fini(&fcp_softstate);
1193 	mutex_destroy(&fcp_global_mutex);
1194 	mutex_destroy(&fcp_ioctl_mutex);
1195 	fc_trace_free_logq(fcp_logq);
1196 
1197 	return (rval);
1198 }
1199 
1200 
1201 int
1202 _info(struct modinfo *modinfop)
1203 {
1204 	return (mod_info(&modlinkage, modinfop));
1205 }
1206 
1207 
1208 /*
1209  * attach the module
1210  */
1211 static int
1212 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1213 {
1214 	int rval = DDI_SUCCESS;
1215 
1216 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1217 	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1218 
1219 	if (cmd == DDI_ATTACH) {
1220 		/* The FCP pseudo device is created here. */
1221 		mutex_enter(&fcp_global_mutex);
1222 		fcp_global_dip = devi;
1223 		mutex_exit(&fcp_global_mutex);
1224 
1225 		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1226 		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1227 			ddi_report_dev(fcp_global_dip);
1228 		} else {
1229 			cmn_err(CE_WARN, "FCP: Cannot create minor node");
1230 			mutex_enter(&fcp_global_mutex);
1231 			fcp_global_dip = NULL;
1232 			mutex_exit(&fcp_global_mutex);
1233 
1234 			rval = DDI_FAILURE;
1235 		}
1236 		/*
1237 		 * We check the fcp_offline_delay property at this
1238 		 * point. This variable is global for the driver,
1239 		 * not specific to an instance.
1240 		 *
1241 		 * We do not recommend setting the value to less
1242 		 * than 10 seconds (RA_TOV_els), or greater than
1243 		 * 60 seconds.
1244 		 */
1245 		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1246 		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1247 		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
1248 		if ((fcp_offline_delay < 10) ||
1249 		    (fcp_offline_delay > 60)) {
1250 			cmn_err(CE_WARN, "Setting fcp_offline_delay "
1251 			    "to %d second(s). This is outside the "
1252 			    "recommended range of 10..60 seconds.",
1253 			    fcp_offline_delay);
1254 		}
1255 	}
1256 
1257 	return (rval);
1258 }
1259 
1260 
1261 /*ARGSUSED*/
1262 static int
1263 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1264 {
1265 	int	res = DDI_SUCCESS;
1266 
1267 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1268 	    FCP_BUF_LEVEL_8, 0,	 "module detach: cmd=0x%x", cmd);
1269 
1270 	if (cmd == DDI_DETACH) {
1271 		/*
1272 		 * Check if there are active ports/threads. If there
1273 		 * are any, we will fail, else we will succeed (there
1274 		 * should not be much to clean up)
1275 		 */
1276 		mutex_enter(&fcp_global_mutex);
1277 		FCP_DTRACE(fcp_logq, "fcp",
1278 		    fcp_trace, FCP_BUF_LEVEL_8, 0,  "port_head=%p",
1279 		    (void *) fcp_port_head);
1280 
1281 		if (fcp_port_head == NULL) {
1282 			ddi_remove_minor_node(fcp_global_dip, NULL);
1283 			fcp_global_dip = NULL;
1284 			mutex_exit(&fcp_global_mutex);
1285 		} else {
1286 			mutex_exit(&fcp_global_mutex);
1287 			res = DDI_FAILURE;
1288 		}
1289 	}
1290 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1291 	    FCP_BUF_LEVEL_8, 0,	 "module detach returning %d", res);
1292 
1293 	return (res);
1294 }
1295 
1296 
1297 /* ARGSUSED */
1298 static int
1299 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1300 {
1301 	if (otype != OTYP_CHR) {
1302 		return (EINVAL);
1303 	}
1304 
1305 	/*
1306 	 * Allow only root to talk.
1307 	 */
1308 	if (drv_priv(credp)) {
1309 		return (EPERM);
1310 	}
1311 
1312 	mutex_enter(&fcp_global_mutex);
1313 	if (fcp_oflag & FCP_EXCL) {
1314 		mutex_exit(&fcp_global_mutex);
1315 		return (EBUSY);
1316 	}
1317 
1318 	if (flag & FEXCL) {
1319 		if (fcp_oflag & FCP_OPEN) {
1320 			mutex_exit(&fcp_global_mutex);
1321 			return (EBUSY);
1322 		}
1323 		fcp_oflag |= FCP_EXCL;
1324 	}
1325 	fcp_oflag |= FCP_OPEN;
1326 	mutex_exit(&fcp_global_mutex);
1327 
1328 	return (0);
1329 }
1330 
1331 
1332 /* ARGSUSED */
1333 static int
1334 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1335 {
1336 	if (otype != OTYP_CHR) {
1337 		return (EINVAL);
1338 	}
1339 
1340 	mutex_enter(&fcp_global_mutex);
1341 	if (!(fcp_oflag & FCP_OPEN)) {
1342 		mutex_exit(&fcp_global_mutex);
1343 		return (ENODEV);
1344 	}
1345 	fcp_oflag = FCP_IDLE;
1346 	mutex_exit(&fcp_global_mutex);
1347 
1348 	return (0);
1349 }
1350 
1351 
1352 /*
1353  * fcp_ioctl
1354  *	Entry point for the FCP ioctls
1355  *
1356  * Input:
1357  *	See ioctl(9E)
1358  *
1359  * Output:
1360  *	See ioctl(9E)
1361  *
1362  * Returns:
1363  *	See ioctl(9E)
1364  *
1365  * Context:
1366  *	Kernel context.
1367  */
1368 /* ARGSUSED */
1369 static int
1370 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1371     int *rval)
1372 {
1373 	int			ret = 0;
1374 
1375 	mutex_enter(&fcp_global_mutex);
1376 	if (!(fcp_oflag & FCP_OPEN)) {
1377 		mutex_exit(&fcp_global_mutex);
1378 		return (ENXIO);
1379 	}
1380 	mutex_exit(&fcp_global_mutex);
1381 
1382 	switch (cmd) {
1383 	case FCP_TGT_INQUIRY:
1384 	case FCP_TGT_CREATE:
1385 	case FCP_TGT_DELETE:
1386 		ret = fcp_setup_device_data_ioctl(cmd,
1387 		    (struct fcp_ioctl *)data, mode, rval);
1388 		break;
1389 
1390 	case FCP_TGT_SEND_SCSI:
1391 		mutex_enter(&fcp_ioctl_mutex);
1392 		ret = fcp_setup_scsi_ioctl(
1393 		    (struct fcp_scsi_cmd *)data, mode, rval);
1394 		mutex_exit(&fcp_ioctl_mutex);
1395 		break;
1396 
1397 	case FCP_STATE_COUNT:
1398 		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1399 		    mode, rval);
1400 		break;
1401 	case FCP_GET_TARGET_MAPPINGS:
1402 		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1403 		    mode, rval);
1404 		break;
1405 	default:
1406 		fcp_log(CE_WARN, NULL,
1407 		    "!Invalid ioctl opcode = 0x%x", cmd);
1408 		ret	= EINVAL;
1409 	}
1410 
1411 	return (ret);
1412 }
1413 
1414 
1415 /*
1416  * fcp_setup_device_data_ioctl
1417  *	Setup handler for the "device data" style of
1418  *	ioctl for FCP.	See "fcp_util.h" for data structure
1419  *	definition.
1420  *
1421  * Input:
1422  *	cmd	= FCP ioctl command
1423  *	data	= ioctl data
1424  *	mode	= See ioctl(9E)
1425  *
1426  * Output:
1427  *	data	= ioctl data
1428  *	rval	= return value - see ioctl(9E)
1429  *
1430  * Returns:
1431  *	See ioctl(9E)
1432  *
1433  * Context:
1434  *	Kernel context.
1435  */
1436 /* ARGSUSED */
1437 static int
1438 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1439     int *rval)
1440 {
1441 	struct fcp_port	*pptr;
1442 	struct	device_data	*dev_data;
1443 	uint32_t		link_cnt;
1444 	la_wwn_t		*wwn_ptr = NULL;
1445 	struct fcp_tgt		*ptgt = NULL;
1446 	struct fcp_lun		*plun = NULL;
1447 	int			i, error;
1448 	struct fcp_ioctl	fioctl;
1449 
1450 #ifdef	_MULTI_DATAMODEL
1451 	switch (ddi_model_convert_from(mode & FMODELS)) {
1452 	case DDI_MODEL_ILP32: {
1453 		struct fcp32_ioctl f32_ioctl;
1454 
1455 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1456 		    sizeof (struct fcp32_ioctl), mode)) {
1457 			return (EFAULT);
1458 		}
1459 		fioctl.fp_minor = f32_ioctl.fp_minor;
1460 		fioctl.listlen = f32_ioctl.listlen;
1461 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1462 		break;
1463 	}
1464 	case DDI_MODEL_NONE:
1465 		if (ddi_copyin((void *)data, (void *)&fioctl,
1466 		    sizeof (struct fcp_ioctl), mode)) {
1467 			return (EFAULT);
1468 		}
1469 		break;
1470 	}
1471 
1472 #else	/* _MULTI_DATAMODEL */
1473 	if (ddi_copyin((void *)data, (void *)&fioctl,
1474 	    sizeof (struct fcp_ioctl), mode)) {
1475 		return (EFAULT);
1476 	}
1477 #endif	/* _MULTI_DATAMODEL */
1478 
1479 	/*
1480 	 * Right now we can assume that the minor number matches with
1481 	 * this instance of fp. If this changes we will need to
1482 	 * revisit this logic.
1483 	 */
1484 	mutex_enter(&fcp_global_mutex);
1485 	pptr = fcp_port_head;
1486 	while (pptr) {
1487 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1488 			break;
1489 		} else {
1490 			pptr = pptr->port_next;
1491 		}
1492 	}
1493 	mutex_exit(&fcp_global_mutex);
1494 	if (pptr == NULL) {
1495 		return (ENXIO);
1496 	}
1497 	mutex_enter(&pptr->port_mutex);
1498 
1499 
1500 	if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1501 	    fioctl.listlen, KM_NOSLEEP)) == NULL) {
1502 		mutex_exit(&pptr->port_mutex);
1503 		return (ENOMEM);
1504 	}
1505 
1506 	if (ddi_copyin(fioctl.list, dev_data,
1507 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1508 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1509 		mutex_exit(&pptr->port_mutex);
1510 		return (EFAULT);
1511 	}
1512 	link_cnt = pptr->port_link_cnt;
1513 
1514 	if (cmd == FCP_TGT_INQUIRY) {
1515 		wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1516 		if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1517 		    sizeof (wwn_ptr->raw_wwn)) == 0) {
1518 			/* This ioctl is requesting INQ info of local HBA */
1519 			mutex_exit(&pptr->port_mutex);
1520 			dev_data[0].dev0_type = DTYPE_UNKNOWN;
1521 			dev_data[0].dev_status = 0;
1522 			if (ddi_copyout(dev_data, fioctl.list,
1523 			    (sizeof (struct device_data)) * fioctl.listlen,
1524 			    mode)) {
1525 				kmem_free(dev_data,
1526 				    sizeof (*dev_data) * fioctl.listlen);
1527 				return (EFAULT);
1528 			}
1529 			kmem_free(dev_data,
1530 			    sizeof (*dev_data) * fioctl.listlen);
1531 #ifdef	_MULTI_DATAMODEL
1532 			switch (ddi_model_convert_from(mode & FMODELS)) {
1533 			case DDI_MODEL_ILP32: {
1534 				struct fcp32_ioctl f32_ioctl;
1535 				f32_ioctl.fp_minor = fioctl.fp_minor;
1536 				f32_ioctl.listlen = fioctl.listlen;
1537 				f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1538 				if (ddi_copyout((void *)&f32_ioctl,
1539 				    (void *)data,
1540 				    sizeof (struct fcp32_ioctl), mode)) {
1541 					return (EFAULT);
1542 				}
1543 				break;
1544 			}
1545 			case DDI_MODEL_NONE:
1546 				if (ddi_copyout((void *)&fioctl, (void *)data,
1547 				    sizeof (struct fcp_ioctl), mode)) {
1548 					return (EFAULT);
1549 				}
1550 				break;
1551 			}
1552 #else	/* _MULTI_DATAMODEL */
1553 			if (ddi_copyout((void *)&fioctl, (void *)data,
1554 			    sizeof (struct fcp_ioctl), mode)) {
1555 				return (EFAULT);
1556 			}
1557 #endif	/* _MULTI_DATAMODEL */
1558 			return (0);
1559 		}
1560 	}
1561 
1562 	if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1563 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1564 		mutex_exit(&pptr->port_mutex);
1565 		return (ENXIO);
1566 	}
1567 
1568 	for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1569 	    i++) {
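		/*
		 * The link_cnt comparison ends the scan early if the link
		 * bounces (port_link_cnt changes) while we are walking the
		 * caller's list.
		 */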
1570 		wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1571 
1572 		dev_data[i].dev0_type = DTYPE_UNKNOWN;
1573 
1574 
1575 		dev_data[i].dev_status = ENXIO;
1576 
1577 		if ((ptgt = fcp_lookup_target(pptr,
1578 		    (uchar_t *)wwn_ptr)) == NULL) {
1579 			mutex_exit(&pptr->port_mutex);
1580 			if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1581 			    wwn_ptr, &error, 0) == NULL) {
1582 				dev_data[i].dev_status = ENODEV;
1583 				mutex_enter(&pptr->port_mutex);
1584 				continue;
1585 			} else {
1586 
1587 				dev_data[i].dev_status = EAGAIN;
1588 
1589 				mutex_enter(&pptr->port_mutex);
1590 				continue;
1591 			}
1592 		} else {
1593 			mutex_enter(&ptgt->tgt_mutex);
1594 			if (ptgt->tgt_state & (FCP_TGT_MARK |
1595 			    FCP_TGT_BUSY)) {
1596 				dev_data[i].dev_status = EAGAIN;
1597 				mutex_exit(&ptgt->tgt_mutex);
1598 				continue;
1599 			}
1600 
1601 			if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1602 				if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1603 					dev_data[i].dev_status = ENOTSUP;
1604 				} else {
1605 					dev_data[i].dev_status = ENXIO;
1606 				}
1607 				mutex_exit(&ptgt->tgt_mutex);
1608 				continue;
1609 			}
1610 
1611 			switch (cmd) {
1612 			case FCP_TGT_INQUIRY:
1613 				/*
1614 				 * We report the device type of LUN 0
1615 				 * only, even though in some cases (such
1616 				 * as maxstrat) the LUN 0 device type
1617 				 * may be 0x3f (invalid), because for
1618 				 * bridge boxes the target appears as a
1619 				 * set of LUNs and the first LUN could be
1620 				 * a device that the utility may not care
1621 				 * about (like a tape device).
1622 				 */
1623 				dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1624 				dev_data[i].dev_status = 0;
1625 				mutex_exit(&ptgt->tgt_mutex);
1626 
1627 				if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1628 					dev_data[i].dev0_type = DTYPE_UNKNOWN;
1629 				} else {
1630 					dev_data[i].dev0_type = plun->lun_type;
1631 				}
1632 				mutex_enter(&ptgt->tgt_mutex);
1633 				break;
1634 
1635 			case FCP_TGT_CREATE:
1636 				mutex_exit(&ptgt->tgt_mutex);
1637 				mutex_exit(&pptr->port_mutex);
1638 
1639 				/*
1640 				 * Serialize state change callbacks;
1641 				 * only one callback will be handled
1642 				 * at a time.
1643 				 */
1644 				mutex_enter(&fcp_global_mutex);
1645 				if (fcp_oflag & FCP_BUSY) {
1646 					mutex_exit(&fcp_global_mutex);
1647 					if (dev_data) {
1648 						kmem_free(dev_data,
1649 						    sizeof (*dev_data) *
1650 						    fioctl.listlen);
1651 					}
1652 					return (EBUSY);
1653 				}
1654 				fcp_oflag |= FCP_BUSY;
1655 				mutex_exit(&fcp_global_mutex);
1656 
1657 				dev_data[i].dev_status =
1658 				    fcp_create_on_demand(pptr,
1659 				    wwn_ptr->raw_wwn);
1660 
1661 				if (dev_data[i].dev_status != 0) {
1662 					char	buf[25];
1663 
1664 					for (i = 0; i < FC_WWN_SIZE; i++) {
1665 						(void) sprintf(&buf[i << 1],
1666 						    "%02x",
1667 						    wwn_ptr->raw_wwn[i]);
1668 					}
1669 
1670 					fcp_log(CE_WARN, pptr->port_dip,
1671 					    "!Failed to create nodes for"
1672 					    " pwwn=%s; error=%x", buf,
1673 					    dev_data[i].dev_status);
1674 				}
1675 
1676 				/* allow state change callbacks again */
1677 				mutex_enter(&fcp_global_mutex);
1678 				fcp_oflag &= ~FCP_BUSY;
1679 				mutex_exit(&fcp_global_mutex);
1680 
1681 				mutex_enter(&pptr->port_mutex);
1682 				mutex_enter(&ptgt->tgt_mutex);
1683 
1684 				break;
1685 
1686 			case FCP_TGT_DELETE:
1687 				break;
1688 
1689 			default:
1690 				fcp_log(CE_WARN, pptr->port_dip,
1691 				    "!Invalid device data ioctl "
1692 				    "opcode = 0x%x", cmd);
1693 			}
1694 			mutex_exit(&ptgt->tgt_mutex);
1695 		}
1696 	}
1697 	mutex_exit(&pptr->port_mutex);
1698 
1699 	if (ddi_copyout(dev_data, fioctl.list,
1700 	    (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1701 		kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1702 		return (EFAULT);
1703 	}
1704 	kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1705 
1706 #ifdef	_MULTI_DATAMODEL
1707 	switch (ddi_model_convert_from(mode & FMODELS)) {
1708 	case DDI_MODEL_ILP32: {
1709 		struct fcp32_ioctl f32_ioctl;
1710 
1711 		f32_ioctl.fp_minor = fioctl.fp_minor;
1712 		f32_ioctl.listlen = fioctl.listlen;
1713 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1714 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1715 		    sizeof (struct fcp32_ioctl), mode)) {
1716 			return (EFAULT);
1717 		}
1718 		break;
1719 	}
1720 	case DDI_MODEL_NONE:
1721 		if (ddi_copyout((void *)&fioctl, (void *)data,
1722 		    sizeof (struct fcp_ioctl), mode)) {
1723 			return (EFAULT);
1724 		}
1725 		break;
1726 	}
1727 #else	/* _MULTI_DATAMODEL */
1728 
1729 	if (ddi_copyout((void *)&fioctl, (void *)data,
1730 	    sizeof (struct fcp_ioctl), mode)) {
1731 		return (EFAULT);
1732 	}
1733 #endif	/* _MULTI_DATAMODEL */
1734 
1735 	return (0);
1736 }
1737 
1738 /*
1739  * Fetch the target mappings (path, etc.) for all LUNs
1740  * on this port.
1741  */
1742 /* ARGSUSED */
1743 static int
1744 fcp_get_target_mappings(struct fcp_ioctl *data,
1745     int mode, int *rval)
1746 {
1747 	struct fcp_port	    *pptr;
1748 	fc_hba_target_mappings_t    *mappings;
1749 	fc_hba_mapping_entry_t	    *map;
1750 	struct fcp_tgt	    *ptgt = NULL;
1751 	struct fcp_lun	    *plun = NULL;
1752 	int			    i, mapIndex, mappingSize;
1753 	int			    listlen;
1754 	struct fcp_ioctl	    fioctl;
1755 	char			    *path;
1756 	fcp_ent_addr_t		    sam_lun_addr;
1757 
1758 #ifdef	_MULTI_DATAMODEL
1759 	switch (ddi_model_convert_from(mode & FMODELS)) {
1760 	case DDI_MODEL_ILP32: {
1761 		struct fcp32_ioctl f32_ioctl;
1762 
1763 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1764 		    sizeof (struct fcp32_ioctl), mode)) {
1765 			return (EFAULT);
1766 		}
1767 		fioctl.fp_minor = f32_ioctl.fp_minor;
1768 		fioctl.listlen = f32_ioctl.listlen;
1769 		fioctl.list = (caddr_t)(long)f32_ioctl.list;
1770 		break;
1771 	}
1772 	case DDI_MODEL_NONE:
1773 		if (ddi_copyin((void *)data, (void *)&fioctl,
1774 		    sizeof (struct fcp_ioctl), mode)) {
1775 			return (EFAULT);
1776 		}
1777 		break;
1778 	}
1779 
1780 #else	/* _MULTI_DATAMODEL */
1781 	if (ddi_copyin((void *)data, (void *)&fioctl,
1782 	    sizeof (struct fcp_ioctl), mode)) {
1783 		return (EFAULT);
1784 	}
1785 #endif	/* _MULTI_DATAMODEL */
1786 
1787 	/*
1788 	 * Right now we can assume that the minor number matches with
1789 	 * this instance of fp. If this changes we will need to
1790 	 * revisit this logic.
1791 	 */
1792 	mutex_enter(&fcp_global_mutex);
1793 	pptr = fcp_port_head;
1794 	while (pptr) {
1795 		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1796 			break;
1797 		} else {
1798 			pptr = pptr->port_next;
1799 		}
1800 	}
1801 	mutex_exit(&fcp_global_mutex);
1802 	if (pptr == NULL) {
1803 		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1804 		    fioctl.fp_minor);
1805 		return (ENXIO);
1806 	}
1807 
1808 
1809 	/* We use listlen to show the total buffer size */
1810 	mappingSize = fioctl.listlen;
1811 
1812 	/* Now calculate how many mapping entries will fit */
1813 	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1814 	    - sizeof (fc_hba_target_mappings_t);
1815 	if (listlen <= 0) {
1816 		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1817 		return (ENXIO);
1818 	}
1819 	listlen = listlen / sizeof (fc_hba_mapping_entry_t);
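	/*
	 * listlen is now the number of fc_hba_mapping_entry_t structures
	 * that fit in the caller's buffer, assuming fc_hba_target_mappings_t
	 * embeds one entry (so a buffer exactly the size of that header
	 * yields a listlen of 1).
	 */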
1820 
1821 	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1822 		return (ENOMEM);
1823 	}
1824 	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1825 
1826 	/* Now get to work */
1827 	mapIndex = 0;
1828 
1829 	mutex_enter(&pptr->port_mutex);
1830 	/* Loop through all targets on this port */
1831 	for (i = 0; i < FCP_NUM_HASH; i++) {
1832 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1833 		    ptgt = ptgt->tgt_next) {
1834 
1835 			mutex_enter(&ptgt->tgt_mutex);
1836 
1837 			/* Loop through all LUNs on this target */
1838 			for (plun = ptgt->tgt_lun; plun != NULL;
1839 			    plun = plun->lun_next) {
1840 				if (plun->lun_state & FCP_LUN_OFFLINE) {
1841 					continue;
1842 				}
1843 
1844 				path = fcp_get_lun_path(plun);
1845 				if (path == NULL) {
1846 					continue;
1847 				}
1848 
1849 				if (mapIndex >= listlen) {
1850 					mapIndex ++;
1851 					kmem_free(path, MAXPATHLEN);
1852 					continue;
1853 				}
1854 				map = &mappings->entries[mapIndex++];
1855 				bcopy(path, map->targetDriver,
1856 				    sizeof (map->targetDriver));
1857 				map->d_id = ptgt->tgt_d_id;
1858 				map->busNumber = 0;
1859 				map->targetNumber = ptgt->tgt_d_id;
1860 				map->osLUN = plun->lun_num;
1861 
1862 				/*
1863 				 * The LUN was byte-swapped when it was
1864 				 * stored in lun_addr; swap it back before
1865 				 * returning it to user land.
1866 				 */
1867 
1868 				sam_lun_addr.ent_addr_0 =
1869 				    BE_16(plun->lun_addr.ent_addr_0);
1870 				sam_lun_addr.ent_addr_1 =
1871 				    BE_16(plun->lun_addr.ent_addr_1);
1872 				sam_lun_addr.ent_addr_2 =
1873 				    BE_16(plun->lun_addr.ent_addr_2);
1874 				sam_lun_addr.ent_addr_3 =
1875 				    BE_16(plun->lun_addr.ent_addr_3);
1876 
1877 				bcopy(&sam_lun_addr, &map->samLUN,
1878 				    FCP_LUN_SIZE);
1879 				bcopy(ptgt->tgt_node_wwn.raw_wwn,
1880 				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1881 				bcopy(ptgt->tgt_port_wwn.raw_wwn,
1882 				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1883 
1884 				if (plun->lun_guid) {
1885 
1886 					/* convert ascii wwn to bytes */
1887 					fcp_ascii_to_wwn(plun->lun_guid,
1888 					    map->guid, sizeof (map->guid));
1889 
1890 					if ((sizeof (map->guid)) <
1891 					    plun->lun_guid_size / 2) {
1892 						cmn_err(CE_WARN,
1893 						    "fcp_get_target_mappings:"
1894 						    "guid copy space "
1895 						    "insufficient."
1896 						    "Copy Truncation - "
1897 						    "available %d; need %d",
1898 						    (int)sizeof (map->guid),
1899 						    (int)
1900 						    plun->lun_guid_size / 2);
1901 					}
1902 				}
1903 				kmem_free(path, MAXPATHLEN);
1904 			}
1905 			mutex_exit(&ptgt->tgt_mutex);
1906 		}
1907 	}
1908 	mutex_exit(&pptr->port_mutex);
1909 	mappings->numLuns = mapIndex;
1910 
1911 	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1912 		kmem_free(mappings, mappingSize);
1913 		return (EFAULT);
1914 	}
1915 	kmem_free(mappings, mappingSize);
1916 
1917 #ifdef	_MULTI_DATAMODEL
1918 	switch (ddi_model_convert_from(mode & FMODELS)) {
1919 	case DDI_MODEL_ILP32: {
1920 		struct fcp32_ioctl f32_ioctl;
1921 
1922 		f32_ioctl.fp_minor = fioctl.fp_minor;
1923 		f32_ioctl.listlen = fioctl.listlen;
1924 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1925 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1926 		    sizeof (struct fcp32_ioctl), mode)) {
1927 			return (EFAULT);
1928 		}
1929 		break;
1930 	}
1931 	case DDI_MODEL_NONE:
1932 		if (ddi_copyout((void *)&fioctl, (void *)data,
1933 		    sizeof (struct fcp_ioctl), mode)) {
1934 			return (EFAULT);
1935 		}
1936 		break;
1937 	}
1938 #else	/* _MULTI_DATAMODEL */
1939 
1940 	if (ddi_copyout((void *)&fioctl, (void *)data,
1941 	    sizeof (struct fcp_ioctl), mode)) {
1942 		return (EFAULT);
1943 	}
1944 #endif	/* _MULTI_DATAMODEL */
1945 
1946 	return (0);
1947 }
1948 
1949 /*
1950  * fcp_setup_scsi_ioctl
1951  *	Setup handler for the "scsi passthru" style of
1952  *	ioctl for FCP.	See "fcp_util.h" for data structure
1953  *	definition.
1954  *
1955  * Input:
1956  *	u_fscsi	= ioctl data (user address space)
1957  *	mode	= See ioctl(9E)
1958  *
1959  * Output:
1960  *	u_fscsi	= ioctl data (user address space)
1961  *	rval	= return value - see ioctl(9E)
1962  *
1963  * Returns:
1964  *	0	= OK
1965  *	EAGAIN	= See errno.h
1966  *	EBUSY	= See errno.h
1967  *	EFAULT	= See errno.h
1968  *	EINTR	= See errno.h
1969  *	EINVAL	= See errno.h
1970  *	EIO	= See errno.h
1971  *	ENOMEM	= See errno.h
1972  *	ENXIO	= See errno.h
1973  *
1974  * Context:
1975  *	Kernel context.
1976  */
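/*
 * Illustrative userland usage of the FCP_TGT_SEND_SCSI passthru (a sketch,
 * not taken from this file; the device path is hypothetical and only the
 * fields referenced in this file are shown):
 *
 *	struct fcp_scsi_cmd cmd = { 0 };
 *	uchar_t cdb[FCP_CDB_SIZE] = { 0x12 };	(INQUIRY, if whitelisted)
 *	char buf[256], sense[64];
 *	int fd = open("/devices/.../fcp@0:fcp", O_RDWR);
 *
 *	cmd.scsi_fc_port_num = 0;		(fp instance to use)
 *	cmd.scsi_flags = FCP_SCSI_READ;		(only reads are supported)
 *	cmd.scsi_timeout = 30;
 *	cmd.scsi_cdbbufaddr = (caddr_t)cdb;	cmd.scsi_cdblen = sizeof (cdb);
 *	cmd.scsi_bufaddr = buf;			cmd.scsi_buflen = sizeof (buf);
 *	cmd.scsi_rqbufaddr = sense;		cmd.scsi_rqlen = sizeof (sense);
 *	(void) ioctl(fd, FCP_TGT_SEND_SCSI, &cmd);
 *
 * scsi_fc_pwwn and scsi_lun must also identify the target and LUN; how the
 * caller obtains those values is outside the scope of this sketch.
 */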
1977 /* ARGSUSED */
1978 static int
1979 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1980     int mode, int *rval)
1981 {
1982 	int			ret		= 0;
1983 	int			temp_ret;
1984 	caddr_t			k_cdbbufaddr	= NULL;
1985 	caddr_t			k_bufaddr	= NULL;
1986 	caddr_t			k_rqbufaddr	= NULL;
1987 	caddr_t			u_cdbbufaddr;
1988 	caddr_t			u_bufaddr;
1989 	caddr_t			u_rqbufaddr;
1990 	struct fcp_scsi_cmd	k_fscsi;
1991 
1992 	/*
1993 	 * Get fcp_scsi_cmd array element from user address space
1994 	 */
1995 	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1996 	    != 0) {
1997 		return (ret);
1998 	}
1999 
2000 
2001 	/*
2002 	 * Even though kmem_alloc() checks the validity of the
2003 	 * buffer length, this check is still needed for the case
2004 	 * where kmem flags are set and a zero buffer length is passed.
2005 	 */
2006 	if ((k_fscsi.scsi_cdblen <= 0) ||
2007 	    (k_fscsi.scsi_buflen <= 0) ||
2008 	    (k_fscsi.scsi_rqlen <= 0)) {
2009 		return (EINVAL);
2010 	}
2011 
2012 	/*
2013 	 * Allocate data for fcp_scsi_cmd pointer fields
2014 	 */
2015 	if (ret == 0) {
2016 		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2017 		k_bufaddr    = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2018 		k_rqbufaddr  = kmem_alloc(k_fscsi.scsi_rqlen,  KM_NOSLEEP);
2019 
2020 		if (k_cdbbufaddr == NULL ||
2021 		    k_bufaddr	 == NULL ||
2022 		    k_rqbufaddr	 == NULL) {
2023 			ret = ENOMEM;
2024 		}
2025 	}
2026 
2027 	/*
2028 	 * Get fcp_scsi_cmd pointer fields from user
2029 	 * address space
2030 	 */
2031 	if (ret == 0) {
2032 		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2033 		u_bufaddr    = k_fscsi.scsi_bufaddr;
2034 		u_rqbufaddr  = k_fscsi.scsi_rqbufaddr;
2035 
2036 		if (ddi_copyin(u_cdbbufaddr,
2037 		    k_cdbbufaddr,
2038 		    k_fscsi.scsi_cdblen,
2039 		    mode)) {
2040 			ret = EFAULT;
2041 		} else if (ddi_copyin(u_bufaddr,
2042 		    k_bufaddr,
2043 		    k_fscsi.scsi_buflen,
2044 		    mode)) {
2045 			ret = EFAULT;
2046 		} else if (ddi_copyin(u_rqbufaddr,
2047 		    k_rqbufaddr,
2048 		    k_fscsi.scsi_rqlen,
2049 		    mode)) {
2050 			ret = EFAULT;
2051 		}
2052 	}
2053 
2054 	/*
2055 	 * Send scsi command (blocking)
2056 	 */
2057 	if (ret == 0) {
2058 		/*
2059 		 * Prior to sending the scsi command, the
2060 		 * fcp_scsi_cmd data structure must contain kernel,
2061 		 * not user, addresses.
2062 		 */
2063 		k_fscsi.scsi_cdbbufaddr	= k_cdbbufaddr;
2064 		k_fscsi.scsi_bufaddr	= k_bufaddr;
2065 		k_fscsi.scsi_rqbufaddr	= k_rqbufaddr;
2066 
2067 		ret = fcp_send_scsi_ioctl(&k_fscsi);
2068 
2069 		/*
2070 		 * After sending the scsi command, the
2071 		 * fcp_scsi_cmd data structure must contain user,
2072 		 * not kernel, addresses.
2073 		 */
2074 		k_fscsi.scsi_cdbbufaddr	= u_cdbbufaddr;
2075 		k_fscsi.scsi_bufaddr	= u_bufaddr;
2076 		k_fscsi.scsi_rqbufaddr	= u_rqbufaddr;
2077 	}
2078 
2079 	/*
2080 	 * Put fcp_scsi_cmd pointer fields to user address space
2081 	 */
2082 	if (ret == 0) {
2083 		if (ddi_copyout(k_cdbbufaddr,
2084 		    u_cdbbufaddr,
2085 		    k_fscsi.scsi_cdblen,
2086 		    mode)) {
2087 			ret = EFAULT;
2088 		} else if (ddi_copyout(k_bufaddr,
2089 		    u_bufaddr,
2090 		    k_fscsi.scsi_buflen,
2091 		    mode)) {
2092 			ret = EFAULT;
2093 		} else if (ddi_copyout(k_rqbufaddr,
2094 		    u_rqbufaddr,
2095 		    k_fscsi.scsi_rqlen,
2096 		    mode)) {
2097 			ret = EFAULT;
2098 		}
2099 	}
2100 
2101 	/*
2102 	 * Free data for fcp_scsi_cmd pointer fields
2103 	 */
2104 	if (k_cdbbufaddr != NULL) {
2105 		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2106 	}
2107 	if (k_bufaddr != NULL) {
2108 		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2109 	}
2110 	if (k_rqbufaddr != NULL) {
2111 		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2112 	}
2113 
2114 	/*
2115 	 * Put fcp_scsi_cmd array element to user address space
2116 	 */
2117 	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2118 	if (temp_ret != 0) {
2119 		ret = temp_ret;
2120 	}
2121 
2122 	/*
2123 	 * Return status
2124 	 */
2125 	return (ret);
2126 }
2127 
2128 
2129 /*
2130  * fcp_copyin_scsi_cmd
2131  *	Copy in fcp_scsi_cmd data structure from user address space.
2132  *	The data may be in 32 bit or 64 bit modes.
2133  *
2134  * Input:
2135  *	base_addr	= from address (user address space)
2136  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2137  *
2138  * Output:
2139  *	fscsi		= to address (kernel address space)
2140  *
2141  * Returns:
2142  *	0	= OK
2143  *	EFAULT	= Error
2144  *
2145  * Context:
2146  *	Kernel context.
2147  */
2148 static int
2149 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2150 {
2151 #ifdef	_MULTI_DATAMODEL
2152 	struct fcp32_scsi_cmd	f32scsi;
2153 
2154 	switch (ddi_model_convert_from(mode & FMODELS)) {
2155 	case DDI_MODEL_ILP32:
2156 		/*
2157 		 * Copy data from user address space
2158 		 */
2159 		if (ddi_copyin((void *)base_addr,
2160 		    &f32scsi,
2161 		    sizeof (struct fcp32_scsi_cmd),
2162 		    mode)) {
2163 			return (EFAULT);
2164 		}
2165 		/*
2166 		 * Convert from 32 bit to 64 bit
2167 		 */
2168 		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2169 		break;
2170 	case DDI_MODEL_NONE:
2171 		/*
2172 		 * Copy data from user address space
2173 		 */
2174 		if (ddi_copyin((void *)base_addr,
2175 		    fscsi,
2176 		    sizeof (struct fcp_scsi_cmd),
2177 		    mode)) {
2178 			return (EFAULT);
2179 		}
2180 		break;
2181 	}
2182 #else	/* _MULTI_DATAMODEL */
2183 	/*
2184 	 * Copy data from user address space
2185 	 */
2186 	if (ddi_copyin((void *)base_addr,
2187 	    fscsi,
2188 	    sizeof (struct fcp_scsi_cmd),
2189 	    mode)) {
2190 		return (EFAULT);
2191 	}
2192 #endif	/* _MULTI_DATAMODEL */
2193 
2194 	return (0);
2195 }
2196 
2197 
2198 /*
2199  * fcp_copyout_scsi_cmd
2200  *	Copy out fcp_scsi_cmd data structure to user address space.
2201  *	The data may be in 32 bit or 64 bit modes.
2202  *
2203  * Input:
2204  *	fscsi		= to address (kernel address space)
2205  *	mode		= See ioctl(9E) and ddi_copyin(9F)
2206  *
2207  * Output:
2208  *	base_addr	= from address (user address space)
2209  *
2210  * Returns:
2211  *	0	= OK
2212  *	EFAULT	= Error
2213  *
2214  * Context:
2215  *	Kernel context.
2216  */
2217 static int
2218 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2219 {
2220 #ifdef	_MULTI_DATAMODEL
2221 	struct fcp32_scsi_cmd	f32scsi;
2222 
2223 	switch (ddi_model_convert_from(mode & FMODELS)) {
2224 	case DDI_MODEL_ILP32:
2225 		/*
2226 		 * Convert from 64 bit to 32 bit
2227 		 */
2228 		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2229 		/*
2230 		 * Copy data to user address space
2231 		 */
2232 		if (ddi_copyout(&f32scsi,
2233 		    (void *)base_addr,
2234 		    sizeof (struct fcp32_scsi_cmd),
2235 		    mode)) {
2236 			return (EFAULT);
2237 		}
2238 		break;
2239 	case DDI_MODEL_NONE:
2240 		/*
2241 		 * Copy data to user address space
2242 		 */
2243 		if (ddi_copyout(fscsi,
2244 		    (void *)base_addr,
2245 		    sizeof (struct fcp_scsi_cmd),
2246 		    mode)) {
2247 			return (EFAULT);
2248 		}
2249 		break;
2250 	}
2251 #else	/* _MULTI_DATAMODEL */
2252 	/*
2253 	 * Copy data to user address space
2254 	 */
2255 	if (ddi_copyout(fscsi,
2256 	    (void *)base_addr,
2257 	    sizeof (struct fcp_scsi_cmd),
2258 	    mode)) {
2259 		return (EFAULT);
2260 	}
2261 #endif	/* _MULTI_DATAMODEL */
2262 
2263 	return (0);
2264 }
2265 
2266 
2267 /*
2268  * fcp_send_scsi_ioctl
2269  *	Sends the SCSI command in blocking mode.
2270  *
2271  * Input:
2272  *	fscsi		= SCSI command data structure
2273  *
2274  * Output:
2275  *	fscsi		= SCSI command data structure
2276  *
2277  * Returns:
2278  *	0	= OK
2279  *	EAGAIN	= See errno.h
2280  *	EBUSY	= See errno.h
2281  *	EINTR	= See errno.h
2282  *	EINVAL	= See errno.h
2283  *	EIO	= See errno.h
2284  *	ENOMEM	= See errno.h
2285  *	ENXIO	= See errno.h
2286  *
2287  * Context:
2288  *	Kernel context.
2289  */
2290 static int
2291 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2292 {
2293 	struct fcp_lun	*plun		= NULL;
2294 	struct fcp_port	*pptr		= NULL;
2295 	struct fcp_tgt	*ptgt		= NULL;
2296 	fc_packet_t		*fpkt		= NULL;
2297 	struct fcp_ipkt	*icmd		= NULL;
2298 	int			target_created	= FALSE;
2299 	fc_frame_hdr_t		*hp;
2300 	struct fcp_cmd		fcp_cmd;
2301 	struct fcp_cmd		*fcmd;
2302 	union scsi_cdb		*scsi_cdb;
2303 	la_wwn_t		*wwn_ptr;
2304 	int			nodma;
2305 	struct fcp_rsp		*rsp;
2306 	struct fcp_rsp_info	*rsp_info;
2307 	caddr_t			rsp_sense;
2308 	int			buf_len;
2309 	int			info_len;
2310 	int			sense_len;
2311 	struct scsi_extended_sense	*sense_to = NULL;
2312 	timeout_id_t		tid;
2313 	uint8_t			reconfig_lun = FALSE;
2314 	uint8_t			reconfig_pending = FALSE;
2315 	uint8_t			scsi_cmd;
2316 	int			rsp_len;
2317 	int			cmd_index;
2318 	int			fc_status;
2319 	int			pkt_state;
2320 	int			pkt_action;
2321 	int			pkt_reason;
2322 	int			ret, xport_retval = ~FC_SUCCESS;
2323 	int			lcount;
2324 	int			tcount;
2325 	int			reconfig_status;
2326 	int			port_busy = FALSE;
2327 	uchar_t			*lun_string;
2328 
2329 	/*
2330 	 * Check valid SCSI command
2331 	 */
2332 	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2333 	ret = EINVAL;
2334 	for (cmd_index = 0;
2335 	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2336 	    ret != 0;
2337 	    cmd_index++) {
2338 		/*
2339 		 * First byte of CDB is the SCSI command
2340 		 */
2341 		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2342 			ret = 0;
2343 		}
2344 	}
2345 
2346 	/*
2347 	 * Check inputs
2348 	 */
2349 	if (fscsi->scsi_flags != FCP_SCSI_READ) {
2350 		ret = EINVAL;
2351 	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2352 		/* no larger than */
2353 		ret = EINVAL;
2354 	}
2355 
2356 
2357 	/*
2358 	 * Find FC port
2359 	 */
2360 	if (ret == 0) {
2361 		/*
2362 		 * Acquire global mutex
2363 		 */
2364 		mutex_enter(&fcp_global_mutex);
2365 
2366 		pptr = fcp_port_head;
2367 		while (pptr) {
2368 			if (pptr->port_instance ==
2369 			    (uint32_t)fscsi->scsi_fc_port_num) {
2370 				break;
2371 			} else {
2372 				pptr = pptr->port_next;
2373 			}
2374 		}
2375 
2376 		if (pptr == NULL) {
2377 			ret = ENXIO;
2378 		} else {
2379 			/*
2380 			 * fc_ulp_busy_port() can raise power, so we must
2381 			 * not hold any mutexes involved in PM.
2382 			 */
2383 			mutex_exit(&fcp_global_mutex);
2384 			ret = fc_ulp_busy_port(pptr->port_fp_handle);
2385 		}
2386 
2387 		if (ret == 0) {
2388 
2389 			/* remember port is busy, so we will release later */
2390 			port_busy = TRUE;
2391 
2392 			/*
2393 			 * If there is a reconfiguration in progress, wait
2394 			 * for it to complete.
2395 			 */
2396 
2397 			fcp_reconfig_wait(pptr);
2398 
2399 			/* reacquire mutexes in order */
2400 			mutex_enter(&fcp_global_mutex);
2401 			mutex_enter(&pptr->port_mutex);
2402 
2403 			/*
2404 			 * Will port accept DMA?
2405 			 */
2406 			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2407 			    ? 1 : 0;
2408 
2409 			/*
2410 			 * If init or offline, devices are not known.
2411 			 *
2412 			 * If we are discovering (onlining), we obviously
2413 			 * cannot provide reliable data about devices
2414 			 * until discovery is complete.
2415 			 */
2416 			if (pptr->port_state &	  (FCP_STATE_INIT |
2417 			    FCP_STATE_OFFLINE)) {
2418 				ret = ENXIO;
2419 			} else if (pptr->port_state & FCP_STATE_ONLINING) {
2420 				ret = EBUSY;
2421 			} else {
2422 				/*
2423 				 * Find target from pwwn
2424 				 *
2425 				 * The wwn must be put into a local
2426 				 * variable to ensure alignment.
2427 				 */
2428 				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2429 				ptgt = fcp_lookup_target(pptr,
2430 				    (uchar_t *)wwn_ptr);
2431 
2432 				/*
2433 				 * If the target was not found, create it.
2434 				 */
2435 				if (ptgt == NULL) {
2436 					/*
2437 					 * Note: Still have global &
2438 					 * port mutexes
2439 					 */
2440 					mutex_exit(&pptr->port_mutex);
2441 					ptgt = fcp_port_create_tgt(pptr,
2442 					    wwn_ptr, &ret, &fc_status,
2443 					    &pkt_state, &pkt_action,
2444 					    &pkt_reason);
2445 					mutex_enter(&pptr->port_mutex);
2446 
2447 					fscsi->scsi_fc_status  = fc_status;
2448 					fscsi->scsi_pkt_state  =
2449 					    (uchar_t)pkt_state;
2450 					fscsi->scsi_pkt_reason = pkt_reason;
2451 					fscsi->scsi_pkt_action =
2452 					    (uchar_t)pkt_action;
2453 
2454 					if (ptgt != NULL) {
2455 						target_created = TRUE;
2456 					} else if (ret == 0) {
2457 						ret = ENOMEM;
2458 					}
2459 				}
2460 
2461 				if (ret == 0) {
2462 					/*
2463 					 * Acquire target
2464 					 */
2465 					mutex_enter(&ptgt->tgt_mutex);
2466 
2467 					/*
2468 					 * If the target is marked or busy,
2469 					 * it cannot be used.
2470 					 */
2471 					if (ptgt->tgt_state &
2472 					    (FCP_TGT_MARK |
2473 					    FCP_TGT_BUSY)) {
2474 						ret = EBUSY;
2475 					} else {
2476 						/*
2477 						 * Mark target as busy
2478 						 */
2479 						ptgt->tgt_state |=
2480 						    FCP_TGT_BUSY;
2481 					}
2482 
2483 					/*
2484 					 * Release target
2485 					 */
2486 					lcount = pptr->port_link_cnt;
2487 					tcount = ptgt->tgt_change_cnt;
2488 					mutex_exit(&ptgt->tgt_mutex);
2489 				}
2490 			}
2491 
2492 			/*
2493 			 * Release port
2494 			 */
2495 			mutex_exit(&pptr->port_mutex);
2496 		}
2497 
2498 		/*
2499 		 * Release global mutex
2500 		 */
2501 		mutex_exit(&fcp_global_mutex);
2502 	}
2503 
2504 	if (ret == 0) {
2505 		uint64_t belun = BE_64(fscsi->scsi_lun);
2506 
2507 		/*
2508 		 * If it's a target device, find lun from pwwn
2509 		 * The wwn must be put into a local
2510 		 * variable to ensure alignment.
2511 		 */
2512 		mutex_enter(&pptr->port_mutex);
2513 		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2514 		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2515 			/* this is not a target */
2516 			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2517 			ret = ENXIO;
2518 		} else if ((belun << 16) != 0) {
2519 			/*
2520 			 * Since fcp only support PD and LU addressing method
2521 			 * Since fcp only supports the PD and LU addressing
2522 			 * methods so far, the last 6 bytes of a valid LUN are
2523 			 * expected to be filled with 00h.
2524 			fscsi->scsi_fc_status = FC_INVALID_LUN;
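			/*
			 * (Layout this check relies on: the top two bits of
			 * LUN byte 0 select the addressing method and the low
			 * 6 bits of byte 0 plus byte 1 carry the LUN number,
			 * so for PD/LU addressing only the first 16 bits of
			 * the 8-byte LUN may be non-zero.)
			 */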
2525 			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2526 			    " method 0x%02x with LUN number 0x%016" PRIx64,
2527 			    (uint8_t)(belun >> 62), belun);
2528 			ret = ENXIO;
2529 		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2530 		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2531 			/*
2532 			 * This is a SCSI target, but no LUN at this
2533 			 * address.
2534 			 *
2535 			 * In the future, we may want to send this to
2536 			 * the target, and let it respond
2537 			 * appropriately
2538 			 */
2539 			ret = ENXIO;
2540 		}
2541 		mutex_exit(&pptr->port_mutex);
2542 	}
2543 
2544 	/*
2545 	 * Finished grabbing external resources
2546 	 * Allocate internal packet (icmd)
2547 	 */
2548 	if (ret == 0) {
2549 		/*
2550 		 * Calc rsp len assuming rsp info included
2551 		 */
2552 		rsp_len = sizeof (struct fcp_rsp) +
2553 		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
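		/*
		 * Layout assumed for the response buffer: an fcp_rsp,
		 * then (optionally) an fcp_rsp_info, then up to scsi_rqlen
		 * bytes of sense data; the same layout is walked when the
		 * response is parsed further below.
		 */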
2554 
2555 		icmd = fcp_icmd_alloc(pptr, ptgt,
2556 		    sizeof (struct fcp_cmd),
2557 		    rsp_len,
2558 		    fscsi->scsi_buflen,
2559 		    nodma,
2560 		    lcount,			/* ipkt_link_cnt */
2561 		    tcount,			/* ipkt_change_cnt */
2562 		    0,				/* cause */
2563 		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */
2564 
2565 		if (icmd == NULL) {
2566 			ret = ENOMEM;
2567 		} else {
2568 			/*
2569 			 * Setup internal packet as sema sync
2570 			 */
2571 			fcp_ipkt_sema_init(icmd);
2572 		}
2573 	}
2574 
2575 	if (ret == 0) {
2576 		/*
2577 		 * Init fpkt pointer for use.
2578 		 */
2579 
2580 		fpkt = icmd->ipkt_fpkt;
2581 
2582 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
2583 		fpkt->pkt_tran_type	= FC_PKT_FCP_READ; /* only rd for now */
2584 		fpkt->pkt_timeout	= fscsi->scsi_timeout;
2585 
2586 		/*
2587 		 * Init fcmd pointer for use by SCSI command
2588 		 */
2589 
2590 		if (nodma) {
2591 			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2592 		} else {
2593 			fcmd = &fcp_cmd;
2594 		}
2595 		bzero(fcmd, sizeof (struct fcp_cmd));
2596 		ptgt = plun->lun_tgt;
2597 
2598 		lun_string = (uchar_t *)&fscsi->scsi_lun;
2599 
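		/*
		 * Carve the caller's 8-byte LUN into the four 16-bit FCP
		 * entity address fields, each stored in big-endian order.
		 */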
2600 		fcmd->fcp_ent_addr.ent_addr_0 =
2601 		    BE_16(*(uint16_t *)&(lun_string[0]));
2602 		fcmd->fcp_ent_addr.ent_addr_1 =
2603 		    BE_16(*(uint16_t *)&(lun_string[2]));
2604 		fcmd->fcp_ent_addr.ent_addr_2 =
2605 		    BE_16(*(uint16_t *)&(lun_string[4]));
2606 		fcmd->fcp_ent_addr.ent_addr_3 =
2607 		    BE_16(*(uint16_t *)&(lun_string[6]));
2608 
2609 		/*
2610 		 * Setup internal packet(icmd)
2611 		 */
2612 		icmd->ipkt_lun		= plun;
2613 		icmd->ipkt_restart	= 0;
2614 		icmd->ipkt_retries	= 0;
2615 		icmd->ipkt_opcode	= 0;
2616 
2617 		/*
2618 		 * Init the frame HEADER Pointer for use
2619 		 */
2620 		hp = &fpkt->pkt_cmd_fhdr;
2621 
2622 		hp->s_id	= pptr->port_id;
2623 		hp->d_id	= ptgt->tgt_d_id;
2624 		hp->r_ctl	= R_CTL_COMMAND;
2625 		hp->type	= FC_TYPE_SCSI_FCP;
2626 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2627 		hp->rsvd	= 0;
2628 		hp->seq_id	= 0;
2629 		hp->seq_cnt	= 0;
2630 		hp->ox_id	= 0xffff;
2631 		hp->rx_id	= 0xffff;
2632 		hp->ro		= 0;
2633 
2634 		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
2635 		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
2636 		fcmd->fcp_cntl.cntl_write_data	= 0;
2637 		fcmd->fcp_data_len	= fscsi->scsi_buflen;
2638 
2639 		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2640 		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2641 		    fscsi->scsi_cdblen);
2642 
2643 		if (!nodma) {
2644 			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2645 			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2646 		}
2647 
2648 		/*
2649 		 * Send SCSI command to FC transport
2650 		 */
2651 
2652 		if (ret == 0) {
2653 			mutex_enter(&ptgt->tgt_mutex);
2654 
2655 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2656 				mutex_exit(&ptgt->tgt_mutex);
2657 				fscsi->scsi_fc_status = xport_retval =
2658 				    fc_ulp_transport(pptr->port_fp_handle,
2659 				    fpkt);
2660 				if (fscsi->scsi_fc_status != FC_SUCCESS) {
2661 					ret = EIO;
2662 				}
2663 			} else {
2664 				mutex_exit(&ptgt->tgt_mutex);
2665 				ret = EBUSY;
2666 			}
2667 		}
2668 	}
2669 
2670 	/*
2671 	 * Wait for completion only if fc_ulp_transport() was called and it
2672 	 * returned success; that is the only time the callback will happen.
2673 	 * Otherwise, there is no point in waiting.
2674 	 */
2675 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2676 		ret = fcp_ipkt_sema_wait(icmd);
2677 	}
2678 
2679 	/*
2680 	 * Copy data to IOCTL data structures
2681 	 */
2682 	rsp = NULL;
2683 	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2684 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2685 
2686 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2687 			fcp_log(CE_WARN, pptr->port_dip,
2688 			    "!SCSI command to d_id=0x%x lun=0x%x"
2689 			    " failed, Bad FCP response values:"
2690 			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2691 			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2692 			    ptgt->tgt_d_id, plun->lun_num,
2693 			    rsp->reserved_0, rsp->reserved_1,
2694 			    rsp->fcp_u.fcp_status.reserved_0,
2695 			    rsp->fcp_u.fcp_status.reserved_1,
2696 			    rsp->fcp_response_len, rsp->fcp_sense_len);
2697 
2698 			ret = EIO;
2699 		}
2700 	}
2701 
2702 	if ((ret == 0) && (rsp != NULL)) {
2703 		/*
2704 		 * Calc response lengths
2705 		 */
2706 		sense_len = 0;
2707 		info_len = 0;
2708 
2709 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
2710 			info_len = rsp->fcp_response_len;
2711 		}
2712 
2713 		rsp_info   = (struct fcp_rsp_info *)
2714 		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2715 
2716 		/*
2717 		 * Get SCSI status
2718 		 */
2719 		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2720 		/*
2721 		 * If a lun was just added or removed and the next command
2722 		 * comes through this interface, we need to capture the check
2723 		 * condition so we can discover the new topology.
2724 		 */
2725 		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2726 		    rsp->fcp_u.fcp_status.sense_len_set) {
2727 			sense_len = rsp->fcp_sense_len;
2728 			rsp_sense  = (caddr_t)((uint8_t *)rsp_info + info_len);
2729 			sense_to = (struct scsi_extended_sense *)rsp_sense;
2730 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2731 			    (FCP_SENSE_NO_LUN(sense_to))) {
2732 				reconfig_lun = TRUE;
2733 			}
2734 		}
2735 
2736 		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2737 		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2738 			if (reconfig_lun == FALSE) {
2739 				reconfig_status =
2740 				    fcp_is_reconfig_needed(ptgt, fpkt);
2741 			}
2742 
2743 			if ((reconfig_lun == TRUE) ||
2744 			    (reconfig_status == TRUE)) {
2745 				mutex_enter(&ptgt->tgt_mutex);
2746 				if (ptgt->tgt_tid == NULL) {
2747 					/*
2748 					 * Either we've been notified the
2749 					 * REPORT_LUN data has changed, or
2750 					 * we've determined on our own that
2751 					 * we're out of date.  Kick off
2752 					 * rediscovery.
2753 					 */
2754 					tid = timeout(fcp_reconfigure_luns,
2755 					    (caddr_t)ptgt, drv_usectohz(1));
2756 
2757 					ptgt->tgt_tid = tid;
2758 					ptgt->tgt_state |= FCP_TGT_BUSY;
2759 					ret = EBUSY;
2760 					reconfig_pending = TRUE;
2761 				}
2762 				mutex_exit(&ptgt->tgt_mutex);
2763 			}
2764 		}
2765 
2766 		/*
2767 		 * Calc residuals and buffer lengths
2768 		 */
2769 
2770 		if (ret == 0) {
2771 			buf_len = fscsi->scsi_buflen;
2772 			fscsi->scsi_bufresid	= 0;
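			/*
			 * resid_under means the device transferred less than
			 * requested and fcp_resid holds the shortfall;
			 * resid_over is handed back as a negative residual.
			 */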
2773 			if (rsp->fcp_u.fcp_status.resid_under) {
2774 				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2775 					fscsi->scsi_bufresid = rsp->fcp_resid;
2776 				} else {
2777 					cmn_err(CE_WARN, "fcp: bad residue %x "
2778 					    "for txfer len %x", rsp->fcp_resid,
2779 					    fscsi->scsi_buflen);
2780 					fscsi->scsi_bufresid =
2781 					    fscsi->scsi_buflen;
2782 				}
2783 				buf_len -= fscsi->scsi_bufresid;
2784 			}
2785 			if (rsp->fcp_u.fcp_status.resid_over) {
2786 				fscsi->scsi_bufresid = -rsp->fcp_resid;
2787 			}
2788 
2789 			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
2790 			if (fscsi->scsi_rqlen < sense_len) {
2791 				sense_len = fscsi->scsi_rqlen;
2792 			}
2793 
2794 			fscsi->scsi_fc_rspcode	= 0;
2795 			if (rsp->fcp_u.fcp_status.rsp_len_set) {
2796 				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
2797 			}
2798 			fscsi->scsi_pkt_state	= fpkt->pkt_state;
2799 			fscsi->scsi_pkt_action	= fpkt->pkt_action;
2800 			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;
2801 
2802 			/*
2803 			 * Copy data and request sense
2804 			 *
2805 			 * Data must be copied by using the FCP_CP_IN macro.
2806 			 * This will ensure the proper byte order since the data
2807 			 * is being copied directly from the memory mapped
2808 			 * device register.
2809 			 *
2810 			 * The response (and request sense) will be in the
2811 			 * correct byte order.	No special copy is necessary.
2812 			 */
2813 
2814 			if (buf_len) {
2815 				FCP_CP_IN(fpkt->pkt_data,
2816 				    fscsi->scsi_bufaddr,
2817 				    fpkt->pkt_data_acc,
2818 				    buf_len);
2819 			}
2820 			bcopy((void *)rsp_sense,
2821 			    (void *)fscsi->scsi_rqbufaddr,
2822 			    sense_len);
2823 		}
2824 	}
2825 
2826 	/*
2827 	 * Clean up transport data structures if icmd was allocated,
2828 	 * so that cleanup happens in the same thread that allocated icmd.
2829 	 */
2830 	if (icmd != NULL) {
2831 		fcp_ipkt_sema_cleanup(icmd);
2832 	}
2833 
2834 	/* restore pm busy/idle status */
2835 	if (port_busy) {
2836 		fc_ulp_idle_port(pptr->port_fp_handle);
2837 	}
2838 
2839 	/*
2840 	 * Clean up the target.  If a reconfig is pending, don't clear the BUSY
2841 	 * flag; it will be cleared when the reconfig is complete.
2842 	 */
2843 	if ((ptgt != NULL) && !reconfig_pending) {
2844 		/*
2845 		 * If target was created,
2846 		 */
2847 		if (target_created) {
2848 			mutex_enter(&ptgt->tgt_mutex);
2849 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2850 			mutex_exit(&ptgt->tgt_mutex);
2851 		} else {
2852 			/*
2853 			 * De-mark target as busy
2854 			 */
2855 			mutex_enter(&ptgt->tgt_mutex);
2856 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
2857 			mutex_exit(&ptgt->tgt_mutex);
2858 		}
2859 	}
2860 	return (ret);
2861 }
2862 
2863 
2864 static int
2865 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2866     fc_packet_t	*fpkt)
2867 {
2868 	uchar_t			*lun_string;
2869 	uint16_t		lun_num, i;
2870 	int			num_luns;
2871 	int			actual_luns;
2872 	int			num_masked_luns;
2873 	int			lun_buflen;
2874 	struct fcp_lun	*plun	= NULL;
2875 	struct fcp_reportlun_resp	*report_lun;
2876 	uint8_t			reconfig_needed = FALSE;
2877 	uint8_t			lun_exists = FALSE;
2878 	fcp_port_t			*pptr		 = ptgt->tgt_port;
2879 
2880 	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2881 
2882 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2883 	    fpkt->pkt_datalen);
2884 
2885 	/* get number of luns (which is supplied as LUNS * 8) */
2886 	num_luns = BE_32(report_lun->num_lun) >> 3;
2887 
2888 	/*
2889 	 * Figure out exactly how many lun strings our response buffer
2890 	 * can hold.
2891 	 */
2892 	lun_buflen = (fpkt->pkt_datalen -
2893 	    2 * sizeof (uint32_t)) / sizeof (longlong_t);
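	/*
	 * The REPORT LUNS data starts with an 8-byte header (a 4-byte LUN
	 * list length plus 4 reserved bytes) followed by 8-byte LUN entries,
	 * hence the two uint32_t's subtracted and the longlong_t divisor.
	 */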
2894 
2895 	/*
2896 	 * Is our response buffer full or not? We don't want to
2897 	 * potentially walk beyond the number of luns we have.
2898 	 */
2899 	if (num_luns <= lun_buflen) {
2900 		actual_luns = num_luns;
2901 	} else {
2902 		actual_luns = lun_buflen;
2903 	}
2904 
2905 	mutex_enter(&ptgt->tgt_mutex);
2906 
2907 	/* Scan each lun to see if we have masked it. */
2908 	num_masked_luns = 0;
2909 	if (fcp_lun_blacklist != NULL) {
2910 		for (i = 0; i < actual_luns; i++) {
2911 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2912 			switch (lun_string[0] & 0xC0) {
2913 			case FCP_LUN_ADDRESSING:
2914 			case FCP_PD_ADDRESSING:
2915 			case FCP_VOLUME_ADDRESSING:
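				/*
				 * The low 6 bits of byte 0 plus byte 1 form
				 * the 14-bit LUN number for these addressing
				 * methods.
				 */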
2916 				lun_num = ((lun_string[0] & 0x3F) << 8)
2917 				    | lun_string[1];
2918 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
2919 				    lun_num) == TRUE) {
2920 					num_masked_luns++;
2921 				}
2922 				break;
2923 			default:
2924 				break;
2925 			}
2926 		}
2927 	}
2928 
2929 	/*
2930 	 * The quick and easy check.  If the number of LUNs reported
2931 	 * doesn't match the number we currently know about, we need
2932 	 * to reconfigure.
2933 	 */
2934 	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2935 		mutex_exit(&ptgt->tgt_mutex);
2936 		kmem_free(report_lun, fpkt->pkt_datalen);
2937 		return (TRUE);
2938 	}
2939 
2940 	/*
2941 	 * If the quick and easy check doesn't turn up anything, we walk
2942 	 * the list of luns from the REPORT_LUN response and look for
2943 	 * any luns we don't know about.  If we find one, we know we need
2944 	 * to reconfigure. We will skip LUNs that are masked because of the
2945 	 * blacklist.
2946 	 */
2947 	for (i = 0; i < actual_luns; i++) {
2948 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2949 		lun_exists = FALSE;
2950 		switch (lun_string[0] & 0xC0) {
2951 		case FCP_LUN_ADDRESSING:
2952 		case FCP_PD_ADDRESSING:
2953 		case FCP_VOLUME_ADDRESSING:
2954 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2955 
2956 			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2957 			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2958 				lun_exists = TRUE;
2959 				break;
2960 			}
2961 
2962 			for (plun = ptgt->tgt_lun; plun;
2963 			    plun = plun->lun_next) {
2964 				if (plun->lun_num == lun_num) {
2965 					lun_exists = TRUE;
2966 					break;
2967 				}
2968 			}
2969 			break;
2970 		default:
2971 			break;
2972 		}
2973 
2974 		if (lun_exists == FALSE) {
2975 			reconfig_needed = TRUE;
2976 			break;
2977 		}
2978 	}
2979 
2980 	mutex_exit(&ptgt->tgt_mutex);
2981 	kmem_free(report_lun, fpkt->pkt_datalen);
2982 
2983 	return (reconfig_needed);
2984 }
2985 
2986 /*
2987  * This function is called by fcp_handle_page83 and uses inquiry response data
2988  * stored in plun->lun_inq to determine whether or not a device is a member of
2989  * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2990  * otherwise 1.
2991  */
2992 static int
2993 fcp_symmetric_device_probe(struct fcp_lun *plun)
2994 {
2995 	struct scsi_inquiry	*stdinq = &plun->lun_inq;
2996 	char			*devidptr;
2997 	int			i, len;
2998 
2999 	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
3000 		devidptr = fcp_symmetric_disk_table[i];
3001 		len = (int)strlen(devidptr);
3002 
3003 		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3004 			return (0);
3005 		}
3006 	}
3007 	return (1);
3008 }
3009 
3010 
3011 /*
3012  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
3013  * It returns the current count of state change callbacks, i.e. the
3014  * value of port_link_cnt, which is copied out through fcp_ioctl.list.
3015  *
3016  * INPUT:
3017  *   fcp_ioctl.fp_minor -> The minor # of the fp port
3018  *   fcp_ioctl.listlen	-> 1
3019  *   fcp_ioctl.list	-> Pointer to a 32 bit integer
3020  */
3021 /*ARGSUSED2*/
3022 static int
3023 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3024 {
3025 	int			ret;
3026 	uint32_t		link_cnt;
3027 	struct fcp_ioctl	fioctl;
3028 	struct fcp_port	*pptr = NULL;
3029 
3030 	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3031 	    &pptr)) != 0) {
3032 		return (ret);
3033 	}
3034 
3035 	ASSERT(pptr != NULL);
3036 
3037 	if (fioctl.listlen != 1) {
3038 		return (EINVAL);
3039 	}
3040 
3041 	mutex_enter(&pptr->port_mutex);
3042 	if (pptr->port_state & FCP_STATE_OFFLINE) {
3043 		mutex_exit(&pptr->port_mutex);
3044 		return (ENXIO);
3045 	}
3046 
3047 	/*
3048 	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3049 	 * when fcp initially attaches to the port and there is nothing
3050 	 * hanging off the port, or when there was a repeat offline state change
3051 	 * callback (refer to fcp_statec_callback(), FC_STATE_OFFLINE case).
3052 	 * In the latter case, port_tmp_cnt will be non-zero, and that is how
3053 	 * we differentiate the two cases.
3054 	 */
3055 	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3056 		mutex_exit(&pptr->port_mutex);
3057 		return (ENXIO);
3058 	}
3059 
3060 	link_cnt = pptr->port_link_cnt;
3061 	mutex_exit(&pptr->port_mutex);
3062 
3063 	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3064 		return (EFAULT);
3065 	}
3066 
3067 #ifdef	_MULTI_DATAMODEL
3068 	switch (ddi_model_convert_from(mode & FMODELS)) {
3069 	case DDI_MODEL_ILP32: {
3070 		struct fcp32_ioctl f32_ioctl;
3071 
3072 		f32_ioctl.fp_minor = fioctl.fp_minor;
3073 		f32_ioctl.listlen = fioctl.listlen;
3074 		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3075 		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3076 		    sizeof (struct fcp32_ioctl), mode)) {
3077 			return (EFAULT);
3078 		}
3079 		break;
3080 	}
3081 	case DDI_MODEL_NONE:
3082 		if (ddi_copyout((void *)&fioctl, (void *)data,
3083 		    sizeof (struct fcp_ioctl), mode)) {
3084 			return (EFAULT);
3085 		}
3086 		break;
3087 	}
3088 #else	/* _MULTI_DATAMODEL */
3089 
3090 	if (ddi_copyout((void *)&fioctl, (void *)data,
3091 	    sizeof (struct fcp_ioctl), mode)) {
3092 		return (EFAULT);
3093 	}
3094 #endif	/* _MULTI_DATAMODEL */
3095 
3096 	return (0);
3097 }
3098 
3099 /*
3100  * This function copies the fcp_ioctl structure passed in from user land
3101  * into kernel land. Handles 32 bit applications.
3102  */
3103 /*ARGSUSED*/
3104 static int
3105 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3106     struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3107 {
3108 	struct fcp_port	*t_pptr;
3109 
3110 #ifdef	_MULTI_DATAMODEL
3111 	switch (ddi_model_convert_from(mode & FMODELS)) {
3112 	case DDI_MODEL_ILP32: {
3113 		struct fcp32_ioctl f32_ioctl;
3114 
3115 		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3116 		    sizeof (struct fcp32_ioctl), mode)) {
3117 			return (EFAULT);
3118 		}
3119 		fioctl->fp_minor = f32_ioctl.fp_minor;
3120 		fioctl->listlen = f32_ioctl.listlen;
3121 		fioctl->list = (caddr_t)(long)f32_ioctl.list;
3122 		break;
3123 	}
3124 	case DDI_MODEL_NONE:
3125 		if (ddi_copyin((void *)data, (void *)fioctl,
3126 		    sizeof (struct fcp_ioctl), mode)) {
3127 			return (EFAULT);
3128 		}
3129 		break;
3130 	}
3131 
3132 #else	/* _MULTI_DATAMODEL */
3133 	if (ddi_copyin((void *)data, (void *)fioctl,
3134 	    sizeof (struct fcp_ioctl), mode)) {
3135 		return (EFAULT);
3136 	}
3137 #endif	/* _MULTI_DATAMODEL */
3138 
3139 	/*
3140 	 * Right now we can assume that the minor number matches with
3141 	 * this instance of fp. If this changes we will need to
3142 	 * revisit this logic.
3143 	 */
3144 	mutex_enter(&fcp_global_mutex);
3145 	t_pptr = fcp_port_head;
3146 	while (t_pptr) {
3147 		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3148 			break;
3149 		} else {
3150 			t_pptr = t_pptr->port_next;
3151 		}
3152 	}
3153 	*pptr = t_pptr;
3154 	mutex_exit(&fcp_global_mutex);
3155 	if (t_pptr == NULL) {
3156 		return (ENXIO);
3157 	}
3158 
3159 	return (0);
3160 }
3161 
3162 /*
3163  *     Function: fcp_port_create_tgt
3164  *
3165  *  Description: As the name suggest this function creates the target context
3166  *  Description: As the name suggests, this function creates the target context
3167  *		 specified by the WWN provided by the caller.  If the
3168  *		 creation goes well and the target is known by fp/fctl, a PLOGI
3169  *		 followed by a PRLI is issued.
3170  *     Argument: pptr		fcp port structure
3171  *		 pwwn		WWN of the target
3172  *		 ret_val	Address of the return code.  It could be:
3173  *				EIO, ENOMEM or 0.
3174  *		 fc_status	PLOGI or PRLI status completion
3175  *		 fc_pkt_state	PLOGI or PRLI state completion
3176  *		 fc_pkt_reason	PLOGI or PRLI reason completion
3177  *		 fc_pkt_action	PLOGI or PRLI action completion
3178  *
3179  * Return Value: NULL if it failed
3180  *		 Target structure address if it succeeds
3181  */
3182 static struct fcp_tgt *
3183 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3184     int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3185 {
3186 	struct fcp_tgt	*ptgt = NULL;
3187 	fc_portmap_t		devlist;
3188 	int			lcount;
3189 	int			error;
3190 
3191 	*ret_val = 0;
3192 
3193 	/*
3194 	 * Check FC port device & get port map
3195 	 */
3196 	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3197 	    &error, 1) == NULL) {
3198 		*ret_val = EIO;
3199 	} else {
3200 		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3201 		    &devlist) != FC_SUCCESS) {
3202 			*ret_val = EIO;
3203 		}
3204 	}
3205 
3206 	/* Set port map flags */
3207 	devlist.map_type = PORT_DEVICE_USER_CREATE;
3208 
3209 	/* Allocate target */
3210 	if (*ret_val == 0) {
3211 		lcount = pptr->port_link_cnt;
3212 		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3213 		if (ptgt == NULL) {
3214 			fcp_log(CE_WARN, pptr->port_dip,
3215 			    "!FC target allocation failed");
3216 			*ret_val = ENOMEM;
3217 		} else {
3218 			/* Setup target */
3219 			mutex_enter(&ptgt->tgt_mutex);
3220 
3221 			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
3222 			ptgt->tgt_tmp_cnt	= 1;
3223 			ptgt->tgt_d_id		= devlist.map_did.port_id;
3224 			ptgt->tgt_hard_addr	=
3225 			    devlist.map_hard_addr.hard_addr;
3226 			ptgt->tgt_pd_handle	= devlist.map_pd;
3227 			ptgt->tgt_fca_dev	= NULL;
3228 
3229 			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3230 			    FC_WWN_SIZE);
3231 			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3232 			    FC_WWN_SIZE);
3233 
3234 			mutex_exit(&ptgt->tgt_mutex);
3235 		}
3236 	}
3237 
3238 	/* Release global mutex for PLOGI and PRLI */
3239 	mutex_exit(&fcp_global_mutex);
3240 
3241 	/* Send PLOGI (If necessary) */
3242 	if (*ret_val == 0) {
3243 		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3244 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3245 	}
3246 
3247 	/* Send PRLI (If necessary) */
3248 	if (*ret_val == 0) {
3249 		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3250 		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3251 	}
3252 
3253 	mutex_enter(&fcp_global_mutex);
3254 
3255 	return (ptgt);
3256 }
3257 
3258 /*
3259  *     Function: fcp_tgt_send_plogi
3260  *
3261  *  Description: This function sends a PLOGI to the target specified by the
3262  *		 caller and waits till it completes.
3263  *		 caller and waits until it completes.
3264  *     Argument: ptgt		Target to send the plogi to.
3265  *		 fc_status	Status returned by fp/fctl in the PLOGI request.
3266  *		 fc_pkt_state	State returned by fp/fctl in the PLOGI request.
3267  *		 fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
3268  *		 fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
3269  *
3270  * Return Value: 0
3271  *		 ENOMEM
3272  *		 EIO
3273  *
3274  *	Context: User context.
3275  */
3276 static int
3277 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3278     int *fc_pkt_reason, int *fc_pkt_action)
3279 {
3280 	struct fcp_port	*pptr;
3281 	struct fcp_ipkt	*icmd;
3282 	struct fc_packet	*fpkt;
3283 	fc_frame_hdr_t		*hp;
3284 	struct la_els_logi	logi;
3285 	int			tcount;
3286 	int			lcount;
3287 	int			ret, login_retval = ~FC_SUCCESS;
3288 
3289 	ret = 0;
3290 
3291 	pptr = ptgt->tgt_port;
3292 
3293 	lcount = pptr->port_link_cnt;
3294 	tcount = ptgt->tgt_change_cnt;
3295 
3296 	/* Alloc internal packet */
3297 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3298 	    sizeof (la_els_logi_t), 0,
3299 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3300 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3301 
3302 	if (icmd == NULL) {
3303 		ret = ENOMEM;
3304 	} else {
3305 		/*
3306 		 * Setup internal packet as sema sync
3307 		 */
3308 		fcp_ipkt_sema_init(icmd);
3309 
3310 		/*
3311 		 * Setup internal packet (icmd)
3312 		 */
3313 		icmd->ipkt_lun		= NULL;
3314 		icmd->ipkt_restart	= 0;
3315 		icmd->ipkt_retries	= 0;
3316 		icmd->ipkt_opcode	= LA_ELS_PLOGI;
3317 
3318 		/*
3319 		 * Setup fc_packet
3320 		 */
3321 		fpkt = icmd->ipkt_fpkt;
3322 
3323 		fpkt->pkt_tran_flags	= FC_TRAN_CLASS3 | FC_TRAN_INTR;
3324 		fpkt->pkt_tran_type	= FC_PKT_EXCHANGE;
3325 		fpkt->pkt_timeout	= FCP_ELS_TIMEOUT;
3326 
3327 		/*
3328 		 * Setup FC frame header
3329 		 */
3330 		hp = &fpkt->pkt_cmd_fhdr;
3331 
3332 		hp->s_id	= pptr->port_id;	/* source ID */
3333 		hp->d_id	= ptgt->tgt_d_id;	/* dest ID */
3334 		hp->r_ctl	= R_CTL_ELS_REQ;
3335 		hp->type	= FC_TYPE_EXTENDED_LS;
3336 		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3337 		hp->seq_id	= 0;
3338 		hp->rsvd	= 0;
3339 		hp->df_ctl	= 0;
3340 		hp->seq_cnt	= 0;
3341 		hp->ox_id	= 0xffff;		/* i.e. none */
3342 		hp->rx_id	= 0xffff;		/* i.e. none */
3343 		hp->ro		= 0;
3344 
3345 		/*
3346 		 * Setup PLOGI
3347 		 */
3348 		bzero(&logi, sizeof (struct la_els_logi));
3349 		logi.ls_code.ls_code = LA_ELS_PLOGI;
3350 
3351 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3352 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3353 
3354 		/*
3355 		 * Send PLOGI
3356 		 */
3357 		*fc_status = login_retval =
3358 		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3359 		if (*fc_status != FC_SUCCESS) {
3360 			ret = EIO;
3361 		}
3362 	}
3363 
3364 	/*
3365 	 * Wait for completion
3366 	 */
3367 	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3368 		ret = fcp_ipkt_sema_wait(icmd);
3369 
3370 		*fc_pkt_state	= fpkt->pkt_state;
3371 		*fc_pkt_reason	= fpkt->pkt_reason;
3372 		*fc_pkt_action	= fpkt->pkt_action;
3373 	}
3374 
3375 	/*
3376 	 * Cleanup transport data structures if icmd was alloc-ed AND if there
3377 	 * is going to be no callback (i.e if fc_ulp_login() failed).
3378 	 * Otherwise, cleanup happens in callback routine.
3379 	 */
3380 	if (icmd != NULL) {
3381 		fcp_ipkt_sema_cleanup(icmd);
3382 	}
3383 
3384 	return (ret);
3385 }
3386 
3387 /*
3388  *     Function: fcp_tgt_send_prli
3389  *
3390  *  Description: Does nothing as of today.
3391  *
3392  *     Argument: ptgt		Target to send the prli to.
3393  *		 fc_status	Status returned by fp/fctl in the PRLI request.
3394  *		 fc_pkt_state	State returned by fp/fctl in the PRLI request.
3395  *		 fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
3396  *		 fc_pkt_action	Action returned by fp/fctl in the PRLI request.
3397  *
3398  * Return Value: 0
3399  */
3400 /*ARGSUSED*/
3401 static int
3402 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3403     int *fc_pkt_reason, int *fc_pkt_action)
3404 {
3405 	return (0);
3406 }
3407 
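/*
 * Sync protocol used by the ioctl paths above: fcp_ipkt_sema_init() points
 * pkt_comp at fcp_ipkt_sema_callback(), the issuing thread blocks in
 * fcp_ipkt_sema_wait() via sema_p(), the transport completion does sema_v()
 * to wake it, and fcp_ipkt_sema_cleanup() destroys the semaphore and frees
 * the internal packet.
 */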
3408 /*
3409  *     Function: fcp_ipkt_sema_init
3410  *
3411  *  Description: Initializes the semaphore contained in the internal packet.
3412  *
3413  *     Argument: icmd	Internal packet the semaphore of which must be
3414  *			initialized.
3415  *
3416  * Return Value: None
3417  *
3418  *	Context: User context only.
3419  */
3420 static void
3421 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3422 {
3423 	struct fc_packet	*fpkt;
3424 
3425 	fpkt = icmd->ipkt_fpkt;
3426 
3427 	/* Create semaphore for sync */
3428 	sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3429 
3430 	/* Setup the completion callback */
3431 	fpkt->pkt_comp = fcp_ipkt_sema_callback;
3432 }
3433 
3434 /*
3435  *     Function: fcp_ipkt_sema_wait
3436  *
3437  *  Description: Wait on the semaphore embedded in the internal packet.	 The
3438  *		 semaphore is released in the callback.
3439  *
3440  *     Argument: icmd	Internal packet to wait on for completion.
3441  *
3442  * Return Value: 0	(FC_PKT_SUCCESS)
3443  *		 EIO	(any other failure)
3444  *		 EBUSY	(busy states and busy rejects)
3445  *		 EAGAIN	(timeouts)
3446  *
3447  *	Context: User context only.
3448  *
3449  * This function does a conversion between the field pkt_state of the fc_packet
3450  * embedded in the internal packet (icmd) and the code it returns.
3451  */
3452 static int
3453 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3454 {
3455 	struct fc_packet	*fpkt;
3456 	int	ret;
3457 
3458 	ret = EIO;
3459 	fpkt = icmd->ipkt_fpkt;
3460 
3461 	/*
3462 	 * Wait on semaphore
3463 	 */
3464 	sema_p(&(icmd->ipkt_sema));
3465 
3466 	/*
3467 	 * Check the status of the FC packet
3468 	 */
3469 	switch (fpkt->pkt_state) {
3470 	case FC_PKT_SUCCESS:
3471 		ret = 0;
3472 		break;
3473 	case FC_PKT_LOCAL_RJT:
3474 		switch (fpkt->pkt_reason) {
3475 		case FC_REASON_SEQ_TIMEOUT:
3476 		case FC_REASON_RX_BUF_TIMEOUT:
3477 			ret = EAGAIN;
3478 			break;
3479 		case FC_REASON_PKT_BUSY:
3480 			ret = EBUSY;
3481 			break;
3482 		}
3483 		break;
3484 	case FC_PKT_TIMEOUT:
3485 		ret = EAGAIN;
3486 		break;
3487 	case FC_PKT_LOCAL_BSY:
3488 	case FC_PKT_TRAN_BSY:
3489 	case FC_PKT_NPORT_BSY:
3490 	case FC_PKT_FABRIC_BSY:
3491 		ret = EBUSY;
3492 		break;
3493 	case FC_PKT_LS_RJT:
3494 	case FC_PKT_BA_RJT:
3495 		switch (fpkt->pkt_reason) {
3496 		case FC_REASON_LOGICAL_BSY:
3497 			ret = EBUSY;
3498 			break;
3499 		}
3500 		break;
3501 	case FC_PKT_FS_RJT:
3502 		switch (fpkt->pkt_reason) {
3503 		case FC_REASON_FS_LOGICAL_BUSY:
3504 			ret = EBUSY;
3505 			break;
3506 		}
3507 		break;
3508 	}
3509 
3510 	return (ret);
3511 }
3512 
3513 /*
3514  *     Function: fcp_ipkt_sema_callback
3515  *
3516  *  Description: Registered as the completion callback function for the FC
3517  *		 transport when the ipkt semaphore is used for sync. This will
3518  *		 cleanup the used data structures, if necessary and wake up
3519  *		 the user thread to complete the transaction.
3520  *
3521  *     Argument: fpkt	FC packet (points to the icmd)
3522  *
3523  * Return Value: None
3524  *
3525  *	Context: User context only
3526  */
3527 static void
3528 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3529 {
3530 	struct fcp_ipkt	*icmd;
3531 
3532 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3533 
3534 	/*
3535 	 * Wake up user thread
3536 	 */
3537 	sema_v(&(icmd->ipkt_sema));
3538 }
3539 
3540 /*
3541  *     Function: fcp_ipkt_sema_cleanup
3542  *
3543  *  Description: Called to cleanup (if necessary) the data structures used
3544  *		 when ipkt sema is used for sync.  This function will detect
3545  *		 whether the caller is the last thread (via counter) and
3546  *		 cleanup only if necessary.
3547  *
3548  *     Argument: icmd	Internal command packet
3549  *
3550  * Return Value: None
3551  *
3552  *	Context: User context only
3553  */
3554 static void
3555 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3556 {
3557 	struct fcp_tgt	*ptgt;
3558 	struct fcp_port	*pptr;
3559 
3560 	ptgt = icmd->ipkt_tgt;
3561 	pptr = icmd->ipkt_port;
3562 
3563 	/*
3564 	 * Acquire data structure
3565 	 */
3566 	mutex_enter(&ptgt->tgt_mutex);
3567 
3568 	/*
3569 	 * Destroy semaphore
3570 	 */
3571 	sema_destroy(&(icmd->ipkt_sema));
3572 
3573 	/*
3574 	 * Cleanup internal packet
3575 	 */
3576 	mutex_exit(&ptgt->tgt_mutex);
3577 	fcp_icmd_free(pptr, icmd);
3578 }
3579 
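/*
 * Synchronous ELS via the ipkt semaphore
 * ======================================
 *
 * The fcp_ipkt_sema_*() routines above form a small pattern for issuing an
 * ELS from user context and waiting for its completion.  A minimal sketch of
 * how a caller strings them together (modeled on the PLOGI send/wait path
 * earlier in this file; the fragment is illustrative, not an additional code
 * path):
 *
 *	fcp_ipkt_sema_init(icmd);		   sema at 0, pkt_comp hooked
 *	if (fc_ulp_login(pptr->port_fp_handle, &fpkt, 1) == FC_SUCCESS)
 *		ret = fcp_ipkt_sema_wait(icmd);	   sema_p() blocks here
 *	fcp_ipkt_sema_cleanup(icmd);		   sema_destroy() + fcp_icmd_free()
 *
 * fcp_ipkt_sema_callback() runs as the transport completion callback and
 * simply does sema_v(), which releases the waiter in fcp_ipkt_sema_wait().
 */
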
3580 /*
3581  *     Function: fcp_port_attach
3582  *
3583  *  Description: Called by the transport framework to resume, suspend or
3584  *		 attach a new port.
3585  *
3586  *     Argument: ulph		Port handle
3587  *		 *pinfo		Port information
3588  *		 cmd		Command
3589  *		 s_id		Port ID
3590  *
3591  * Return Value: FC_FAILURE or FC_SUCCESS
3592  */
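/*
 * A rough sketch of the dispatch performed below (the default case should
 * not happen in practice):
 *
 *	FC_CMD_ATTACH			fcp_handle_port_attach()
 *	FC_CMD_RESUME, FC_CMD_POWER_UP	fcp_handle_port_resume()
 *	anything else			traced and rejected with FC_FAILURE
 */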
3593 /*ARGSUSED*/
3594 static int
3595 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3596     fc_attach_cmd_t cmd, uint32_t s_id)
3597 {
3598 	int	instance;
3599 	int	res = FC_FAILURE; /* default result */
3600 
3601 	ASSERT(pinfo != NULL);
3602 
3603 	instance = ddi_get_instance(pinfo->port_dip);
3604 
3605 	switch (cmd) {
3606 	case FC_CMD_ATTACH:
3607 		/*
3608 		 * this port instance is attaching for the first time (or after
3609 		 * having been detached earlier)
3610 		 */
3611 		if (fcp_handle_port_attach(ulph, pinfo, s_id,
3612 		    instance) == DDI_SUCCESS) {
3613 			res = FC_SUCCESS;
3614 		} else {
3615 			ASSERT(ddi_get_soft_state(fcp_softstate,
3616 			    instance) == NULL);
3617 		}
3618 		break;
3619 
3620 	case FC_CMD_RESUME:
3621 	case FC_CMD_POWER_UP:
3622 		/*
3623 		 * this port instance was attached and then suspended and
3624 		 * will now be resumed
3625 		 */
3626 		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3627 		    instance) == DDI_SUCCESS) {
3628 			res = FC_SUCCESS;
3629 		}
3630 		break;
3631 
3632 	default:
3633 		/* shouldn't happen */
3634 		FCP_TRACE(fcp_logq, "fcp",
3635 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
3636 		    "port_attach: unknown cmdcommand: %d", cmd);
3637 		break;
3638 	}
3639 
3640 	/* return result */
3641 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3642 	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3643 
3644 	return (res);
3645 }
3646 
3647 
3648 /*
3649  * detach or suspend this port instance
3650  *
3651  * acquires and releases the global mutex
3652  *
3653  * acquires and releases the mutex for this port
3654  *
3655  * acquires and releases the hotplug mutex for this port
3656  */
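/*
 * The detach command is first translated into an FCP port state flag and the
 * real work is then handed to fcp_handle_port_detach():
 *
 *	FC_CMD_SUSPEND		FCP_STATE_SUSPENDED
 *	FC_CMD_POWER_DOWN	FCP_STATE_POWER_DOWN
 *	FC_CMD_DETACH		FCP_STATE_DETACHING
 *
 * Any other command is rejected with FC_FAILURE.
 */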
3657 /*ARGSUSED*/
3658 static int
3659 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3660     fc_detach_cmd_t cmd)
3661 {
3662 	int			flag;
3663 	int			instance;
3664 	struct fcp_port		*pptr;
3665 
3666 	instance = ddi_get_instance(info->port_dip);
3667 	pptr = ddi_get_soft_state(fcp_softstate, instance);
3668 
3669 	switch (cmd) {
3670 	case FC_CMD_SUSPEND:
3671 		FCP_DTRACE(fcp_logq, "fcp",
3672 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3673 		    "port suspend called for port %d", instance);
3674 		flag = FCP_STATE_SUSPENDED;
3675 		break;
3676 
3677 	case FC_CMD_POWER_DOWN:
3678 		FCP_DTRACE(fcp_logq, "fcp",
3679 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3680 		    "port power down called for port %d", instance);
3681 		flag = FCP_STATE_POWER_DOWN;
3682 		break;
3683 
3684 	case FC_CMD_DETACH:
3685 		FCP_DTRACE(fcp_logq, "fcp",
3686 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
3687 		    "port detach called for port %d", instance);
3688 		flag = FCP_STATE_DETACHING;
3689 		break;
3690 
3691 	default:
3692 		/* shouldn't happen */
3693 		return (FC_FAILURE);
3694 	}
3695 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3696 	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3697 
3698 	return (fcp_handle_port_detach(pptr, flag, instance));
3699 }
3700 
3701 
3702 /*
3703  * called for ioctls on the transport's devctl interface, and the transport
3704  * has passed it to us
3705  *
3706  * this will only be called for device control ioctls (i.e. hotplugging stuff)
3707  *
3708  * return FC_SUCCESS if we decide to claim the ioctl,
3709  * else return FC_UNCLAIMED
3710  *
3711  * *rval is set iff we decide to claim the ioctl
3712  */
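/*
 * For reference, these devctl ioctls are normally generated from userland
 * through the libdevctl library rather than built by hand.  The sketch below
 * is a minimal consumer that offlines a single FCP LUN; it assumes the
 * standard libdevctl interfaces (devctl_device_acquire() and friends) and the
 * /devices path shown is a made-up example.  devctl_device_offline() ends up
 * as a DEVCTL_DEVICE_OFFLINE ioctl that fp hands to fcp_port_ioctl() below.
 *
 *	#include <stdio.h>
 *	#include <libdevctl.h>
 *
 *	int
 *	main(void)
 *	{
 *		devctl_hdl_t hdl;
 *
 *		hdl = devctl_device_acquire(
 *		    "/devices/pci@0/SUNW,qlc@1/fp@0,0/ssd@w2100000000000000,0",
 *		    0);
 *		if (hdl == NULL) {
 *			perror("devctl_device_acquire");
 *			return (1);
 *		}
 *		if (devctl_device_offline(hdl) != 0) {
 *			perror("devctl_device_offline");
 *			devctl_release(hdl);
 *			return (1);
 *		}
 *		devctl_release(hdl);
 *		return (0);
 *	}
 */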
3713 /*ARGSUSED*/
3714 static int
3715 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3716     intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3717 {
3718 	int			retval = FC_UNCLAIMED;	/* return value */
3719 	struct fcp_port		*pptr = NULL;		/* our soft state */
3720 	struct devctl_iocdata	*dcp = NULL;		/* for devctl */
3721 	dev_info_t		*cdip;
3722 	mdi_pathinfo_t		*pip = NULL;
3723 	char			*ndi_nm;		/* NDI name */
3724 	char			*ndi_addr;		/* NDI addr */
3725 	int			is_mpxio;
3726 	boolean_t		enteredv;
3727 	int			devi_entered = 0;
3728 	clock_t			end_time;
3729 
3730 	ASSERT(rval != NULL);
3731 
3732 	FCP_DTRACE(fcp_logq, "fcp",
3733 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3734 	    "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3735 
3736 	/* if already claimed then forget it */
3737 	if (claimed) {
3738 		/*
3739 		 * for now, if this ioctl has already been claimed, then
3740 		 * we just ignore it
3741 		 */
3742 		return (retval);
3743 	}
3744 
3745 	/* get our port info */
3746 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
3747 		fcp_log(CE_WARN, NULL,
3748 		    "!fcp:Invalid port handle handle in ioctl");
3749 		*rval = ENXIO;
3750 		return (retval);
3751 	}
3752 	is_mpxio = pptr->port_mpxio;
3753 
3754 	switch (cmd) {
3755 	case DEVCTL_BUS_GETSTATE:
3756 	case DEVCTL_BUS_QUIESCE:
3757 	case DEVCTL_BUS_UNQUIESCE:
3758 	case DEVCTL_BUS_RESET:
3759 	case DEVCTL_BUS_RESETALL:
3760 
3761 	case DEVCTL_BUS_DEV_CREATE:
3762 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3763 			return (retval);
3764 		}
3765 		break;
3766 
3767 	case DEVCTL_DEVICE_GETSTATE:
3768 	case DEVCTL_DEVICE_OFFLINE:
3769 	case DEVCTL_DEVICE_ONLINE:
3770 	case DEVCTL_DEVICE_REMOVE:
3771 	case DEVCTL_DEVICE_RESET:
3772 		if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3773 			return (retval);
3774 		}
3775 
3776 		ASSERT(dcp != NULL);
3777 
3778 		/* ensure we have a name and address */
3779 		if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3780 		    ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3781 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
3782 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
3783 			    "ioctl: can't get name (%s) or addr (%s)",
3784 			    ndi_nm ? ndi_nm : "<null ptr>",
3785 			    ndi_addr ? ndi_addr : "<null ptr>");
3786 			ndi_dc_freehdl(dcp);
3787 			return (retval);
3788 		}
3789 
3790 
3791 		/* get our child's DIP */
3792 		ASSERT(pptr != NULL);
3793 		if (is_mpxio) {
3794 			mdi_devi_enter(pptr->port_dip, &enteredv);
3795 		} else {
3796 			ndi_devi_enter(pptr->port_dip);
3797 		}
3798 		devi_entered = 1;
3799 
3800 		if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3801 		    ndi_addr)) == NULL) {
3802 			/* Look for virtually enumerated devices. */
3803 			pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3804 			if (pip == NULL ||
3805 			    ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3806 				*rval = ENXIO;
3807 				goto out;
3808 			}
3809 		}
3810 		break;
3811 
3812 	default:
3813 		*rval = ENOTTY;
3814 		return (retval);
3815 	}
3816 
3817 	/* this ioctl is ours -- process it */
3818 
3819 	retval = FC_SUCCESS;		/* just means we claim the ioctl */
3820 
3821 	/* we assume it will be a success; else we'll set error value */
3822 	*rval = 0;
3823 
3824 
3825 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3826 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
3827 	    "ioctl: claiming this one");
3828 
3829 	/* handle ioctls now */
3830 	switch (cmd) {
3831 	case DEVCTL_DEVICE_GETSTATE:
3832 		ASSERT(cdip != NULL);
3833 		ASSERT(dcp != NULL);
3834 		if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3835 			*rval = EFAULT;
3836 		}
3837 		break;
3838 
3839 	case DEVCTL_DEVICE_REMOVE:
3840 	case DEVCTL_DEVICE_OFFLINE: {
3841 		int			flag = 0;
3842 		int			lcount;
3843 		int			tcount;
3844 		struct fcp_pkt	*head = NULL;
3845 		struct fcp_lun	*plun;
3846 		child_info_t		*cip = CIP(cdip);
3847 		int			all = 1;
3848 		struct fcp_lun	*tplun;
3849 		struct fcp_tgt	*ptgt;
3850 
3851 		ASSERT(pptr != NULL);
3852 		ASSERT(cdip != NULL);
3853 
3854 		mutex_enter(&pptr->port_mutex);
3855 		if (pip != NULL) {
3856 			cip = CIP(pip);
3857 		}
3858 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3859 			mutex_exit(&pptr->port_mutex);
3860 			*rval = ENXIO;
3861 			break;
3862 		}
3863 
3864 		head = fcp_scan_commands(plun);
3865 		if (head != NULL) {
3866 			fcp_abort_commands(head, LUN_PORT);
3867 		}
3868 		lcount = pptr->port_link_cnt;
3869 		tcount = plun->lun_tgt->tgt_change_cnt;
3870 		mutex_exit(&pptr->port_mutex);
3871 
3872 		if (cmd == DEVCTL_DEVICE_REMOVE) {
3873 			flag = NDI_DEVI_REMOVE;
3874 		}
3875 
3876 		if (is_mpxio) {
3877 			mdi_devi_exit(pptr->port_dip, enteredv);
3878 		} else {
3879 			ndi_devi_exit(pptr->port_dip);
3880 		}
3881 		devi_entered = 0;
3882 
3883 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3884 		    FCP_OFFLINE, lcount, tcount, flag);
3885 
3886 		if (*rval != NDI_SUCCESS) {
3887 			*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3888 			break;
3889 		}
3890 
3891 		fcp_update_offline_flags(plun);
3892 
3893 		ptgt = plun->lun_tgt;
3894 		mutex_enter(&ptgt->tgt_mutex);
3895 		for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3896 		    tplun->lun_next) {
3897 			mutex_enter(&tplun->lun_mutex);
3898 			if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3899 				all = 0;
3900 			}
3901 			mutex_exit(&tplun->lun_mutex);
3902 		}
3903 
3904 		if (all) {
3905 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3906 			/*
3907 			 * The user is unconfiguring/offlining the device.
3908 			 * If fabric and the auto configuration is set
3909 			 * then make sure the user is the only one who
3910 			 * can reconfigure the device.
3911 			 */
3912 			if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3913 			    fcp_enable_auto_configuration) {
3914 				ptgt->tgt_manual_config_only = 1;
3915 			}
3916 		}
3917 		mutex_exit(&ptgt->tgt_mutex);
3918 		break;
3919 	}
3920 
3921 	case DEVCTL_DEVICE_ONLINE: {
3922 		int			lcount;
3923 		int			tcount;
3924 		struct fcp_lun	*plun;
3925 		child_info_t		*cip = CIP(cdip);
3926 
3927 		ASSERT(cdip != NULL);
3928 		ASSERT(pptr != NULL);
3929 
3930 		mutex_enter(&pptr->port_mutex);
3931 		if (pip != NULL) {
3932 			cip = CIP(pip);
3933 		}
3934 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3935 			mutex_exit(&pptr->port_mutex);
3936 			*rval = ENXIO;
3937 			break;
3938 		}
3939 		lcount = pptr->port_link_cnt;
3940 		tcount = plun->lun_tgt->tgt_change_cnt;
3941 		mutex_exit(&pptr->port_mutex);
3942 
3943 		/*
3944 		 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3945 		 * to allow the device attach to occur when the device is
3946 		 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3947 		 * from the scsi_probe()).
3948 		 */
3949 		mutex_enter(&LUN_TGT->tgt_mutex);
3950 		plun->lun_state |= FCP_LUN_ONLINING;
3951 		mutex_exit(&LUN_TGT->tgt_mutex);
3952 
3953 		if (is_mpxio) {
3954 			mdi_devi_exit(pptr->port_dip, enteredv);
3955 		} else {
3956 			ndi_devi_exit(pptr->port_dip);
3957 		}
3958 		devi_entered = 0;
3959 
3960 		*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3961 		    FCP_ONLINE, lcount, tcount, 0);
3962 
3963 		if (*rval != NDI_SUCCESS) {
3964 			/* Reset the FCP_LUN_ONLINING bit */
3965 			mutex_enter(&LUN_TGT->tgt_mutex);
3966 			plun->lun_state &= ~FCP_LUN_ONLINING;
3967 			mutex_exit(&LUN_TGT->tgt_mutex);
3968 			*rval = EIO;
3969 			break;
3970 		}
3971 		mutex_enter(&LUN_TGT->tgt_mutex);
3972 		plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3973 		    FCP_LUN_ONLINING);
3974 		mutex_exit(&LUN_TGT->tgt_mutex);
3975 		break;
3976 	}
3977 
3978 	case DEVCTL_BUS_DEV_CREATE: {
3979 		uchar_t			*bytes = NULL;
3980 		uint_t			nbytes;
3981 		struct fcp_tgt		*ptgt = NULL;
3982 		struct fcp_lun		*plun = NULL;
3983 		dev_info_t		*useless_dip = NULL;
3984 
3985 		*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3986 		    DEVCTL_CONSTRUCT, &useless_dip);
3987 		if (*rval != 0 || useless_dip == NULL) {
3988 			break;
3989 		}
3990 
3991 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3992 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3993 		    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3994 			*rval = EINVAL;
3995 			(void) ndi_devi_free(useless_dip);
3996 			if (bytes != NULL) {
3997 				ddi_prop_free(bytes);
3998 			}
3999 			break;
4000 		}
4001 
4002 		*rval = fcp_create_on_demand(pptr, bytes);
4003 		if (*rval == 0) {
4004 			mutex_enter(&pptr->port_mutex);
4005 			ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4006 			if (ptgt) {
4007 				/*
4008 				 * We now have a pointer to the target that
4009 				 * was created. Let's point to the first LUN on
4010 				 * this new target.
4011 				 */
4012 				mutex_enter(&ptgt->tgt_mutex);
4013 
4014 				plun = ptgt->tgt_lun;
4015 				/*
4016 				 * There may be stale/offline LUN entries on
4017 				 * this list (this is by design) and so we have
4018 				 * to make sure we point to the first online
4019 				 * LUN
4020 				 */
4021 				while (plun &&
4022 				    plun->lun_state & FCP_LUN_OFFLINE) {
4023 					plun = plun->lun_next;
4024 				}
4025 
4026 				mutex_exit(&ptgt->tgt_mutex);
4027 			}
4028 			mutex_exit(&pptr->port_mutex);
4029 		}
4030 
4031 		if (*rval == 0 && ptgt && plun) {
4032 			mutex_enter(&plun->lun_mutex);
4033 			/*
4034 			 * Allow up to fcp_lun_ready_retry seconds to
4035 			 * configure all the luns behind the target.
4036 			 *
4037 			 * The intent here is to allow targets with long
4038 			 * reboot/reset-recovery times to become available
4039 			 * while limiting the maximum wait time for an
4040 			 * unresponsive target.
4041 			 */
4042 			end_time = ddi_get_lbolt() +
4043 			    SEC_TO_TICK(fcp_lun_ready_retry);
4044 
4045 			while (ddi_get_lbolt() < end_time) {
4046 				retval = FC_SUCCESS;
4047 
4048 				/*
4049 				 * The new ndi interfaces for on-demand creation
4050 				 * are inflexible, so do some more work to pass
4051 				 * on the path name of some LUN (the design is
4052 				 * broken!)
4052 				 */
4053 				if (plun->lun_cip) {
4054 					if (plun->lun_mpxio == 0) {
4055 						cdip = DIP(plun->lun_cip);
4056 					} else {
4057 						cdip = mdi_pi_get_client(
4058 						    PIP(plun->lun_cip));
4059 					}
4060 					if (cdip == NULL) {
4061 						*rval = ENXIO;
4062 						break;
4063 					}
4064 
4065 					if (!i_ddi_devi_attached(cdip)) {
4066 						mutex_exit(&plun->lun_mutex);
4067 						delay(drv_usectohz(1000000));
4068 						mutex_enter(&plun->lun_mutex);
4069 					} else {
4070 						/*
4071 						 * This LUN is ready; let's
4072 						 * check the next one.
4073 						 */
4074 						mutex_exit(&plun->lun_mutex);
4075 						plun = plun->lun_next;
4076 						while (plun && (plun->lun_state
4077 						    & FCP_LUN_OFFLINE)) {
4078 							plun = plun->lun_next;
4079 						}
4080 						if (!plun) {
4081 							break;
4082 						}
4083 						mutex_enter(&plun->lun_mutex);
4084 					}
4085 				} else {
4086 					/*
4087 					 * lun_cip field for a valid lun
4088 					 * should never be NULL. Fail the
4089 					 * command.
4090 					 */
4091 					*rval = ENXIO;
4092 					break;
4093 				}
4094 			}
4095 			if (plun) {
4096 				mutex_exit(&plun->lun_mutex);
4097 			} else {
4098 				char devnm[MAXNAMELEN];
4099 				int nmlen;
4100 
4101 				nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4102 				    ddi_node_name(cdip),
4103 				    ddi_get_name_addr(cdip));
4104 
4105 				if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4106 				    0) {
4107 					*rval = EFAULT;
4108 				}
4109 			}
4110 		} else {
4111 			int	i;
4112 			char	buf[25];
4113 
4114 			for (i = 0; i < FC_WWN_SIZE; i++) {
4115 				(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4116 			}
4117 
4118 			fcp_log(CE_WARN, pptr->port_dip,
4119 			    "!Failed to create nodes for pwwn=%s; error=%x",
4120 			    buf, *rval);
4121 		}
4122 
4123 		(void) ndi_devi_free(useless_dip);
4124 		ddi_prop_free(bytes);
4125 		break;
4126 	}
4127 
4128 	case DEVCTL_DEVICE_RESET: {
4129 		struct fcp_lun		*plun;
4130 		child_info_t		*cip = CIP(cdip);
4131 
4132 		ASSERT(cdip != NULL);
4133 		ASSERT(pptr != NULL);
4134 		mutex_enter(&pptr->port_mutex);
4135 		if (pip != NULL) {
4136 			cip = CIP(pip);
4137 		}
4138 		if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4139 			mutex_exit(&pptr->port_mutex);
4140 			*rval = ENXIO;
4141 			break;
4142 		}
4143 		mutex_exit(&pptr->port_mutex);
4144 
4145 		mutex_enter(&plun->lun_tgt->tgt_mutex);
4146 		if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4147 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4148 
4149 			*rval = ENXIO;
4150 			break;
4151 		}
4152 
4153 		if (plun->lun_sd == NULL) {
4154 			mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 
4156 			*rval = ENXIO;
4157 			break;
4158 		}
4159 		mutex_exit(&plun->lun_tgt->tgt_mutex);
4160 
4161 		/*
4162 		 * set up the scsi_address so that fcp_scsi_reset can figure out
4163 		 * which target to reset
4164 		 */
4165 		if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4166 		    RESET_TARGET) == FALSE) {
4167 			*rval = EIO;
4168 		}
4169 		break;
4170 	}
4171 
4172 	case DEVCTL_BUS_GETSTATE:
4173 		ASSERT(dcp != NULL);
4174 		ASSERT(pptr != NULL);
4175 		ASSERT(pptr->port_dip != NULL);
4176 		if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4177 		    NDI_SUCCESS) {
4178 			*rval = EFAULT;
4179 		}
4180 		break;
4181 
4182 	case DEVCTL_BUS_QUIESCE:
4183 	case DEVCTL_BUS_UNQUIESCE:
4184 		*rval = ENOTSUP;
4185 		break;
4186 
4187 	case DEVCTL_BUS_RESET:
4188 	case DEVCTL_BUS_RESETALL:
4189 		ASSERT(pptr != NULL);
4190 		(void) fcp_linkreset(pptr, NULL,  KM_SLEEP);
4191 		break;
4192 
4193 	default:
4194 		ASSERT(dcp != NULL);
4195 		*rval = ENOTTY;
4196 		break;
4197 	}
4198 
4199 	/* all done -- clean up and return */
4200 out:	if (devi_entered) {
4201 		if (is_mpxio) {
4202 			mdi_devi_exit(pptr->port_dip, enteredv);
4203 		} else {
4204 			ndi_devi_exit(pptr->port_dip);
4205 		}
4206 	}
4207 
4208 	if (dcp != NULL) {
4209 		ndi_dc_freehdl(dcp);
4210 	}
4211 
4212 	return (retval);
4213 }
4214 
4215 
4216 /*ARGSUSED*/
4217 static int
4218 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4219     uint32_t claimed)
4220 {
4221 	uchar_t			r_ctl;
4222 	uchar_t			ls_code;
4223 	struct fcp_port	*pptr;
4224 
4225 	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4226 		return (FC_UNCLAIMED);
4227 	}
4228 
4229 	mutex_enter(&pptr->port_mutex);
4230 	if (pptr->port_state & (FCP_STATE_DETACHING |
4231 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4232 		mutex_exit(&pptr->port_mutex);
4233 		return (FC_UNCLAIMED);
4234 	}
4235 	mutex_exit(&pptr->port_mutex);
4236 
4237 	r_ctl = buf->ub_frame.r_ctl;
4238 
4239 	switch (r_ctl & R_CTL_ROUTING) {
4240 	case R_CTL_EXTENDED_SVC:
4241 		if (r_ctl == R_CTL_ELS_REQ) {
4242 			ls_code = buf->ub_buffer[0];
4243 
4244 			switch (ls_code) {
4245 			case LA_ELS_PRLI:
4246 				/*
4247 				 * We really don't care if something fails.
4248 				 * If the PRLI was not sent out, then the
4249 				 * other end will time it out.
4250 				 */
4251 				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4252 					return (FC_SUCCESS);
4253 				}
4254 				return (FC_UNCLAIMED);
4255 				/* NOTREACHED */
4256 
4257 			default:
4258 				break;
4259 			}
4260 		}
4261 		/* FALLTHROUGH */
4262 
4263 	default:
4264 		return (FC_UNCLAIMED);
4265 	}
4266 }
4267 
4268 
4269 /*ARGSUSED*/
4270 static int
4271 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4272     uint32_t claimed)
4273 {
4274 	return (FC_UNCLAIMED);
4275 }
4276 
4277 /*
4278  *     Function: fcp_statec_callback
4279  *
4280  *  Description: The purpose of this function is to handle a port state change.
4281  *		 It is called from fp/fctl and, in a few instances, internally.
4282  *
4283  *     Argument: ulph		fp/fctl port handle
4284  *		 port_handle	fcp_port structure
4285  *		 port_state	Physical state of the port
4286  *		 port_top	Topology
4287  *		 *devlist	Pointer to the first entry of a table
4288  *				containing the remote ports that can be
4289  *				reached.
4290  *		 dev_cnt	Number of entries pointed by devlist.
4291  *		 port_sid	Port ID of the local port.
4292  *
4293  * Return Value: None
4294  */
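/*
 * Roughly, the dispatch on FC_PORT_STATE_MASK(port_state) below is:
 *
 *	FC_STATE_OFFLINE,
 *	FC_STATE_RESET_REQUESTED	mark LUNs BUSY|MARK and flag the port
 *					OFFLINE (or, if it was already offline,
 *					hand the devlist to
 *					fcp_handle_devices())
 *	FC_STATE_ONLINE, FC_STATE_LIP,
 *	FC_STATE_LIP_LBIT_SET		mark LUNs BUSY|MARK, flag the port
 *					ONLINING and run fcp_handle_devices()
 *	FC_STATE_RESET			wait for the follow-up offline/online
 *					callback
 *	FC_STATE_DEVICE_CHANGE		fcp_update_targets() followed by
 *					fcp_handle_devices()
 *	FC_STATE_TARGET_PORT_RESET	retry the name server registration if
 *					it had failed; nothing else
 */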
4295 /*ARGSUSED*/
4296 static void
4297 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4298     uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4299     uint32_t dev_cnt, uint32_t port_sid)
4300 {
4301 	uint32_t		link_count;
4302 	int			map_len = 0;
4303 	struct fcp_port	*pptr;
4304 	fcp_map_tag_t		*map_tag = NULL;
4305 
4306 	if ((pptr = fcp_get_port(port_handle)) == NULL) {
4307 		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4308 		return;			/* nothing to work with! */
4309 	}
4310 
4311 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4312 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
4313 	    "fcp_statec_callback: port state/dev_cnt/top ="
4314 	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4315 	    dev_cnt, port_top);
4316 
4317 	mutex_enter(&pptr->port_mutex);
4318 
4319 	/*
4320 	 * If a thread is in detach, don't do anything.
4321 	 */
4322 	if (pptr->port_state & (FCP_STATE_DETACHING |
4323 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4324 		mutex_exit(&pptr->port_mutex);
4325 		return;
4326 	}
4327 
4328 	/*
4329 	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4330 	 * init_pkt is called, it knows whether or not the target's status
4331 	 * (or pd) might be changing.
4332 	 */
4333 
4334 	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4335 		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4336 	}
4337 
4338 	/*
4339 	 * the transport doesn't allocate or probe unless being
4340 	 * asked to by either the applications or ULPs
4341 	 *
4342 	 * in cases where the port is OFFLINE at the time of port
4343 	 * attach callback and the link comes ONLINE later, for
4344 	 * easier automatic node creation (i.e. without you having to
4345 	 * go out and run the utility to perform LOGINs) the
4346 	 * following conditional is helpful
4347 	 */
4348 	pptr->port_phys_state = port_state;
4349 
4350 	if (dev_cnt) {
4351 		mutex_exit(&pptr->port_mutex);
4352 
4353 		map_len = sizeof (*map_tag) * dev_cnt;
4354 		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4355 		if (map_tag == NULL) {
4356 			fcp_log(CE_WARN, pptr->port_dip,
4357 			    "!fcp%d: failed to allocate for map tags; "
4358 			    " state change will not be processed",
4359 			    pptr->port_instance);
4360 
4361 			mutex_enter(&pptr->port_mutex);
4362 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4363 			mutex_exit(&pptr->port_mutex);
4364 
4365 			return;
4366 		}
4367 
4368 		mutex_enter(&pptr->port_mutex);
4369 	}
4370 
4371 	if (pptr->port_id != port_sid) {
4372 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4373 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4374 		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4375 		    port_sid);
4376 		/*
4377 		 * The local port changed ID. It is the first time a port ID
4378 		 * is assigned or something drastic happened.  We might have
4379 		 * been unplugged and replugged on another loop or fabric port
4380 		 * or somebody grabbed the AL_PA we had or somebody rezoned
4381 		 * the fabric we were plugged into.
4382 		 */
4383 		pptr->port_id = port_sid;
4384 	}
4385 
4386 	switch (FC_PORT_STATE_MASK(port_state)) {
4387 	case FC_STATE_OFFLINE:
4388 	case FC_STATE_RESET_REQUESTED:
4389 		/*
4390 		 * link has gone from online to offline -- just update the
4391 		 * state of this port to BUSY and MARKed to go offline
4392 		 */
4393 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4394 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4395 		    "link went offline");
4396 		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4397 			/*
4398 			 * We were offline a while ago and this one
4399 			 * seems to indicate that the loop has gone
4400 			 * dead forever.
4401 			 */
4402 			pptr->port_tmp_cnt += dev_cnt;
4403 			pptr->port_state &= ~FCP_STATE_OFFLINE;
4404 			pptr->port_state |= FCP_STATE_INIT;
4405 			link_count = pptr->port_link_cnt;
4406 			fcp_handle_devices(pptr, devlist, dev_cnt,
4407 			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4408 		} else {
4409 			pptr->port_link_cnt++;
4410 			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4411 			fcp_update_state(pptr, (FCP_LUN_BUSY |
4412 			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4413 			if (pptr->port_mpxio) {
4414 				fcp_update_mpxio_path_verifybusy(pptr);
4415 			}
4416 			pptr->port_state |= FCP_STATE_OFFLINE;
4417 			pptr->port_state &=
4418 			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4419 			pptr->port_tmp_cnt = 0;
4420 		}
4421 		mutex_exit(&pptr->port_mutex);
4422 		break;
4423 
4424 	case FC_STATE_ONLINE:
4425 	case FC_STATE_LIP:
4426 	case FC_STATE_LIP_LBIT_SET:
4427 		/*
4428 		 * link has gone from offline to online
4429 		 */
4430 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4431 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4432 		    "link went online");
4433 
4434 		pptr->port_link_cnt++;
4435 
4436 		while (pptr->port_ipkt_cnt) {
4437 			mutex_exit(&pptr->port_mutex);
4438 			delay(drv_usectohz(1000000));
4439 			mutex_enter(&pptr->port_mutex);
4440 		}
4441 
4442 		pptr->port_topology = port_top;
4443 
4444 		/*
4445 		 * The state of the targets and luns accessible through this
4446 		 * port is updated.
4447 		 */
4448 		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4449 		    FCP_CAUSE_LINK_CHANGE);
4450 
4451 		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4452 		pptr->port_state |= FCP_STATE_ONLINING;
4453 		pptr->port_tmp_cnt = dev_cnt;
4454 		link_count = pptr->port_link_cnt;
4455 
4456 		pptr->port_deadline = fcp_watchdog_time +
4457 		    FCP_ICMD_DEADLINE;
4458 
4459 		if (!dev_cnt) {
4460 			/*
4461 			 * We go directly to the online state if no remote
4462 			 * ports were discovered.
4463 			 */
4464 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4465 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4466 			    "No remote ports discovered");
4467 
4468 			pptr->port_state &= ~FCP_STATE_ONLINING;
4469 			pptr->port_state |= FCP_STATE_ONLINE;
4470 		}
4471 
4472 		switch (port_top) {
4473 		case FC_TOP_FABRIC:
4474 		case FC_TOP_PUBLIC_LOOP:
4475 		case FC_TOP_PRIVATE_LOOP:
4476 		case FC_TOP_PT_PT:
4477 
4478 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4479 				fcp_retry_ns_registry(pptr, port_sid);
4480 			}
4481 
4482 			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4483 			    map_tag, FCP_CAUSE_LINK_CHANGE);
4484 			break;
4485 
4486 		default:
4487 			/*
4488 			 * We got here because we were provided with an unknown
4489 			 * topology.
4490 			 */
4491 			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4492 				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4493 			}
4494 
4495 			pptr->port_tmp_cnt -= dev_cnt;
4496 			fcp_log(CE_WARN, pptr->port_dip,
4497 			    "!unknown/unsupported topology (0x%x)", port_top);
4498 			break;
4499 		}
4500 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4501 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4502 		    "Notify ssd of the reset to reinstate the reservations");
4503 
4504 		scsi_hba_reset_notify_callback(&pptr->port_mutex,
4505 		    &pptr->port_reset_notify_listf);
4506 
4507 		mutex_exit(&pptr->port_mutex);
4508 
4509 		break;
4510 
4511 	case FC_STATE_RESET:
4512 		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4513 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4514 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
4515 		    "RESET state, waiting for Offline/Online state_cb");
4516 		mutex_exit(&pptr->port_mutex);
4517 		break;
4518 
4519 	case FC_STATE_DEVICE_CHANGE:
4520 		/*
4521 		 * We come here when an application has requested
4522 		 * Dynamic node creation/deletion in Fabric connectivity.
4523 		 */
4524 		if (pptr->port_state & (FCP_STATE_OFFLINE |
4525 		    FCP_STATE_INIT)) {
4526 			/*
4527 			 * This case can happen when the FCTL is in the
4528 			 * process of reporting us online and the host on
4529 			 * the other side issues a PLOGI/PLOGO. Ideally
4530 			 * the state changes should be serialized unless
4531 			 * they are opposite (online-offline).
4532 			 * The transport will give us a final state change
4533 			 * so we can ignore this for the time being.
4534 			 */
4535 			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4536 			mutex_exit(&pptr->port_mutex);
4537 			break;
4538 		}
4539 
4540 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4541 			fcp_retry_ns_registry(pptr, port_sid);
4542 		}
4543 
4544 		/*
4545 		 * Extend the deadline under steady state conditions
4546 		 * to provide more time for the device-change-commands
4547 		 */
4548 		if (!pptr->port_ipkt_cnt) {
4549 			pptr->port_deadline = fcp_watchdog_time +
4550 			    FCP_ICMD_DEADLINE;
4551 		}
4552 
4553 		/*
4554 		 * There is another race condition here, where if we were
4555 	 * in ONLINING state and a device in the map logs out,
4556 		 * fp will give another state change as DEVICE_CHANGE
4557 		 * and OLD. This will result in that target being offlined.
4558 		 * The pd_handle is freed. If from the first statec callback
4559 		 * we were going to fire a PLOGI/PRLI, the system will
4560 		 * panic in fc_ulp_transport with invalid pd_handle.
4561 		 * The fix is to check for the link_cnt before issuing
4562 		 * any command down.
4563 		 */
4564 		fcp_update_targets(pptr, devlist, dev_cnt,
4565 		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4566 
4567 		link_count = pptr->port_link_cnt;
4568 
4569 		fcp_handle_devices(pptr, devlist, dev_cnt,
4570 		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4571 
4572 		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4573 
4574 		mutex_exit(&pptr->port_mutex);
4575 		break;
4576 
4577 	case FC_STATE_TARGET_PORT_RESET:
4578 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4579 			fcp_retry_ns_registry(pptr, port_sid);
4580 		}
4581 
4582 		/* Do nothing else */
4583 		mutex_exit(&pptr->port_mutex);
4584 		break;
4585 
4586 	default:
4587 		fcp_log(CE_WARN, pptr->port_dip,
4588 		    "!Invalid state change=0x%x", port_state);
4589 		mutex_exit(&pptr->port_mutex);
4590 		break;
4591 	}
4592 
4593 	if (map_tag) {
4594 		kmem_free(map_tag, map_len);
4595 	}
4596 }
4597 
4598 /*
4599  *     Function: fcp_handle_devices
4600  *
4601  *  Description: This function updates the devices currently known by
4602  *		 walking the list provided by the caller.  The list passed
4603  *		 by the caller is supposed to be the list of reachable
4604  *		 devices.
4605  *
4606  *     Argument: *pptr		Fcp port structure.
4607  *		 *devlist	Pointer to the first entry of a table
4608  *				containing the remote ports that can be
4609  *				reached.
4610  *		 dev_cnt	Number of entries pointed by devlist.
4611  *		 link_cnt	Link state count.
4612  *		 *map_tag	Array of fcp_map_tag_t structures.
4613  *		 cause		What caused this function to be called.
4614  *
4615  * Return Value: None
4616  *
4617  *	  Notes: The pptr->port_mutex must be held.
4618  */
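/*
 * The work below is done in three passes while pptr->port_mutex is held:
 *
 *	1) walk devlist and tag every target that is already known
 *	   (tgt_aux_state = FCP_TGT_TAGGED), recording its change count in
 *	   map_tag[];
 *	2) walk the target hash table and offline any target that was not
 *	   tagged (only when cause is FCP_CAUSE_LINK_CHANGE and the target is
 *	   not already offline);
 *	3) walk devlist again and, depending on map_type, start discovery via
 *	   fcp_handle_mapflags(), offline the target, or handle a device
 *	   change.
 */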
4619 static void
4620 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4621     uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4622 {
4623 	int			i;
4624 	int			check_finish_init = 0;
4625 	fc_portmap_t		*map_entry;
4626 	struct fcp_tgt	*ptgt = NULL;
4627 
4628 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
4629 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
4630 	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4631 
4632 	if (dev_cnt) {
4633 		ASSERT(map_tag != NULL);
4634 	}
4635 
4636 	/*
4637 	 * The following code goes through the list of remote ports that are
4638 	 * accessible through this (pptr) local port (The list walked is the
4639 	 * one provided by the caller which is the list of the remote ports
4640 	 * currently reachable).  It checks if any of them was already
4641 	 * known by looking for the corresponding target structure based on
4642 	 * the world wide name.	 If a target is part of the list it is tagged
4643 	 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4644 	 *
4645 	 * Old comment
4646 	 * -----------
4647 	 * Before we drop port mutex; we MUST get the tags updated; This
4648 	 * two step process is somewhat slow, but more reliable.
4649 	 */
4650 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4651 		map_entry = &(devlist[i]);
4652 
4653 		/*
4654 		 * get ptr to this map entry in our port's
4655 		 * list (if any)
4656 		 */
4657 		ptgt = fcp_lookup_target(pptr,
4658 		    (uchar_t *)&(map_entry->map_pwwn));
4659 
4660 		if (ptgt) {
4661 			map_tag[i] = ptgt->tgt_change_cnt;
4662 			if (cause == FCP_CAUSE_LINK_CHANGE) {
4663 				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4664 			}
4665 		}
4666 	}
4667 
4668 	/*
4669 	 * At this point we know which devices of the new list were already
4670 	 * known (The field tgt_aux_state of the target structure has been
4671 	 * set to FCP_TGT_TAGGED).
4672 	 *
4673 	 * The following code goes through the list of targets currently known
4674 	 * by the local port (the list is actually a hashing table).  If a
4675 	 * target is found and is not tagged, it means the target cannot
4676 	 * be reached anymore through the local port (pptr).  It is offlined.
4677 	 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4678 	 */
4679 	for (i = 0; i < FCP_NUM_HASH; i++) {
4680 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4681 		    ptgt = ptgt->tgt_next) {
4682 			mutex_enter(&ptgt->tgt_mutex);
4683 			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4684 			    (cause == FCP_CAUSE_LINK_CHANGE) &&
4685 			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4686 				fcp_offline_target_now(pptr, ptgt,
4687 				    link_cnt, ptgt->tgt_change_cnt, 0);
4688 			}
4689 			mutex_exit(&ptgt->tgt_mutex);
4690 		}
4691 	}
4692 
4693 	/*
4694 	 * At this point, the devices that were known but cannot be reached
4695 	 * anymore, have most likely been offlined.
4696 	 *
4697 	 * The following section of code goes through the list of
4698 	 * remote ports that can now be reached.  For every single one it
4699 	 * checks if it is already known or if it is a new port.
4700 	 */
4701 	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4702 
4703 		if (check_finish_init) {
4704 			ASSERT(i > 0);
4705 			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4706 			    map_tag[i - 1], cause);
4707 			check_finish_init = 0;
4708 		}
4709 
4710 		/* get a pointer to this map entry */
4711 		map_entry = &(devlist[i]);
4712 
4713 		/*
4714 		 * Check for the duplicate map entry flag. If we have marked
4715 		 * this entry as a duplicate we skip it since the correct
4716 		 * (perhaps even same) state change will be encountered
4717 		 * later in the list.
4718 		 */
4719 		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4720 			continue;
4721 		}
4722 
4723 		/* get ptr to this map entry in our port's list (if any) */
4724 		ptgt = fcp_lookup_target(pptr,
4725 		    (uchar_t *)&(map_entry->map_pwwn));
4726 
4727 		if (ptgt) {
4728 			/*
4729 			 * This device was already known.  The field
4730 			 * tgt_aux_state is reset (was probably set to
4731 			 * FCP_TGT_TAGGED previously in this routine).
4732 			 */
4733 			ptgt->tgt_aux_state = 0;
4734 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4735 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
4736 			    "handle_devices: map did/state/type/flags = "
4737 			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4738 			    "tgt_state=%d",
4739 			    map_entry->map_did.port_id, map_entry->map_state,
4740 			    map_entry->map_type, map_entry->map_flags,
4741 			    ptgt->tgt_d_id, ptgt->tgt_state);
4742 		}
4743 
4744 		if (map_entry->map_type == PORT_DEVICE_OLD ||
4745 		    map_entry->map_type == PORT_DEVICE_NEW ||
4746 		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4747 		    map_entry->map_type == PORT_DEVICE_CHANGED) {
4748 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
4749 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
4750 			    "map_type=%x, did = %x",
4751 			    map_entry->map_type,
4752 			    map_entry->map_did.port_id);
4753 		}
4754 
4755 		switch (map_entry->map_type) {
4756 		case PORT_DEVICE_NOCHANGE:
4757 		case PORT_DEVICE_USER_CREATE:
4758 		case PORT_DEVICE_USER_LOGIN:
4759 		case PORT_DEVICE_NEW:
4760 		case PORT_DEVICE_REPORTLUN_CHANGED:
4761 			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4762 
4763 			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4764 			    link_cnt, (ptgt) ? map_tag[i] : 0,
4765 			    cause) == TRUE) {
4766 
4767 				FCP_TGT_TRACE(ptgt, map_tag[i],
4768 				    FCP_TGT_TRACE_2);
4769 				check_finish_init++;
4770 			}
4771 			break;
4772 
4773 		case PORT_DEVICE_OLD:
4774 			if (ptgt != NULL) {
4775 				FCP_TGT_TRACE(ptgt, map_tag[i],
4776 				    FCP_TGT_TRACE_3);
4777 
4778 				mutex_enter(&ptgt->tgt_mutex);
4779 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4780 					/*
4781 					 * Must do an in-line wait for I/Os
4782 					 * to get drained
4783 					 */
4784 					mutex_exit(&ptgt->tgt_mutex);
4785 					mutex_exit(&pptr->port_mutex);
4786 
4787 					mutex_enter(&ptgt->tgt_mutex);
4788 					while (ptgt->tgt_ipkt_cnt ||
4789 					    fcp_outstanding_lun_cmds(ptgt)
4790 					    == FC_SUCCESS) {
4791 						mutex_exit(&ptgt->tgt_mutex);
4792 						delay(drv_usectohz(1000000));
4793 						mutex_enter(&ptgt->tgt_mutex);
4794 					}
4795 					mutex_exit(&ptgt->tgt_mutex);
4796 
4797 					mutex_enter(&pptr->port_mutex);
4798 					mutex_enter(&ptgt->tgt_mutex);
4799 
4800 					(void) fcp_offline_target(pptr, ptgt,
4801 					    link_cnt, map_tag[i], 0, 0);
4802 				}
4803 				mutex_exit(&ptgt->tgt_mutex);
4804 			}
4805 			check_finish_init++;
4806 			break;
4807 
4808 		case PORT_DEVICE_USER_DELETE:
4809 		case PORT_DEVICE_USER_LOGOUT:
4810 			if (ptgt != NULL) {
4811 				FCP_TGT_TRACE(ptgt, map_tag[i],
4812 				    FCP_TGT_TRACE_4);
4813 
4814 				mutex_enter(&ptgt->tgt_mutex);
4815 				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4816 					(void) fcp_offline_target(pptr, ptgt,
4817 					    link_cnt, map_tag[i], 1, 0);
4818 				}
4819 				mutex_exit(&ptgt->tgt_mutex);
4820 			}
4821 			check_finish_init++;
4822 			break;
4823 
4824 		case PORT_DEVICE_CHANGED:
4825 			if (ptgt != NULL) {
4826 				FCP_TGT_TRACE(ptgt, map_tag[i],
4827 				    FCP_TGT_TRACE_5);
4828 
4829 				if (fcp_device_changed(pptr, ptgt,
4830 				    map_entry, link_cnt, map_tag[i],
4831 				    cause) == TRUE) {
4832 					check_finish_init++;
4833 				}
4834 			} else {
4835 				if (fcp_handle_mapflags(pptr, ptgt,
4836 				    map_entry, link_cnt, 0, cause) == TRUE) {
4837 					check_finish_init++;
4838 				}
4839 			}
4840 			break;
4841 
4842 		default:
4843 			fcp_log(CE_WARN, pptr->port_dip,
4844 			    "!Invalid map_type=0x%x", map_entry->map_type);
4845 			check_finish_init++;
4846 			break;
4847 		}
4848 	}
4849 
4850 	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4851 		ASSERT(i > 0);
4852 		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4853 		    map_tag[i-1], cause);
4854 	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4855 		fcp_offline_all(pptr, link_cnt, cause);
4856 	}
4857 }
4858 
4859 static int
4860 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4861 {
4862 	struct fcp_lun	*plun;
4863 	struct fcp_port *pptr;
4864 	int		 rscn_count;
4865 	int		 lun0_newalloc;
4866 	int		 ret  = TRUE;
4867 
4868 	ASSERT(ptgt);
4869 	pptr = ptgt->tgt_port;
4870 	lun0_newalloc = 0;
4871 	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4872 		/*
4873 		 * no LUN struct for LUN 0 yet exists,
4874 		 * so create one
4875 		 */
4876 		plun = fcp_alloc_lun(ptgt);
4877 		if (plun == NULL) {
4878 			fcp_log(CE_WARN, pptr->port_dip,
4879 			    "!Failed to allocate lun 0 for"
4880 			    " D_ID=%x", ptgt->tgt_d_id);
4881 			return (ret);
4882 		}
4883 		lun0_newalloc = 1;
4884 	}
4885 
4886 	mutex_enter(&ptgt->tgt_mutex);
4887 	/*
4888 	 * consider lun 0 as device not connected if it is
4889 	 * offlined or newly allocated
4890 	 */
4891 	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4892 		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4893 	}
4894 	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4895 	plun->lun_state &= ~FCP_LUN_OFFLINE;
4896 	ptgt->tgt_lun_cnt = 1;
4897 	ptgt->tgt_report_lun_cnt = 0;
4898 	mutex_exit(&ptgt->tgt_mutex);
4899 
4900 	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4901 	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4902 	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4903 	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4904 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
4905 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4906 		    "to D_ID=%x", ptgt->tgt_d_id);
4907 	} else {
4908 		ret = FALSE;
4909 	}
4910 
4911 	return (ret);
4912 }
4913 
4914 /*
4915  *     Function: fcp_handle_mapflags
4916  *
4917  *  Description: This function creates a target structure if the ptgt passed
4918  *		 is NULL.  It also kicks off the PLOGI if we are not logged
4919  *		 into the target yet or the PRLI if we are logged into the
4920  *		 target already.  The rest of the treatment is done in the
4921  *		 callbacks of the PLOGI or PRLI.
4922  *
4923  *     Argument: *pptr		FCP Port structure.
4924  *		 *ptgt		Target structure.
4925  *		 *map_entry	Array of fc_portmap_t structures.
4926  *		 link_cnt	Link state count.
4927  *		 tgt_cnt	Target state count.
4928  *		 cause		What caused this function to be called.
4929  *
4930  * Return Value: TRUE	Failed
4931  *		 FALSE	Succeeded
4932  *
4933  *	  Notes: pptr->port_mutex must be owned.
4934  */
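/*
 * The heart of the routine below is choosing which ELS to send and letting
 * its completion callback drive the rest of discovery.  In sketch form (the
 * names match the code that follows):
 *
 *	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
 *	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
 *	icmd = fcp_icmd_alloc(...);	    sized for the larger of PLOGI/PRLI
 *	if (fcp_send_els(pptr, ptgt, icmd, opcode, lcount, tcount, cause)
 *	    != DDI_SUCCESS)
 *		fcp_icmd_free(pptr, icmd);
 */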
4935 static int
4936 fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
4937     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4938 {
4939 	int			lcount;
4940 	int			tcount;
4941 	int			ret = TRUE;
4942 	int			alloc;
4943 	struct fcp_ipkt	*icmd;
4944 	struct fcp_lun	*pseq_lun = NULL;
4945 	uchar_t			opcode;
4946 	int			valid_ptgt_was_passed = FALSE;
4947 
4948 	ASSERT(mutex_owned(&pptr->port_mutex));
4949 
4950 	/*
4951 	 * This case is possible where the FCTL has come up and done discovery
4952 	 * before FCP was loaded and attached. FCTL would have discovered the
4953 	 * devices and later the ULP came online. In this case the ULP would get
4954 	 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4955 	 */
4956 	if (ptgt == NULL) {
4957 		/* don't already have a target */
4958 		mutex_exit(&pptr->port_mutex);
4959 		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4960 		mutex_enter(&pptr->port_mutex);
4961 
4962 		if (ptgt == NULL) {
4963 			fcp_log(CE_WARN, pptr->port_dip,
4964 			    "!FC target allocation failed");
4965 			return (ret);
4966 		}
4967 		mutex_enter(&ptgt->tgt_mutex);
4968 		ptgt->tgt_statec_cause = cause;
4969 		ptgt->tgt_tmp_cnt = 1;
4970 		mutex_exit(&ptgt->tgt_mutex);
4971 	} else {
4972 		valid_ptgt_was_passed = TRUE;
4973 	}
4974 
4975 	/*
4976 	 * Copy in the target parameters
4977 	 */
4978 	mutex_enter(&ptgt->tgt_mutex);
4979 	ptgt->tgt_d_id = map_entry->map_did.port_id;
4980 	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4981 	ptgt->tgt_pd_handle = map_entry->map_pd;
4982 	ptgt->tgt_fca_dev = NULL;
4983 
4984 	/* Copy port and node WWNs */
4985 	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4986 	    FC_WWN_SIZE);
4987 	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4988 	    FC_WWN_SIZE);
4989 
4990 	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4991 	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4992 	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4993 	    valid_ptgt_was_passed) {
4994 		/*
4995 		 * determine if there are any tape LUNs on this target
4996 		 */
4997 		for (pseq_lun = ptgt->tgt_lun;
4998 		    pseq_lun != NULL;
4999 		    pseq_lun = pseq_lun->lun_next) {
5000 			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
5001 			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
5002 				fcp_update_tgt_state(ptgt, FCP_RESET,
5003 				    FCP_LUN_MARK);
5004 				mutex_exit(&ptgt->tgt_mutex);
5005 				return (ret);
5006 			}
5007 		}
5008 	}
5009 
5010 	/*
5011 	 * if a REPORT_LUN_CHANGED unit attention (UA) was received,
5012 	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process
5013 	 */
5014 	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5015 		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5016 		mutex_exit(&ptgt->tgt_mutex);
5017 		mutex_exit(&pptr->port_mutex);
5018 
5019 		ret = fcp_handle_reportlun_changed(ptgt, cause);
5020 
5021 		mutex_enter(&pptr->port_mutex);
5022 		return (ret);
5023 	}
5024 
5025 	/*
5026 	 * If ptgt was NULL when this function was entered, then tgt_node_state
5027 	 * was never specifically initialized but zeroed out which means
5028 	 * FCP_TGT_NODE_NONE.
5029 	 */
5030 	switch (ptgt->tgt_node_state) {
5031 	case FCP_TGT_NODE_NONE:
5032 	case FCP_TGT_NODE_ON_DEMAND:
5033 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5034 		    !fcp_enable_auto_configuration &&
5035 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5036 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5037 		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5038 		    fcp_enable_auto_configuration &&
5039 		    (ptgt->tgt_manual_config_only == 1) &&
5040 		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5041 			/*
5042 			 * If auto configuration is set and
5043 			 * the tgt_manual_config_only flag is set then
5044 			 * we only want the user to be able to change
5045 			 * the state through create_on_demand.
5046 			 */
5047 			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5048 		} else {
5049 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5050 		}
5051 		break;
5052 
5053 	case FCP_TGT_NODE_PRESENT:
5054 		break;
5055 	}
5056 	/*
5057 	 * If we are booting from a fabric device, make sure we
5058 	 * mark the node state appropriately for this target to be
5059 	 * enumerated
5060 	 */
5061 	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5062 		if (bcmp((caddr_t)pptr->port_boot_wwn,
5063 		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5064 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
5065 			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5066 		}
5067 	}
5068 	mutex_exit(&ptgt->tgt_mutex);
5069 
5070 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5071 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
5072 	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5073 	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5074 	    map_entry->map_rscn_info.ulp_rscn_count);
5075 
5076 	mutex_enter(&ptgt->tgt_mutex);
5077 
5078 	/*
5079 	 * Reset target OFFLINE state and mark the target BUSY
5080 	 */
5081 	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5082 	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5083 
5084 	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5085 	lcount = link_cnt;
5086 
5087 	mutex_exit(&ptgt->tgt_mutex);
5088 	mutex_exit(&pptr->port_mutex);
5089 
5090 	/*
5091 	 * if we are already logged in, then we do a PRLI, else
5092 	 * we do a PLOGI first (to get logged in)
5093 	 *
5094 	 * We will not check if we are the PLOGI initiator
5095 	 */
5096 	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5097 	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5098 
5099 	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5100 
5101 	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5102 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5103 	    cause, map_entry->map_rscn_info.ulp_rscn_count);
5104 
5105 	if (icmd == NULL) {
5106 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5107 		/*
5108 		 * We've exited port_mutex before calling fcp_icmd_alloc,
5109 		 * so we need to make sure we reacquire it before returning.
5110 		 */
5111 		mutex_enter(&pptr->port_mutex);
5112 		return (FALSE);
5113 	}
5114 
5115 	/* TRUE is only returned when the target is intentionally skipped */
5116 	ret = FALSE;
5117 	/* discover info about this target */
5118 	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5119 	    lcount, tcount, cause)) == DDI_SUCCESS) {
5120 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5121 	} else {
5122 		fcp_icmd_free(pptr, icmd);
5123 		ret = TRUE;
5124 	}
5125 	mutex_enter(&pptr->port_mutex);
5126 
5127 	return (ret);
5128 }
5129 
5130 /*
5131  *     Function: fcp_send_els
5132  *
5133  *  Description: Sends an ELS to the target specified by the caller.  Supports
5134  *		 PLOGI and PRLI.
5135  *
5136  *     Argument: *pptr		Fcp port.
5137  *		 *ptgt		Target to send the ELS to.
5138  *		 *icmd		Internal packet
5139  *		 opcode		ELS opcode
5140  *		 lcount		Link state change counter
5141  *		 tcount		Target state change counter
5142  *		 cause		What caused the call
5143  *
5144  * Return Value: DDI_SUCCESS
5145  *		 Others
5146  */
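/*
 * Calling conventions: icmd may be passed as NULL, in which case an internal
 * packet is allocated here and freed again if the send fails.  The caller
 * must not hold pptr->port_mutex, since this routine acquires it around the
 * FCP_TGT_STATE_CHANGED() check before issuing the ELS.
 */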
5147 static int
5148 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5149     struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5150 {
5151 	fc_packet_t		*fpkt;
5152 	fc_frame_hdr_t		*hp;
5153 	int			internal = 0;
5154 	int			alloc;
5155 	int			cmd_len;
5156 	int			resp_len;
5157 	int			res = DDI_FAILURE; /* default result */
5158 	int			rval = DDI_FAILURE;
5159 
5160 	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5161 	ASSERT(ptgt->tgt_port == pptr);
5162 
5163 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5164 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5165 	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5166 	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5167 
5168 	if (opcode == LA_ELS_PLOGI) {
5169 		cmd_len = sizeof (la_els_logi_t);
5170 		resp_len = sizeof (la_els_logi_t);
5171 	} else {
5172 		ASSERT(opcode == LA_ELS_PRLI);
5173 		cmd_len = sizeof (la_els_prli_t);
5174 		resp_len = sizeof (la_els_prli_t);
5175 	}
5176 
5177 	if (icmd == NULL) {
5178 		alloc = FCP_MAX(sizeof (la_els_logi_t),
5179 		    sizeof (la_els_prli_t));
5180 		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5181 		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5182 		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5183 		if (icmd == NULL) {
5184 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5185 			return (res);
5186 		}
5187 		internal++;
5188 	}
5189 	fpkt = icmd->ipkt_fpkt;
5190 
5191 	fpkt->pkt_cmdlen = cmd_len;
5192 	fpkt->pkt_rsplen = resp_len;
5193 	fpkt->pkt_datalen = 0;
5194 	icmd->ipkt_retries = 0;
5195 
5196 	/* fill in fpkt info */
5197 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5198 	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5199 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5200 
5201 	/* get ptr to frame hdr in fpkt */
5202 	hp = &fpkt->pkt_cmd_fhdr;
5203 
5204 	/*
5205 	 * fill in frame hdr
5206 	 */
5207 	hp->r_ctl = R_CTL_ELS_REQ;
5208 	hp->s_id = pptr->port_id;	/* source ID */
5209 	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
5210 	hp->type = FC_TYPE_EXTENDED_LS;
5211 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5212 	hp->seq_id = 0;
5213 	hp->rsvd = 0;
5214 	hp->df_ctl  = 0;
5215 	hp->seq_cnt = 0;
5216 	hp->ox_id = 0xffff;		/* i.e. none */
5217 	hp->rx_id = 0xffff;		/* i.e. none */
5218 	hp->ro = 0;
5219 
5220 	/*
5221 	 * at this point we have a filled in cmd pkt
5222 	 *
5223 	 * fill in the respective info, then use the transport to send
5224 	 * the packet
5225 	 *
5226 	 * for a PLOGI call fc_ulp_login(), and
5227 	 * for a PRLI call fc_ulp_issue_els()
5228 	 */
5229 	switch (opcode) {
5230 	case LA_ELS_PLOGI: {
5231 		struct la_els_logi logi;
5232 
5233 		bzero(&logi, sizeof (struct la_els_logi));
5234 
5235 		hp = &fpkt->pkt_cmd_fhdr;
5236 		hp->r_ctl = R_CTL_ELS_REQ;
5237 		logi.ls_code.ls_code = LA_ELS_PLOGI;
5238 		logi.ls_code.mbz = 0;
5239 
5240 		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5241 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5242 
5243 		icmd->ipkt_opcode = LA_ELS_PLOGI;
5244 
5245 		mutex_enter(&pptr->port_mutex);
5246 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5247 
5248 			mutex_exit(&pptr->port_mutex);
5249 
5250 			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5251 			if (rval == FC_SUCCESS) {
5252 				res = DDI_SUCCESS;
5253 				break;
5254 			}
5255 
5256 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5257 
5258 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5259 			    rval, "PLOGI");
5260 		} else {
5261 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5262 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
5263 			    "fcp_send_els1: state change occured"
5264 			    " for D_ID=0x%x", ptgt->tgt_d_id);
5265 			mutex_exit(&pptr->port_mutex);
5266 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5267 		}
5268 		break;
5269 	}
5270 
5271 	case LA_ELS_PRLI: {
5272 		struct la_els_prli	prli;
5273 		struct fcp_prli		*fprli;
5274 
5275 		bzero(&prli, sizeof (struct la_els_prli));
5276 
5277 		hp = &fpkt->pkt_cmd_fhdr;
5278 		hp->r_ctl = R_CTL_ELS_REQ;
5279 
5280 		/* fill in PRLI cmd ELS fields */
5281 		prli.ls_code = LA_ELS_PRLI;
5282 		prli.page_length = 0x10;	/* 16-byte service parameter page */
5283 		prli.payload_length = sizeof (struct la_els_prli);
5284 
5285 		icmd->ipkt_opcode = LA_ELS_PRLI;
5286 
5287 		/* get ptr to PRLI service params */
5288 		fprli = (struct fcp_prli *)prli.service_params;
5289 
5290 		/* fill in service params */
5291 		fprli->type = 0x08;
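		/*
		 * Editorial note: 0x08 is the FC-4 TYPE code for SCSI-FCP,
		 * the same value carried as FC_TYPE_SCSI_FCP in the frame
		 * header elsewhere in this file.
		 */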
5292 		fprli->resvd1 = 0;
5293 		fprli->orig_process_assoc_valid = 0;
5294 		fprli->resp_process_assoc_valid = 0;
5295 		fprli->establish_image_pair = 1;
5296 		fprli->resvd2 = 0;
5297 		fprli->resvd3 = 0;
5298 		fprli->obsolete_1 = 0;
5299 		fprli->obsolete_2 = 0;
5300 		fprli->data_overlay_allowed = 0;
5301 		fprli->initiator_fn = 1;
5302 		fprli->confirmed_compl_allowed = 1;
5303 
5304 		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5305 			fprli->target_fn = 1;
5306 		} else {
5307 			fprli->target_fn = 0;
5308 		}
5309 
5310 		fprli->retry = 1;
5311 		fprli->read_xfer_rdy_disabled = 1;
5312 		fprli->write_xfer_rdy_disabled = 0;
5313 
5314 		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5315 		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5316 
5317 		/* issue the PRLI request */
5318 
5319 		mutex_enter(&pptr->port_mutex);
5320 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5321 
5322 			mutex_exit(&pptr->port_mutex);
5323 
5324 			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5325 			if (rval == FC_SUCCESS) {
5326 				res = DDI_SUCCESS;
5327 				break;
5328 			}
5329 
5330 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5331 
5332 			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5333 			    rval, "PRLI");
5334 		} else {
5335 			mutex_exit(&pptr->port_mutex);
5336 			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5337 		}
5338 		break;
5339 	}
5340 
5341 	default:
5342 		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5343 		break;
5344 	}
5345 
5346 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
5347 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
5348 	    "fcp_send_els: returning %d", res);
5349 
5350 	if (res != DDI_SUCCESS) {
5351 		if (internal) {
5352 			fcp_icmd_free(pptr, icmd);
5353 		}
5354 	}
5355 
5356 	return (res);
5357 }
5358 
5359 
5360 /*
5361  * called internally to update the state of all of the tgts and each LUN
5362  * for this port (i.e. each target known to be attached to this port)
5363  * if they are not already offline
5364  *
5365  * must be called with the port mutex owned
5366  *
5367  * acquires and releases the target mutexes for each target attached
5368  * to this port
5369  */
5370 void
5371 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5372 {
5373 	int i;
5374 	struct fcp_tgt *ptgt;
5375 
5376 	ASSERT(mutex_owned(&pptr->port_mutex));
5377 
5378 	for (i = 0; i < FCP_NUM_HASH; i++) {
5379 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5380 		    ptgt = ptgt->tgt_next) {
5381 			mutex_enter(&ptgt->tgt_mutex);
5382 			fcp_update_tgt_state(ptgt, FCP_SET, state);
5383 			ptgt->tgt_change_cnt++;
5384 			ptgt->tgt_statec_cause = cause;
5385 			ptgt->tgt_tmp_cnt = 1;
5386 			ptgt->tgt_done = 0;
5387 			mutex_exit(&ptgt->tgt_mutex);
5388 		}
5389 	}
5390 }
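
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a caller that wants to flag every known target on a port, for example
 * while the link is being rediscovered, holds the port mutex and calls
 * fcp_update_state() with the state bits to set.  The mask shown below is
 * only an assumed example; real callers pick whatever bits apply.
 *
 *	mutex_enter(&pptr->port_mutex);
 *	fcp_update_state(pptr, FCP_TGT_BUSY | FCP_TGT_MARK, cause);
 *	mutex_exit(&pptr->port_mutex);
 */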
5391 
5392 
5393 static void
5394 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5395 {
5396 	int i;
5397 	int ndevs;
5398 	struct fcp_tgt *ptgt;
5399 
5400 	ASSERT(mutex_owned(&pptr->port_mutex));
5401 
5402 	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5403 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5404 		    ptgt = ptgt->tgt_next) {
5405 			ndevs++;
5406 		}
5407 	}
5408 
5409 	if (ndevs == 0) {
5410 		return;
5411 	}
5412 	pptr->port_tmp_cnt = ndevs;
5413 
5414 	for (i = 0; i < FCP_NUM_HASH; i++) {
5415 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5416 		    ptgt = ptgt->tgt_next) {
5417 			(void) fcp_call_finish_init_held(pptr, ptgt,
5418 			    lcount, ptgt->tgt_change_cnt, cause);
5419 		}
5420 	}
5421 }
5422 
5423 /*
5424  *     Function: fcp_update_tgt_state
5425  *
5426  *  Description: This function updates the field tgt_state of a target.  That
5427  *		 field is a bitmap whose bits can be set or reset
5428  *		 individually.	The action applied to the target state is also
5429  *		 applied to all the LUNs belonging to the target (provided the
5430  *		 LUN is not offline).  As a side effect, when a bit is set the
5431  *		 fields tgt_trace of the target and lun_trace of each updated
5432  *		 LUN are reset to zero.
5433  *
5434  *
5435  *     Argument: *ptgt	Target structure.
5436  *		 flag	Flag indicating what action to apply (set/reset).
5437  *		 state	State bits to update.
5438  *
5439  * Return Value: None
5440  *
5441  *	Context: Interrupt, Kernel or User context.
5442  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5443  *		 calling this function.
5444  */
5445 void
5446 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5447 {
5448 	struct fcp_lun *plun;
5449 
5450 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5451 
5452 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5453 		/* The target is not offline. */
5454 		if (flag == FCP_SET) {
5455 			ptgt->tgt_state |= state;
5456 			ptgt->tgt_trace = 0;
5457 		} else {
5458 			ptgt->tgt_state &= ~state;
5459 		}
5460 
5461 		for (plun = ptgt->tgt_lun; plun != NULL;
5462 		    plun = plun->lun_next) {
5463 			if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5464 				/* The LUN is not offline. */
5465 				if (flag == FCP_SET) {
5466 					plun->lun_state |= state;
5467 					plun->lun_trace = 0;
5468 				} else {
5469 					plun->lun_state &= ~state;
5470 				}
5471 			}
5472 		}
5473 	}
5474 }
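
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * setting and later clearing a state bit on a target and its LUNs.  The
 * FCP_RESET flag is assumed to be the counterpart of FCP_SET declared in
 * fcpvar.h.
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_TGT_BUSY);
 *	...
 *	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_TGT_BUSY);
 *	mutex_exit(&ptgt->tgt_mutex);
 */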
5475 
5476 /*
5477  *     Function: fcp_update_lun_state
5478  *
5479  *  Description: This function updates the field lun_state of a LUN.  That
5480  *		 field is a bitmap whose bits can be set or reset
5481  *		 individually.
5482  *
5483  *     Argument: *plun	LUN structure.
5484  *		 flag	Flag indication what action to apply (set/reset).
5485  *		 flag	Flag indicating what action to apply (set/reset).
5486  *
5487  * Return Value: None
5488  *
5489  *	Context: Interrupt, Kernel or User context.
5490  *		 The mutex of the target (ptgt->tgt_mutex) must be owned when
5491  *		 calling this function.
5492  */
5493 void
5494 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5495 {
5496 	struct fcp_tgt	*ptgt = plun->lun_tgt;
5497 
5498 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
5499 
5500 	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5501 		if (flag == FCP_SET) {
5502 			plun->lun_state |= state;
5503 		} else {
5504 			plun->lun_state &= ~state;
5505 		}
5506 	}
5507 }
5508 
5509 /*
5510  *     Function: fcp_get_port
5511  *
5512  *  Description: This function returns the fcp_port structure from the opaque
5513  *		 handle passed by the caller.  That opaque handle is the handle
5514  *		 used by fp/fctl to identify a particular local port.  That
5515  *		 handle has been stored in the corresponding fcp_port
5516  *		 structure.  This function walks the global list of fcp_port
5517  *		 structures until it finds one whose port_fp_handle matches
5518  *		 the handle passed by the caller.  The mutex fcp_global_mutex
5519  *		 is held while the global list is walked and is released
5520  *		 afterwards.
5521  *
5522  *     Argument: port_handle	Opaque handle that fp/fctl uses to identify a
5523  *				particular port.
5524  *
5525  * Return Value: NULL		Not found.
5526  *		 Not NULL	Pointer to the fcp_port structure.
5527  *
5528  *	Context: Interrupt, Kernel or User context.
5529  */
5530 static struct fcp_port *
5531 fcp_get_port(opaque_t port_handle)
5532 {
5533 	struct fcp_port *pptr;
5534 
5535 	ASSERT(port_handle != NULL);
5536 
5537 	mutex_enter(&fcp_global_mutex);
5538 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5539 		if (pptr->port_fp_handle == port_handle) {
5540 			break;
5541 		}
5542 	}
5543 	mutex_exit(&fcp_global_mutex);
5544 
5545 	return (pptr);
5546 }
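
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * entry points called by fp/fctl receive only the opaque port handle, so
 * they typically resolve it first and bail out if no matching port exists.
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		return;
 *	}
 */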
5547 
5548 
5549 static void
5550 fcp_unsol_callback(fc_packet_t *fpkt)
5551 {
5552 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5553 	struct fcp_port *pptr = icmd->ipkt_port;
5554 
5555 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5556 		caddr_t state, reason, action, expln;
5557 
5558 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
5559 		    &action, &expln);
5560 
5561 		fcp_log(CE_WARN, pptr->port_dip,
5562 		    "!couldn't post response to unsolicited request: "
5563 		    "state=%s reason=%s ox_id=%x rx_id=%x",
5564 		    state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5565 		    fpkt->pkt_cmd_fhdr.rx_id);
5566 	}
5567 	fcp_icmd_free(pptr, icmd);
5568 }
5569 
5570 
5571 /*
5572  * Perform general purpose preparation of a response to an unsolicited request
5573  */
5574 static void
5575 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5576     uchar_t r_ctl, uchar_t type)
5577 {
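	/*
	 * Editorial note: the frame header of the unsolicited request is
	 * echoed back to its originator: our d_id is the sender's s_id and
	 * vice versa, the sequence/exchange ids are reused, and completion
	 * is routed to fcp_unsol_callback(), which frees the internal packet.
	 */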
5578 	pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5579 	pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5580 	pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5581 	pkt->pkt_cmd_fhdr.type = type;
5582 	pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5583 	pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5584 	pkt->pkt_cmd_fhdr.df_ctl  = buf->ub_frame.df_ctl;
5585 	pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5586 	pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5587 	pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5588 	pkt->pkt_cmd_fhdr.ro = 0;
5589 	pkt->pkt_cmd_fhdr.rsvd = 0;
5590 	pkt->pkt_comp = fcp_unsol_callback;
5591 	pkt->pkt_pd = NULL;
5592 	pkt->pkt_ub_resp_token = (opaque_t)buf;
5593 }
5594 
5595 
5596 /*ARGSUSED*/
5597 static int
5598 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5599 {
5600 	fc_packet_t		*fpkt;
5601 	struct la_els_prli	prli;
5602 	struct fcp_prli		*fprli;
5603 	struct fcp_ipkt	*icmd;
5604 	struct la_els_prli	*from;
5605 	struct fcp_prli		*orig;
5606 	struct fcp_tgt	*ptgt;
5607 	int			tcount = 0;
5608 	int			lcount;
5609 
5610 	from = (struct la_els_prli *)buf->ub_buffer;
5611 	orig = (struct fcp_prli *)from->service_params;
5612 	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5613 	    NULL) {
5614 		mutex_enter(&ptgt->tgt_mutex);
5615 		tcount = ptgt->tgt_change_cnt;
5616 		mutex_exit(&ptgt->tgt_mutex);
5617 	}
5618 
5619 	mutex_enter(&pptr->port_mutex);
5620 	lcount = pptr->port_link_cnt;
5621 	mutex_exit(&pptr->port_mutex);
5622 
5623 	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5624 	    sizeof (la_els_prli_t), 0,
5625 	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5626 	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5627 		return (FC_FAILURE);
5628 	}
5629 
5630 	fpkt = icmd->ipkt_fpkt;
5631 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5632 	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5633 	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5634 	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5635 	fpkt->pkt_rsplen = 0;
5636 	fpkt->pkt_datalen = 0;
5637 
5638 	icmd->ipkt_opcode = LA_ELS_PRLI;
5639 
5640 	bzero(&prli, sizeof (struct la_els_prli));
5641 	fprli = (struct fcp_prli *)prli.service_params;
5642 	prli.ls_code = LA_ELS_ACC;
5643 	prli.page_length = 0x10;
5644 	prli.payload_length = sizeof (struct la_els_prli);
5645 
5646 	/* fill in service params */
5647 	fprli->type = 0x08;
5648 	fprli->resvd1 = 0;
5649 	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5650 	fprli->orig_process_associator = orig->orig_process_associator;
5651 	fprli->resp_process_assoc_valid = 0;
5652 	fprli->establish_image_pair = 1;
5653 	fprli->resvd2 = 0;
5654 	fprli->resvd3 = 0;
5655 	fprli->obsolete_1 = 0;
5656 	fprli->obsolete_2 = 0;
5657 	fprli->data_overlay_allowed = 0;
5658 	fprli->initiator_fn = 1;
5659 	fprli->confirmed_compl_allowed = 1;
5660 
5661 	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5662 		fprli->target_fn = 1;
5663 	} else {
5664 		fprli->target_fn = 0;
5665 	}
5666 
5667 	fprli->retry = 1;
5668 	fprli->read_xfer_rdy_disabled = 1;
5669 	fprli->write_xfer_rdy_disabled = 0;
5670 
5671 	/* save the unsol prli payload first */
5672 	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5673 	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5674 
5675 	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5676 	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5677 
5678 	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5679 
5680 	mutex_enter(&pptr->port_mutex);
5681 	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5682 		int rval;
5683 		mutex_exit(&pptr->port_mutex);
5684 
5685 		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5686 		    FC_SUCCESS) {
5687 			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5688 			    ptgt != NULL) {
5689 				fcp_queue_ipkt(pptr, fpkt);
5690 				return (FC_SUCCESS);
5691 			}
5692 			/* Let it timeout */
5693 			fcp_icmd_free(pptr, icmd);
5694 			return (FC_FAILURE);
5695 		}
5696 	} else {
5697 		mutex_exit(&pptr->port_mutex);
5698 		fcp_icmd_free(pptr, icmd);
5699 		return (FC_FAILURE);
5700 	}
5701 
5702 	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5703 
5704 	return (FC_SUCCESS);
5705 }
5706 
5707 /*
5708  *     Function: fcp_icmd_alloc
5709  *
5710  *  Description: This function allocates a fcp_ipkt structure.  The pkt_comp
5711  *		 field is initialized to fcp_icmd_callback.  Sometimes it is
5712  *		 modified by the caller (such as fcp_send_scsi).  The
5713  *		 structure is also tied to the state of the line and of the
5714  *		 target at a particular time.  That link is established by
5715  *		 setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5716  *		 and tcount which came respectively from pptr->link_cnt and
5717  *		 and tcount which came respectively from pptr->port_link_cnt and
5718  *
5719  *     Argument: *pptr		Fcp port.
5720  *		 *ptgt		Target (destination of the command).
5721  *		 cmd_len	Length of the command.
5722  *		 resp_len	Length of the expected response.
5723  *		 data_len	Length of the data.
5724  *		 nodma		Indicates whether the command and response
5725  *				will be transferred through DMA or not.
5726  *		 lcount		Link state change counter.
5727  *		 tcount		Target state change counter.
5728  *		 cause		Reason that led to this call.
5729  *
5730  * Return Value: NULL		Failed.
5731  *		 Not NULL	Internal packet address.
5732  */
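/*
 * Usage sketch (editorial illustration, not part of the original source):
 * the typical life cycle of an internal packet, as used by fcp_send_els()
 * and fcp_send_scsi() in this file, is allocate, hand to the transport,
 * and free on failure; on success the completion callback frees it.
 *
 *	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0,
 *	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
 *	    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
 *	if (icmd == NULL)
 *		return (DDI_FAILURE);
 *	...
 *	if (fc_ulp_issue_els(pptr->port_fp_handle, icmd->ipkt_fpkt) !=
 *	    FC_SUCCESS)
 *		fcp_icmd_free(pptr, icmd);
 */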
5733 static struct fcp_ipkt *
5734 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5735     int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5736     uint32_t rscn_count)
5737 {
5738 	int			dma_setup = 0;
5739 	fc_packet_t		*fpkt;
5740 	struct fcp_ipkt	*icmd = NULL;
5741 
5742 	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5743 	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5744 	    KM_NOSLEEP);
5745 	if (icmd == NULL) {
5746 		fcp_log(CE_WARN, pptr->port_dip,
5747 		    "!internal packet allocation failed");
5748 		return (NULL);
5749 	}
5750 
5751 	/*
5752 	 * initialize the allocated packet
5753 	 */
5754 	icmd->ipkt_nodma = nodma;
5755 	icmd->ipkt_next = icmd->ipkt_prev = NULL;
5756 	icmd->ipkt_lun = NULL;
5757 
5758 	icmd->ipkt_link_cnt = lcount;
5759 	icmd->ipkt_change_cnt = tcount;
5760 	icmd->ipkt_cause = cause;
5761 
5762 	mutex_enter(&pptr->port_mutex);
5763 	icmd->ipkt_port = pptr;
5764 	mutex_exit(&pptr->port_mutex);
5765 
5766 	/* keep track of amt of data to be sent in pkt */
5767 	icmd->ipkt_cmdlen = cmd_len;
5768 	icmd->ipkt_resplen = resp_len;
5769 	icmd->ipkt_datalen = data_len;
5770 
5771 	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5772 	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5773 
5774 	/* set pkt's private ptr to point to cmd pkt */
5775 	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5776 
5777 	/* set FCA private ptr to memory just beyond */
5778 	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5779 	    ((char *)icmd + sizeof (struct fcp_ipkt) +
5780 	    pptr->port_dmacookie_sz);
5781 
5782 	/* get ptr to fpkt substruct and fill it in */
5783 	fpkt = icmd->ipkt_fpkt;
5784 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5785 	    sizeof (struct fcp_ipkt));
5786 
5787 	if (ptgt != NULL) {
5788 		icmd->ipkt_tgt = ptgt;
5789 		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5790 	}
5791 
5792 	fpkt->pkt_comp = fcp_icmd_callback;
5793 	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5794 	fpkt->pkt_cmdlen = cmd_len;
5795 	fpkt->pkt_rsplen = resp_len;
5796 	fpkt->pkt_datalen = data_len;
5797 
5798 	/*
5799 	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5800 	 * rscn_count, as fcp knows it, down to the transport. If a valid count was
5801 	 * passed into this function, we allocate memory to actually pass down
5802 	 * this info.
5803 	 *
5804 	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5805 	 * basically mean that fcp will not be able to help the transport
5806 	 * distinguish whether a new RSCN has come after fcp was last informed about
5807 	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5808 	 * 5068068 where the device might end up going offline in case of RSCN
5809 	 * storms.
5810 	 */
5811 	fpkt->pkt_ulp_rscn_infop = NULL;
5812 	if (rscn_count != FC_INVALID_RSCN_COUNT) {
5813 		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5814 		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5815 		if (fpkt->pkt_ulp_rscn_infop == NULL) {
5816 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5817 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5818 			    "Failed to alloc memory to pass rscn info");
5819 		}
5820 	}
5821 
5822 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5823 		fc_ulp_rscn_info_t	*rscnp;
5824 
5825 		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5826 		rscnp->ulp_rscn_count = rscn_count;
5827 	}
5828 
5829 	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5830 		goto fail;
5831 	}
5832 	dma_setup++;
5833 
5834 	/*
5835 	 * Must hold target mutex across setting of pkt_pd and call to
5836 	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5837 	 * away while we're not looking.
5838 	 */
5839 	if (ptgt != NULL) {
5840 		mutex_enter(&ptgt->tgt_mutex);
5841 		fpkt->pkt_pd = ptgt->tgt_pd_handle;
5842 
5843 		/* ask transport to do its initialization on this pkt */
5844 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5845 		    != FC_SUCCESS) {
5846 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5847 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5848 			    "fc_ulp_init_packet failed");
5849 			mutex_exit(&ptgt->tgt_mutex);
5850 			goto fail;
5851 		}
5852 		mutex_exit(&ptgt->tgt_mutex);
5853 	} else {
5854 		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5855 		    != FC_SUCCESS) {
5856 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
5857 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
5858 			    "fc_ulp_init_packet failed");
5859 			goto fail;
5860 		}
5861 	}
5862 
5863 	mutex_enter(&pptr->port_mutex);
5864 	if (pptr->port_state & (FCP_STATE_DETACHING |
5865 	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5866 		int rval;
5867 
5868 		mutex_exit(&pptr->port_mutex);
5869 
5870 		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5871 		ASSERT(rval == FC_SUCCESS);
5872 
5873 		goto fail;
5874 	}
5875 
5876 	if (ptgt != NULL) {
5877 		mutex_enter(&ptgt->tgt_mutex);
5878 		ptgt->tgt_ipkt_cnt++;
5879 		mutex_exit(&ptgt->tgt_mutex);
5880 	}
5881 
5882 	pptr->port_ipkt_cnt++;
5883 
5884 	mutex_exit(&pptr->port_mutex);
5885 
5886 	return (icmd);
5887 
5888 fail:
5889 	if (fpkt->pkt_ulp_rscn_infop != NULL) {
5890 		kmem_free(fpkt->pkt_ulp_rscn_infop,
5891 		    sizeof (fc_ulp_rscn_info_t));
5892 		fpkt->pkt_ulp_rscn_infop = NULL;
5893 	}
5894 
5895 	if (dma_setup) {
5896 		fcp_free_dma(pptr, icmd);
5897 	}
5898 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5899 	    (size_t)pptr->port_dmacookie_sz);
5900 
5901 	return (NULL);
5902 }
5903 
5904 /*
5905  *     Function: fcp_icmd_free
5906  *
5907  *  Description: Frees the internal command passed by the caller.
5908  *
5909  *     Argument: *pptr		Fcp port.
5910  *		 *icmd		Internal packet to free.
5911  *
5912  * Return Value: None
5913  */
5914 static void
5915 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5916 {
5917 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
5918 
5919 	/* Let the underlying layers do their cleanup. */
5920 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5921 	    icmd->ipkt_fpkt);
5922 
5923 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5924 		kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5925 		    sizeof (fc_ulp_rscn_info_t));
5926 	}
5927 
5928 	fcp_free_dma(pptr, icmd);
5929 
5930 	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5931 	    (size_t)pptr->port_dmacookie_sz);
5932 
5933 	mutex_enter(&pptr->port_mutex);
5934 
5935 	if (ptgt) {
5936 		mutex_enter(&ptgt->tgt_mutex);
5937 		ptgt->tgt_ipkt_cnt--;
5938 		mutex_exit(&ptgt->tgt_mutex);
5939 	}
5940 
5941 	pptr->port_ipkt_cnt--;
5942 	mutex_exit(&pptr->port_mutex);
5943 }
5944 
5945 /*
5946  *     Function: fcp_alloc_dma
5947  *
5948  *  Description: Allocates the DMA resources required for the internal
5949  *		 packet.
5950  *
5951  *     Argument: *pptr	FCP port.
5952  *		 *icmd	Internal FCP packet.
5953  *		 nodma	Indicates if the Cmd and Resp will be DMAed.
5954  *		 flags	Allocation flags (Sleep or NoSleep).
5955  *
5956  * Return Value: FC_SUCCESS
5957  *		 FC_NOMEM
5958  */
5959 static int
5960 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5961     int nodma, int flags)
5962 {
5963 	int		rval;
5964 	size_t		real_size;
5965 	uint_t		ccount;
5966 	int		bound = 0;
5967 	int		cmd_resp = 0;
5968 	fc_packet_t	*fpkt;
5969 	ddi_dma_cookie_t	pkt_data_cookie;
5970 	ddi_dma_cookie_t	*cp;
5971 	uint32_t		cnt;
5972 
5973 	fpkt = &icmd->ipkt_fc_packet;
5974 
5975 	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5976 	    fpkt->pkt_resp_dma == NULL);
5977 
5978 	icmd->ipkt_nodma = nodma;
5979 
5980 	if (nodma) {
5981 		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5982 		if (fpkt->pkt_cmd == NULL) {
5983 			goto fail;
5984 		}
5985 
5986 		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5987 		if (fpkt->pkt_resp == NULL) {
5988 			goto fail;
5989 		}
5990 	} else {
5991 		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5992 
5993 		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5994 		if (rval == FC_FAILURE) {
5995 			ASSERT(fpkt->pkt_cmd_dma == NULL &&
5996 			    fpkt->pkt_resp_dma == NULL);
5997 			goto fail;
5998 		}
5999 		cmd_resp++;
6000 	}
6001 
6002 	if ((fpkt->pkt_datalen != 0) &&
6003 	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
6004 		/*
6005 		 * set up DMA handle and memory for the data in this packet
6006 		 */
6007 		if (ddi_dma_alloc_handle(pptr->port_dip,
6008 		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6009 		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6010 			goto fail;
6011 		}
6012 
6013 		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6014 		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6015 		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6016 		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6017 			goto fail;
6018 		}
6019 
6020 		/* was the DMA memory obtained smaller than the size requested? */
6021 		if (real_size < fpkt->pkt_datalen) {
6022 			goto fail;
6023 		}
6024 
6025 		/* bind DMA address and handle together */
6026 		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6027 		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6028 		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6029 		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6030 			goto fail;
6031 		}
6032 		bound++;
6033 
6034 		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6035 			goto fail;
6036 		}
6037 
6038 		fpkt->pkt_data_cookie_cnt = ccount;
6039 
6040 		cp = fpkt->pkt_data_cookie;
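		/*
		 * Editorial note: copy the first cookie, then walk the
		 * remaining cookies into the array that fcp_icmd_alloc()
		 * laid out just past the fcp_ipkt (pkt_data_cookie).
		 */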
6041 		*cp = pkt_data_cookie;
6042 		cp++;
6043 
6044 		for (cnt = 1; cnt < ccount; cnt++, cp++) {
6045 			ddi_dma_nextcookie(fpkt->pkt_data_dma,
6046 			    &pkt_data_cookie);
6047 			*cp = pkt_data_cookie;
6048 		}
6049 
6050 	} else if (fpkt->pkt_datalen != 0) {
6051 		/*
6052 		 * If it's a pseudo FCA, then it can't support DMA even in
6053 		 * SCSI data phase.
6054 		 */
6055 		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6056 		if (fpkt->pkt_data == NULL) {
6057 			goto fail;
6058 		}
6059 
6060 	}
6061 
6062 	return (FC_SUCCESS);
6063 
6064 fail:
6065 	if (bound) {
6066 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6067 	}
6068 
6069 	if (fpkt->pkt_data_dma) {
6070 		if (fpkt->pkt_data) {
6071 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6072 		}
6073 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6074 	} else {
6075 		if (fpkt->pkt_data) {
6076 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6077 		}
6078 	}
6079 
6080 	if (nodma) {
6081 		if (fpkt->pkt_cmd) {
6082 			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6083 		}
6084 		if (fpkt->pkt_resp) {
6085 			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6086 		}
6087 	} else {
6088 		if (cmd_resp) {
6089 			fcp_free_cmd_resp(pptr, fpkt);
6090 		}
6091 	}
6092 
6093 	return (FC_NOMEM);
6094 }
6095 
6096 
6097 static void
6098 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6099 {
6100 	fc_packet_t *fpkt = icmd->ipkt_fpkt;
6101 
6102 	if (fpkt->pkt_data_dma) {
6103 		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6104 		if (fpkt->pkt_data) {
6105 			ddi_dma_mem_free(&fpkt->pkt_data_acc);
6106 		}
6107 		ddi_dma_free_handle(&fpkt->pkt_data_dma);
6108 	} else {
6109 		if (fpkt->pkt_data) {
6110 			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6111 		}
6112 		/*
6113 		 * Need we reset pkt_* to zero???
6114 		 */
6115 	}
6116 
6117 	if (icmd->ipkt_nodma) {
6118 		if (fpkt->pkt_cmd) {
6119 			kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6120 		}
6121 		if (fpkt->pkt_resp) {
6122 			kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6123 		}
6124 	} else {
6125 		ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6126 
6127 		fcp_free_cmd_resp(pptr, fpkt);
6128 	}
6129 }
6130 
6131 /*
6132  *     Function: fcp_lookup_target
6133  *
6134  *  Description: Finds a target given a WWN.
6135  *
6136  *     Argument: *pptr	FCP port.
6137  *		 *wwn	World Wide Name of the device to look for.
6138  *
6139  * Return Value: NULL		No target found
6140  *		 Not NULL	Target structure
6141  *
6142  *	Context: Interrupt context.
6143  *		 The mutex pptr->port_mutex must be owned.
6144  */
6145 /* ARGSUSED */
6146 static struct fcp_tgt *
6147 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6148 {
6149 	int			hash;
6150 	struct fcp_tgt	*ptgt;
6151 
6152 	ASSERT(mutex_owned(&pptr->port_mutex));
6153 
6154 	hash = FCP_HASH(wwn);
6155 
6156 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6157 	    ptgt = ptgt->tgt_next) {
6158 		if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6159 		    bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6160 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
6161 			break;
6162 		}
6163 	}
6164 
6165 	return (ptgt);
6166 }
6167 
6168 
6169 /*
6170  * Find target structure given a port identifier
6171  */
6172 static struct fcp_tgt *
6173 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6174 {
6175 	fc_portid_t		port_id;
6176 	la_wwn_t		pwwn;
6177 	struct fcp_tgt	*ptgt = NULL;
6178 
6179 	port_id.priv_lilp_posit = 0;
6180 	port_id.port_id = d_id;
6181 	if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6182 	    &pwwn) == FC_SUCCESS) {
6183 		mutex_enter(&pptr->port_mutex);
6184 		ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6185 		mutex_exit(&pptr->port_mutex);
6186 	}
6187 
6188 	return (ptgt);
6189 }
6190 
6191 
6192 /*
6193  * the packet completion callback routine for info cmd pkts
6194  *
6195  * this means fpkt points to a response to either a PLOGI or a PRLI
6196  *
6197  * if there is an error an attempt is made to call a routine to resend
6198  * the command that failed
6199  */
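/*
 * Editorial note: on a successful PLOGI response this callback reuses the
 * same internal packet to send the PRLI (see the LA_ELS_PLOGI case below);
 * on a successful PRLI response it allocates LUN 0 if needed and issues a
 * REPORT LUN through fcp_send_scsi(), whose completion is handled by
 * fcp_scsi_callback() further down.
 */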
6200 static void
6201 fcp_icmd_callback(fc_packet_t *fpkt)
6202 {
6203 	struct fcp_ipkt	*icmd;
6204 	struct fcp_port	*pptr;
6205 	struct fcp_tgt	*ptgt;
6206 	struct la_els_prli	*prli;
6207 	struct la_els_prli	prli_s;
6208 	struct fcp_prli		*fprli;
6209 	struct fcp_lun	*plun;
6210 	int		free_pkt = 1;
6211 	int		rval;
6212 	ls_code_t	resp;
6213 	uchar_t		prli_acc = 0;
6214 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
6215 	int		lun0_newalloc;
6216 
6217 	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6218 
6219 	/* get ptrs to the port and target structs for the cmd */
6220 	pptr = icmd->ipkt_port;
6221 	ptgt = icmd->ipkt_tgt;
6222 
6223 	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6224 
6225 	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6226 		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6227 		    sizeof (prli_s));
6228 		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6229 	}
6230 
6231 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6232 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6233 	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
6234 	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6235 	    ptgt->tgt_d_id);
6236 
6237 	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6238 	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6239 
6240 		mutex_enter(&ptgt->tgt_mutex);
6241 		if (ptgt->tgt_pd_handle == NULL) {
6242 			/*
6243 			 * in a fabric environment the port device handles
6244 			 * get created only after successful LOGIN into the
6245 			 * transport, so the transport makes this port
6246 			 * device (pd) handle available in this packet, so
6247 			 * save it now
6248 			 */
6249 			ASSERT(fpkt->pkt_pd != NULL);
6250 			ptgt->tgt_pd_handle = fpkt->pkt_pd;
6251 		}
6252 		mutex_exit(&ptgt->tgt_mutex);
6253 
6254 		/* which ELS cmd is this response for ?? */
6255 		switch (icmd->ipkt_opcode) {
6256 		case LA_ELS_PLOGI:
6257 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6258 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6259 			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6260 			    ptgt->tgt_d_id,
6261 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6262 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6263 
6264 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6265 			    FCP_TGT_TRACE_15);
6266 
6267 			/* Note that we are not allocating a new icmd */
6268 			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6269 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6270 			    icmd->ipkt_cause) != DDI_SUCCESS) {
6271 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6272 				    FCP_TGT_TRACE_16);
6273 				goto fail;
6274 			}
6275 			break;
6276 
6277 		case LA_ELS_PRLI:
6278 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6279 			    fcp_trace, FCP_BUF_LEVEL_5, 0,
6280 			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6281 
6282 			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6283 			    FCP_TGT_TRACE_17);
6284 
6285 			prli = &prli_s;
6286 
6287 			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6288 			    sizeof (prli_s));
6289 
6290 			fprli = (struct fcp_prli *)prli->service_params;
6291 
6292 			mutex_enter(&ptgt->tgt_mutex);
6293 			ptgt->tgt_icap = fprli->initiator_fn;
6294 			ptgt->tgt_tcap = fprli->target_fn;
6295 			mutex_exit(&ptgt->tgt_mutex);
6296 
6297 			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6298 				/*
6299 				 * this FCP device does not support target mode
6300 				 */
6301 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6302 				    FCP_TGT_TRACE_18);
6303 				goto fail;
6304 			}
6305 			if (fprli->retry == 1) {
6306 				fc_ulp_disable_relogin(pptr->port_fp_handle,
6307 				    &ptgt->tgt_port_wwn);
6308 			}
6309 
6310 			/* target is no longer offline */
6311 			mutex_enter(&pptr->port_mutex);
6312 			mutex_enter(&ptgt->tgt_mutex);
6313 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6314 				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6315 				    FCP_TGT_MARK);
6316 			} else {
6317 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6318 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6319 				    "fcp_icmd_callback,1: state change "
6320 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6321 				mutex_exit(&ptgt->tgt_mutex);
6322 				mutex_exit(&pptr->port_mutex);
6323 				goto fail;
6324 			}
6325 			mutex_exit(&ptgt->tgt_mutex);
6326 			mutex_exit(&pptr->port_mutex);
6327 
6328 			/*
6329 			 * lun 0 should always respond to inquiry, so
6330 			 * get the LUN struct for LUN 0
6331 			 *
6332 			 * Currently we deal with first level of addressing.
6333 			 * If / when we start supporting 0x device types
6334 			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6335 			 * this logic will need revisiting.
6336 			 */
6337 			lun0_newalloc = 0;
6338 			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6339 				/*
6340 				 * no LUN struct for LUN 0 yet exists,
6341 				 * so create one
6342 				 */
6343 				plun = fcp_alloc_lun(ptgt);
6344 				if (plun == NULL) {
6345 					fcp_log(CE_WARN, pptr->port_dip,
6346 					    "!Failed to allocate lun 0 for"
6347 					    " D_ID=%x", ptgt->tgt_d_id);
6348 					goto fail;
6349 				}
6350 				lun0_newalloc = 1;
6351 			}
6352 
6353 			/* fill in LUN info */
6354 			mutex_enter(&ptgt->tgt_mutex);
6355 			/*
6356 			 * consider lun 0 as device not connected if it is
6357 			 * offlined or newly allocated
6358 			 */
6359 			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6360 			    lun0_newalloc) {
6361 				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6362 			}
6363 			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6364 			plun->lun_state &= ~FCP_LUN_OFFLINE;
6365 			ptgt->tgt_lun_cnt = 1;
6366 			ptgt->tgt_report_lun_cnt = 0;
6367 			mutex_exit(&ptgt->tgt_mutex);
6368 
6369 			/* Retrieve the rscn count (if a valid one exists) */
6370 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6371 				rscn_count = ((fc_ulp_rscn_info_t *)
6372 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6373 				    ->ulp_rscn_count;
6374 			} else {
6375 				rscn_count = FC_INVALID_RSCN_COUNT;
6376 			}
6377 
6378 			/* send Report Lun request to target */
6379 			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6380 			    sizeof (struct fcp_reportlun_resp),
6381 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6382 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6383 				mutex_enter(&pptr->port_mutex);
6384 				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6385 					fcp_log(CE_WARN, pptr->port_dip,
6386 					    "!Failed to send REPORT LUN to"
6387 					    "  D_ID=%x", ptgt->tgt_d_id);
6388 				} else {
6389 					FCP_TRACE(fcp_logq,
6390 					    pptr->port_instbuf, fcp_trace,
6391 					    FCP_BUF_LEVEL_5, 0,
6392 					    "fcp_icmd_callback,2: state change"
6393 					    " occurred for D_ID=0x%x",
6394 					    ptgt->tgt_d_id);
6395 				}
6396 				mutex_exit(&pptr->port_mutex);
6397 
6398 				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6399 				    FCP_TGT_TRACE_19);
6400 
6401 				goto fail;
6402 			} else {
6403 				free_pkt = 0;
6404 				fcp_icmd_free(pptr, icmd);
6405 			}
6406 			break;
6407 
6408 		default:
6409 			fcp_log(CE_WARN, pptr->port_dip,
6410 			    "!fcp_icmd_callback Invalid opcode");
6411 			goto fail;
6412 		}
6413 
6414 		return;
6415 	}
6416 
6417 
6418 	/*
6419 	 * Other PLOGI failures are not retried as the
6420 	 * transport does it already
6421 	 */
6422 	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6423 		if (fcp_is_retryable(icmd) &&
6424 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6425 
6426 			if (FCP_MUST_RETRY(fpkt)) {
6427 				fcp_queue_ipkt(pptr, fpkt);
6428 				return;
6429 			}
6430 
6431 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6432 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6433 			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
6434 			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6435 			    fpkt->pkt_reason);
6436 
6437 			/*
6438 			 * Retry by recalling the routine that
6439 			 * originally queued this packet
6440 			 */
6441 			mutex_enter(&pptr->port_mutex);
6442 			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6443 				caddr_t msg;
6444 
6445 				mutex_exit(&pptr->port_mutex);
6446 
6447 				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6448 
6449 				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6450 					fpkt->pkt_timeout +=
6451 					    FCP_TIMEOUT_DELTA;
6452 				}
6453 
6454 				rval = fc_ulp_issue_els(pptr->port_fp_handle,
6455 				    fpkt);
6456 				if (rval == FC_SUCCESS) {
6457 					return;
6458 				}
6459 
6460 				if (rval == FC_STATEC_BUSY ||
6461 				    rval == FC_OFFLINE) {
6462 					fcp_queue_ipkt(pptr, fpkt);
6463 					return;
6464 				}
6465 				(void) fc_ulp_error(rval, &msg);
6466 
6467 				fcp_log(CE_NOTE, pptr->port_dip,
6468 				    "!ELS 0x%x failed to d_id=0x%x;"
6469 				    " %s", icmd->ipkt_opcode,
6470 				    ptgt->tgt_d_id, msg);
6471 			} else {
6472 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6473 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
6474 				    "fcp_icmd_callback,3: state change "
6475 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6476 				mutex_exit(&pptr->port_mutex);
6477 			}
6478 		}
6479 	} else {
6480 		if (fcp_is_retryable(icmd) &&
6481 		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6482 			if (FCP_MUST_RETRY(fpkt)) {
6483 				fcp_queue_ipkt(pptr, fpkt);
6484 				return;
6485 			}
6486 		}
6487 		mutex_enter(&pptr->port_mutex);
6488 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6489 		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6490 			mutex_exit(&pptr->port_mutex);
6491 			fcp_print_error(fpkt);
6492 		} else {
6493 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6494 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6495 			    "fcp_icmd_callback,4: state change occurred"
6496 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6497 			mutex_exit(&pptr->port_mutex);
6498 		}
6499 	}
6500 
6501 fail:
6502 	if (free_pkt) {
6503 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6504 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
6505 		fcp_icmd_free(pptr, icmd);
6506 	}
6507 }
6508 
6509 
6510 /*
6511  * called internally to send an info cmd using the transport
6512  *
6513  * sends either an INQ or a REPORT_LUN
6514  *
6515  * when the packet is completed fcp_scsi_callback is called
6516  */
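/*
 * Usage sketch (editorial illustration, not part of the original source):
 * this mirrors how fcp_icmd_callback() above starts LUN discovery once the
 * PRLI has completed.
 *
 *	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
 *	    sizeof (struct fcp_reportlun_resp),
 *	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
 *	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
 *		... handle the failure ...
 *	}
 */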
6517 static int
6518 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6519     int lcount, int tcount, int cause, uint32_t rscn_count)
6520 {
6521 	int			nodma;
6522 	struct fcp_ipkt		*icmd;
6523 	struct fcp_tgt		*ptgt;
6524 	struct fcp_port		*pptr;
6525 	fc_frame_hdr_t		*hp;
6526 	fc_packet_t		*fpkt;
6527 	struct fcp_cmd		fcp_cmd;
6528 	struct fcp_cmd		*fcmd;
6529 	union scsi_cdb		*scsi_cdb;
6530 
6531 	ASSERT(plun != NULL);
6532 
6533 	ptgt = plun->lun_tgt;
6534 	ASSERT(ptgt != NULL);
6535 
6536 	pptr = ptgt->tgt_port;
6537 	ASSERT(pptr != NULL);
6538 
6539 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6540 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6541 	    "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6542 
6543 	nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6544 	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6545 	    FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6546 	    rscn_count);
6547 
6548 	if (icmd == NULL) {
6549 		return (DDI_FAILURE);
6550 	}
6551 
6552 	fpkt = icmd->ipkt_fpkt;
6553 	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6554 	icmd->ipkt_retries = 0;
6555 	icmd->ipkt_opcode = opcode;
6556 	icmd->ipkt_lun = plun;
6557 
6558 	if (nodma) {
6559 		fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6560 	} else {
6561 		fcmd = &fcp_cmd;
6562 	}
6563 	bzero(fcmd, sizeof (struct fcp_cmd));
6564 
6565 	fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6566 
6567 	hp = &fpkt->pkt_cmd_fhdr;
6568 
6569 	hp->s_id = pptr->port_id;
6570 	hp->d_id = ptgt->tgt_d_id;
6571 	hp->r_ctl = R_CTL_COMMAND;
6572 	hp->type = FC_TYPE_SCSI_FCP;
6573 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6574 	hp->rsvd = 0;
6575 	hp->seq_id = 0;
6576 	hp->seq_cnt = 0;
6577 	hp->ox_id = 0xffff;
6578 	hp->rx_id = 0xffff;
6579 	hp->ro = 0;
6580 
6581 	bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6582 
6583 	/*
6584 	 * Request SCSI target for expedited processing
6585 	 */
6586 
6587 	/*
6588 	 * Set up for untagged queuing because we do not
6589 	 * know if the fibre device supports queuing.
6590 	 */
6591 	fcmd->fcp_cntl.cntl_reserved_0 = 0;
6592 	fcmd->fcp_cntl.cntl_reserved_1 = 0;
6593 	fcmd->fcp_cntl.cntl_reserved_2 = 0;
6594 	fcmd->fcp_cntl.cntl_reserved_3 = 0;
6595 	fcmd->fcp_cntl.cntl_reserved_4 = 0;
6596 	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6597 	scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6598 
6599 	switch (opcode) {
6600 	case SCMD_INQUIRY_PAGE83:
6601 		/*
6602 		 * Prepare to get the Inquiry VPD page 83 information
6603 		 */
6604 		fcmd->fcp_cntl.cntl_read_data = 1;
6605 		fcmd->fcp_cntl.cntl_write_data = 0;
6606 		fcmd->fcp_data_len = alloc_len;
6607 
6608 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6609 		fpkt->pkt_comp = fcp_scsi_callback;
6610 
6611 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6612 		scsi_cdb->g0_addr2 = 0x01;
6613 		scsi_cdb->g0_addr1 = 0x83;
6614 		scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6615 		break;
6616 
6617 	case SCMD_INQUIRY:
6618 		fcmd->fcp_cntl.cntl_read_data = 1;
6619 		fcmd->fcp_cntl.cntl_write_data = 0;
6620 		fcmd->fcp_data_len = alloc_len;
6621 
6622 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6623 		fpkt->pkt_comp = fcp_scsi_callback;
6624 
6625 		scsi_cdb->scc_cmd = SCMD_INQUIRY;
6626 		scsi_cdb->g0_count0 = SUN_INQSIZE;
6627 		break;
6628 
6629 	case SCMD_REPORT_LUN: {
6630 		fc_portid_t	d_id;
6631 		opaque_t	fca_dev;
6632 
6633 		ASSERT(alloc_len >= 16);
6634 
6635 		d_id.priv_lilp_posit = 0;
6636 		d_id.port_id = ptgt->tgt_d_id;
6637 
6638 		fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6639 
6640 		mutex_enter(&ptgt->tgt_mutex);
6641 		ptgt->tgt_fca_dev = fca_dev;
6642 		mutex_exit(&ptgt->tgt_mutex);
6643 
6644 		fcmd->fcp_cntl.cntl_read_data = 1;
6645 		fcmd->fcp_cntl.cntl_write_data = 0;
6646 		fcmd->fcp_data_len = alloc_len;
6647 
6648 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6649 		fpkt->pkt_comp = fcp_scsi_callback;
6650 
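		/*
		 * Editorial note: the 32-bit allocation length of the
		 * REPORT LUNS CDB is split one byte at a time into the
		 * group 5 count fields below.
		 */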
6651 		scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6652 		scsi_cdb->scc5_count0 = alloc_len & 0xff;
6653 		scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6654 		scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6655 		scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6656 		break;
6657 	}
6658 
6659 	default:
6660 		fcp_log(CE_WARN, pptr->port_dip,
6661 		    "!fcp_send_scsi Invalid opcode");
6662 		break;
6663 	}
6664 
6665 	if (!nodma) {
6666 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6667 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6668 	}
6669 
6670 	mutex_enter(&pptr->port_mutex);
6671 	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6672 
6673 		mutex_exit(&pptr->port_mutex);
6674 		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6675 		    FC_SUCCESS) {
6676 			fcp_icmd_free(pptr, icmd);
6677 			return (DDI_FAILURE);
6678 		}
6679 		return (DDI_SUCCESS);
6680 	} else {
6681 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6682 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6683 		    "fcp_send_scsi,1: state change occurred"
6684 		    " for D_ID=0x%x", ptgt->tgt_d_id);
6685 		mutex_exit(&pptr->port_mutex);
6686 		fcp_icmd_free(pptr, icmd);
6687 		return (DDI_FAILURE);
6688 	}
6689 }
6690 
6691 
6692 /*
6693  * called by fcp_scsi_callback to check to handle the case where
6694  * called by fcp_scsi_callback to handle the case where
6695  */
6696 static int
6697 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6698 {
6699 	uchar_t				rqlen;
6700 	int				rval = DDI_FAILURE;
6701 	struct scsi_extended_sense	sense_info, *sense;
6702 	struct fcp_ipkt		*icmd = (struct fcp_ipkt *)
6703 	    fpkt->pkt_ulp_private;
6704 	struct fcp_tgt		*ptgt = icmd->ipkt_tgt;
6705 	struct fcp_port		*pptr = ptgt->tgt_port;
6706 
6707 	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6708 
6709 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6710 		/*
6711 		 * SCSI-II Reserve Release support. Some older FC drives return
6712 		 * a reservation conflict for the REPORT LUNS command.
6713 		 */
6714 		if (icmd->ipkt_nodma) {
6715 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6716 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6717 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6718 		} else {
6719 			fcp_rsp_t	new_resp;
6720 
6721 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6722 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6723 
6724 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6725 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6726 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6727 
6728 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6729 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6730 		}
6731 
6732 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6733 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6734 
6735 		return (DDI_SUCCESS);
6736 	}
6737 
6738 	sense = &sense_info;
6739 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
6740 		/* no need to continue if sense length is not set */
6741 		return (rval);
6742 	}
6743 
6744 	/* casting 64-bit integer to 8-bit */
6745 	rqlen = (uchar_t)min(rsp->fcp_sense_len,
6746 	    sizeof (struct scsi_extended_sense));
6747 
6748 	if (rqlen < 14) {
6749 		/* no need to continue if request length isn't long enough */
6750 		return (rval);
6751 	}
6752 
6753 	if (icmd->ipkt_nodma) {
6754 		/*
6755 		 * We can safely use fcp_response_len here since the
6756 		 * only path that calls fcp_check_reportlun,
6757 		 * fcp_scsi_callback, has already called
6758 		 * fcp_validate_fcp_response.
6759 		 */
6760 		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6761 		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6762 	} else {
6763 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6764 		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6765 		    sizeof (struct scsi_extended_sense));
6766 	}
6767 
6768 	if (!FCP_SENSE_NO_LUN(sense)) {
6769 		mutex_enter(&ptgt->tgt_mutex);
6770 		/* clear the flag if any */
6771 		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6772 		mutex_exit(&ptgt->tgt_mutex);
6773 	}
6774 
6775 	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6776 	    (sense->es_add_code == 0x20)) {
6777 		if (icmd->ipkt_nodma) {
6778 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6779 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6780 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6781 		} else {
6782 			fcp_rsp_t	new_resp;
6783 
6784 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6785 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6786 
6787 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6788 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6789 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6790 
6791 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6792 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6793 		}
6794 
6795 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6796 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6797 
6798 		return (DDI_SUCCESS);
6799 	}
6800 
6801 	/*
6802 	 * This is for the STK library, which returns a check condition to
6803 	 * indicate the device is not ready and manual assistance is needed.
6804 	 * It does so in response to a REPORT LUN command when the door is open.
6805 	 */
6806 	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6807 		if (icmd->ipkt_nodma) {
6808 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
6809 			rsp->fcp_u.fcp_status.sense_len_set = 0;
6810 			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6811 		} else {
6812 			fcp_rsp_t	new_resp;
6813 
6814 			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6815 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6816 
6817 			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6818 			new_resp.fcp_u.fcp_status.sense_len_set = 0;
6819 			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6820 
6821 			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6822 			    fpkt->pkt_resp_acc, sizeof (new_resp));
6823 		}
6824 
6825 		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6826 		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6827 
6828 		return (DDI_SUCCESS);
6829 	}
6830 
6831 	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6832 	    (FCP_SENSE_NO_LUN(sense))) {
6833 		mutex_enter(&ptgt->tgt_mutex);
6834 		if ((FCP_SENSE_NO_LUN(sense)) &&
6835 		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6836 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6837 			mutex_exit(&ptgt->tgt_mutex);
6838 			/*
6839 			 * reconfig was triggred by ILLEGAL REQUEST but
6840 			 * reconfig was triggered by ILLEGAL REQUEST but
6841 			 */
6842 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6843 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
6844 			    "!FCP: Unable to obtain Report Lun data"
6845 			    " target=%x", ptgt->tgt_d_id);
6846 		} else {
6847 			if (ptgt->tgt_tid == NULL) {
6848 				timeout_id_t	tid;
6849 				/*
6850 				 * REPORT LUN data has changed.	 Kick off
6851 				 * rediscovery
6852 				 */
6853 				tid = timeout(fcp_reconfigure_luns,
6854 				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6855 
6856 				ptgt->tgt_tid = tid;
6857 				ptgt->tgt_state |= FCP_TGT_BUSY;
6858 			}
6859 			if (FCP_SENSE_NO_LUN(sense)) {
6860 				ptgt->tgt_state |= FCP_TGT_ILLREQ;
6861 			}
6862 			mutex_exit(&ptgt->tgt_mutex);
6863 			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6864 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 				    "!FCP:Report Lun Has Changed"
6867 				    " target=%x", ptgt->tgt_d_id);
6868 			} else if (FCP_SENSE_NO_LUN(sense)) {
6869 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
6870 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
6871 				    "!FCP:LU Not Supported"
6872 				    " target=%x", ptgt->tgt_d_id);
6873 			}
6874 		}
6875 		rval = DDI_SUCCESS;
6876 	}
6877 
6878 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6879 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
6880 	    "D_ID=%x, sense=%x, status=%x",
6881 	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6882 	    rsp->fcp_u.fcp_status.scsi_status);
6883 
6884 	return (rval);
6885 }
6886 
6887 /*
6888  *     Function: fcp_scsi_callback
6889  *
6890  *  Description: This is the callback routine set by fcp_send_scsi() after
6891  *		 it calls fcp_icmd_alloc().  The SCSI command completed here
6892  *		 and autogenerated by FCP are:	REPORT_LUN, INQUIRY and
6893  *		 INQUIRY_PAGE83.
6894  *
6895  *     Argument: *fpkt	 FC packet used to convey the command
6896  *
6897  * Return Value: None
6898  */
6899 static void
6900 fcp_scsi_callback(fc_packet_t *fpkt)
6901 {
6902 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
6903 	    fpkt->pkt_ulp_private;
6904 	struct fcp_rsp_info	fcp_rsp_err, *bep;
6905 	struct fcp_port	*pptr;
6906 	struct fcp_tgt	*ptgt;
6907 	struct fcp_lun	*plun;
6908 	struct fcp_rsp		response, *rsp;
6909 
6910 	ptgt = icmd->ipkt_tgt;
6911 	pptr = ptgt->tgt_port;
6912 	plun = icmd->ipkt_lun;
6913 
6914 	if (icmd->ipkt_nodma) {
6915 		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6916 	} else {
6917 		rsp = &response;
6918 		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6919 		    sizeof (struct fcp_rsp));
6920 	}
6921 
6922 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
6923 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
6924 	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
6925 	    "status=%x, lun num=%x",
6926 	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6927 	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6928 
6929 	/*
6930 	 * Pre-init LUN GUID with NWWN if it is not a device that
6931 	 * supports multiple luns and we know it's not page83
6932 	 * compliant.  Although using a NWWN is not lun unique,
6933 	 * we will be fine since there is only one lun behind the target
6934 	 * in this case.
6935 	 */
6936 	if ((plun->lun_guid_size == 0) &&
6937 	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6938 	    (fcp_symmetric_device_probe(plun) == 0)) {
6939 
6940 		char ascii_wwn[FC_WWN_SIZE*2+1];
6941 		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6942 		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6943 	}
6944 
6945 	/*
6946 	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6947 	 * when they have more data than what is asked for in the CDB. An overrun
6948 	 * really occurs when FCP_DL is smaller than the data length in the CDB.
6949 	 * In this case we know that the REPORT LUN command we formed within
6950 	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
6951 	 * behavior. In reality this is FC_SUCCESS.
6952 	 */
6953 	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6954 	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6955 	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6956 		fpkt->pkt_state = FC_PKT_SUCCESS;
6957 	}
6958 
6959 	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6960 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
6961 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
6962 		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6963 		    ptgt->tgt_d_id);
6964 
6965 		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6966 			/*
6967 			 * Inquiry VPD page command on A5K SES devices would
6968 			 * result in data CRC errors.
6969 			 */
6970 			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6971 				(void) fcp_handle_page83(fpkt, icmd, 1);
6972 				return;
6973 			}
6974 		}
6975 		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6976 		    FCP_MUST_RETRY(fpkt)) {
6977 			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6978 			fcp_retry_scsi_cmd(fpkt);
6979 			return;
6980 		}
6981 
6982 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6983 		    FCP_TGT_TRACE_20);
6984 
6985 		mutex_enter(&pptr->port_mutex);
6986 		mutex_enter(&ptgt->tgt_mutex);
6987 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6988 			mutex_exit(&ptgt->tgt_mutex);
6989 			mutex_exit(&pptr->port_mutex);
6990 			fcp_print_error(fpkt);
6991 		} else {
6992 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
6993 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
6994 			    "fcp_scsi_callback,1: state change occurred"
6995 			    " for D_ID=0x%x", ptgt->tgt_d_id);
6996 			mutex_exit(&ptgt->tgt_mutex);
6997 			mutex_exit(&pptr->port_mutex);
6998 		}
6999 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7000 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7001 		fcp_icmd_free(pptr, icmd);
7002 		return;
7003 	}
7004 
7005 	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7006 
7007 	mutex_enter(&pptr->port_mutex);
7008 	mutex_enter(&ptgt->tgt_mutex);
7009 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7010 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7011 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7012 		    "fcp_scsi_callback,2: state change occurred"
7013 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7014 		mutex_exit(&ptgt->tgt_mutex);
7015 		mutex_exit(&pptr->port_mutex);
7016 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7017 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7018 		fcp_icmd_free(pptr, icmd);
7019 		return;
7020 	}
7021 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7022 
7023 	mutex_exit(&ptgt->tgt_mutex);
7024 	mutex_exit(&pptr->port_mutex);
7025 
7026 	if (icmd->ipkt_nodma) {
7027 		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7028 		    sizeof (struct fcp_rsp));
7029 	} else {
7030 		bep = &fcp_rsp_err;
7031 		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7032 		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7033 	}
7034 
7035 	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7036 		fcp_retry_scsi_cmd(fpkt);
7037 		return;
7038 	}
7039 
7040 	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7041 	    FCP_NO_FAILURE) {
7042 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7043 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7044 		    "rsp_code=0x%x, rsp_len_set=0x%x",
7045 		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7046 		fcp_retry_scsi_cmd(fpkt);
7047 		return;
7048 	}
7049 
7050 	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7051 	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7052 		fcp_queue_ipkt(pptr, fpkt);
7053 		return;
7054 	}
7055 
7056 	/*
7057 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7058 	 * with ILLEGAL REQUEST, as per the SCSI spec.
7059 	 * Crossbridge is one such device and Daktari's SES node is another.
7060 	 * Ideally we want to enumerate these devices as non-mpxio devices.
7061 	 * SES nodes (Daktari only, currently) are an exception to this.
7062 	 */
7063 	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7064 	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7065 
7066 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7067 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
7068 		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7069 		    "check condition. May enumerate as non-mpxio device",
7070 		    ptgt->tgt_d_id, plun->lun_type);
7071 
7072 		/*
7073 		 * If we let Daktari's SES be enumerated as a non-mpxio
7074 		 * device, there will be a discrepancy in that the other
7075 		 * internal FC disks will get enumerated as mpxio devices.
7076 		 * Applications like luxadm expect this to be consistent.
7077 		 *
7078 		 * So we put in a hack here to check whether this is an SES
7079 		 * device and handle it here.
7080 		 */
7081 		if (plun->lun_type == DTYPE_ESI) {
7082 			/*
7083 			 * Since pkt_state is actually FC_PKT_SUCCESS
7084 			 * at this stage, we fake a failure here so that
7085 			 * fcp_handle_page83 will create a device path using
7086 			 * the WWN instead of the GUID, which is not there anyway.
7087 			 */
7088 			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7089 			(void) fcp_handle_page83(fpkt, icmd, 1);
7090 			return;
7091 		}
7092 
7093 		mutex_enter(&ptgt->tgt_mutex);
7094 		plun->lun_state &= ~(FCP_LUN_OFFLINE |
7095 		    FCP_LUN_MARK | FCP_LUN_BUSY);
7096 		mutex_exit(&ptgt->tgt_mutex);
7097 
7098 		(void) fcp_call_finish_init(pptr, ptgt,
7099 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7100 		    icmd->ipkt_cause);
7101 		fcp_icmd_free(pptr, icmd);
7102 		return;
7103 	}
7104 
7105 	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7106 		int rval = DDI_FAILURE;
7107 
7108 		/*
7109 		 * handle cases where report lun isn't supported
7110 		 * by faking up our own REPORT_LUN response or
7111 		 * UNIT ATTENTION
7112 		 */
7113 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7114 			rval = fcp_check_reportlun(rsp, fpkt);
7115 
7116 			/*
7117 			 * fcp_check_reportlun might have modified the
7118 			 * FCP response. Copy it in again to get an updated
7119 			 * FCP response
7120 			 */
7121 			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7122 				rsp = &response;
7123 
7124 				FCP_CP_IN(fpkt->pkt_resp, rsp,
7125 				    fpkt->pkt_resp_acc,
7126 				    sizeof (struct fcp_rsp));
7127 			}
7128 		}
7129 
7130 		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7131 			if (rval == DDI_SUCCESS) {
7132 				(void) fcp_call_finish_init(pptr, ptgt,
7133 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7134 				    icmd->ipkt_cause);
7135 				fcp_icmd_free(pptr, icmd);
7136 			} else {
7137 				fcp_retry_scsi_cmd(fpkt);
7138 			}
7139 
7140 			return;
7141 		}
7142 	} else {
7143 		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7144 			mutex_enter(&ptgt->tgt_mutex);
7145 			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7146 			mutex_exit(&ptgt->tgt_mutex);
7147 		}
7148 	}
7149 
7150 	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7151 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7152 		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7153 		    DDI_DMA_SYNC_FORCPU);
7154 	}
7155 
7156 	switch (icmd->ipkt_opcode) {
7157 	case SCMD_INQUIRY:
7158 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7159 		fcp_handle_inquiry(fpkt, icmd);
7160 		break;
7161 
7162 	case SCMD_REPORT_LUN:
7163 		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7164 		    FCP_TGT_TRACE_22);
7165 		fcp_handle_reportlun(fpkt, icmd);
7166 		break;
7167 
7168 	case SCMD_INQUIRY_PAGE83:
7169 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7170 		(void) fcp_handle_page83(fpkt, icmd, 0);
7171 		break;
7172 
7173 	default:
7174 		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7175 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7176 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7177 		fcp_icmd_free(pptr, icmd);
7178 		break;
7179 	}
7180 }
7181 
7182 
7183 static void
7184 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7185 {
7186 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
7187 	    fpkt->pkt_ulp_private;
7188 	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
7189 	struct fcp_port	*pptr = ptgt->tgt_port;
7190 
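	/*
	 * Requeue the packet for another attempt only while the retry
	 * budget lasts and no link/target state change has invalidated
	 * it; otherwise print the error and release the internal packet.
	 */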
7191 	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7192 	    fcp_is_retryable(icmd)) {
7193 		mutex_enter(&pptr->port_mutex);
7194 		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7195 			mutex_exit(&pptr->port_mutex);
7196 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7197 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7198 			    "Retrying %s to %x; state=%x, reason=%x",
7199 			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7200 			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7201 			    fpkt->pkt_state, fpkt->pkt_reason);
7202 
7203 			fcp_queue_ipkt(pptr, fpkt);
7204 		} else {
7205 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7206 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
7207 			    "fcp_retry_scsi_cmd,1: state change occurred"
7208 			    " for D_ID=0x%x", ptgt->tgt_d_id);
7209 			mutex_exit(&pptr->port_mutex);
7210 			(void) fcp_call_finish_init(pptr, ptgt,
7211 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7212 			    icmd->ipkt_cause);
7213 			fcp_icmd_free(pptr, icmd);
7214 		}
7215 	} else {
7216 		fcp_print_error(fpkt);
7217 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7218 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7219 		fcp_icmd_free(pptr, icmd);
7220 	}
7221 }
7222 
7223 /*
7224  *     Function: fcp_handle_page83
7225  *
7226  *  Description: Handles the response to INQUIRY_PAGE83.
7227  *
7228  *     Argument: *fpkt	FC packet used to convey the command.
7229  *		 *icmd	Original fcp_ipkt structure.
7230  *		 ignore_page83_data
7231  *			If 1, this is a special device's page83 response;
7232  *			the device should be enumerated under mpxio.
7233  *
7234  * Return Value: None
7235  */
7236 static void
7237 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7238     int ignore_page83_data)
7239 {
7240 	struct fcp_port	*pptr;
7241 	struct fcp_lun	*plun;
7242 	struct fcp_tgt	*ptgt;
7243 	uchar_t			dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7244 	int			fail = 0;
7245 	ddi_devid_t		devid;
7246 	char			*guid = NULL;
7247 	int			ret;
7248 
7249 	ASSERT(icmd != NULL && fpkt != NULL);
7250 
7251 	pptr = icmd->ipkt_port;
7252 	ptgt = icmd->ipkt_tgt;
7253 	plun = icmd->ipkt_lun;
7254 
7255 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7256 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7257 
7258 		FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7259 		    SCMD_MAX_INQUIRY_PAGE83_SIZE);
7260 
7261 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7262 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7263 		    "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7264 		    "dtype=0x%x, lun num=%x",
7265 		    pptr->port_instance, ptgt->tgt_d_id,
7266 		    dev_id_page[0], plun->lun_num);
7267 
7268 		ret = ddi_devid_scsi_encode(
7269 		    DEVID_SCSI_ENCODE_VERSION_LATEST,
7270 		    NULL,		/* driver name */
7271 		    (unsigned char *) &plun->lun_inq, /* standard inquiry */
7272 		    sizeof (plun->lun_inq), /* size of standard inquiry */
7273 		    NULL,		/* page 80 data */
7274 		    0,		/* page 80 len */
7275 		    dev_id_page,	/* page 83 data */
7276 		    SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7277 		    &devid);
7278 
7279 		if (ret == DDI_SUCCESS) {
7280 
7281 			guid = ddi_devid_to_guid(devid);
7282 
7283 			if (guid) {
7284 				/*
7285 				 * Check our current guid.  If it's non null
7286 				 * and it has changed, we need to copy it into
7287 				 * lun_old_guid since we might still need it.
7288 				 */
7289 				if (plun->lun_guid &&
7290 				    strcmp(guid, plun->lun_guid)) {
7291 					unsigned int len;
7292 
7293 					/*
7294 					 * If the guid of the LUN changes,
7295 					 * reconfiguration should be triggered
7296 					 * to reflect the changes.
7297 					 * i.e. we should offline the LUN with
7298 					 * the old guid, and online the LUN with
7299 					 * the new guid.
7300 					 */
7301 					plun->lun_state |= FCP_LUN_CHANGED;
7302 
7303 					if (plun->lun_old_guid) {
7304 						kmem_free(plun->lun_old_guid,
7305 						    plun->lun_old_guid_size);
7306 					}
7307 
7308 					len = plun->lun_guid_size;
7309 					plun->lun_old_guid_size = len;
7310 
7311 					plun->lun_old_guid = kmem_zalloc(len,
7312 					    KM_NOSLEEP);
7313 
7314 					if (plun->lun_old_guid) {
7315 						/*
7316 						 * The alloc was successful,
7317 						 * so do the copy.
7318 						 */
7319 						bcopy(plun->lun_guid,
7320 						    plun->lun_old_guid, len);
7321 					} else {
7322 						fail = 1;
7323 						plun->lun_old_guid_size = 0;
7324 					}
7325 				}
7326 				if (!fail) {
7327 					if (fcp_copy_guid_2_lun_block(
7328 					    plun, guid)) {
7329 						fail = 1;
7330 					}
7331 				}
7332 				ddi_devid_free_guid(guid);
7333 
7334 			} else {
7335 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7336 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
7337 				    "fcp_handle_page83: unable to create "
7338 				    "GUID");
7339 
7340 				/* couldn't create good guid from devid */
7341 				fail = 1;
7342 			}
7343 			ddi_devid_free(devid);
7344 
7345 		} else if (ret == DDI_NOT_WELL_FORMED) {
7346 			/* NULL filled data for page 83 */
7347 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7348 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7349 			    "fcp_handle_page83: retry GUID");
7350 
7351 			icmd->ipkt_retries = 0;
7352 			fcp_retry_scsi_cmd(fpkt);
7353 			return;
7354 		} else {
7355 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
7356 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
7357 			    "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7358 			    ret);
7359 			/*
7360 			 * Since the page83 validation was
7361 			 * introduced late, we are tolerant of
7362 			 * existing devices that have already
7363 			 * been found to work under mpxio, like
7364 			 * the A5200's SES device. Its page83
7365 			 * response is not standard-compliant, but
7366 			 * we still want it enumerated under mpxio.
7367 			 */
7368 			if (fcp_symmetric_device_probe(plun) != 0) {
7369 				fail = 1;
7370 			}
7371 		}
7372 
7373 	} else {
7374 		/* bad packet state */
7375 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7376 
7377 		/*
7378 		 * Some special devices (A5K SES and Daktari's SES devices)
7379 		 * must be enumerated under mpxio,
7380 		 * or "luxadm dis" will fail.
7381 		 */
7382 		if (ignore_page83_data) {
7383 			fail = 0;
7384 		} else {
7385 			fail = 1;
7386 		}
7387 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7388 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7389 		    "!Devid page cmd failed. "
7390 		    "fpkt_state: %x fpkt_reason: %x "
7391 		    "ignore_page83: %d",
7392 		    fpkt->pkt_state, fpkt->pkt_reason,
7393 		    ignore_page83_data);
7394 	}
7395 
7396 	mutex_enter(&pptr->port_mutex);
7397 	mutex_enter(&plun->lun_mutex);
7398 	/*
7399 	 * If lun_cip is not NULL, don't update lun_mpxio; this avoids a
7400 	 * mismatch between lun_cip and lun_mpxio.
7401 	 */
7402 	if (plun->lun_cip == NULL) {
7403 		/*
7404 		 * If we don't have a guid for this lun it's because we were
7405 		 * unable to glean one from the page 83 response.  Set the
7406 		 * control flag to 0 here to make sure that we don't attempt to
7407 		 * enumerate it under mpxio.
7408 		 */
7409 		if (fail || pptr->port_mpxio == 0) {
7410 			plun->lun_mpxio = 0;
7411 		} else {
7412 			plun->lun_mpxio = 1;
7413 		}
7414 	}
7415 	mutex_exit(&plun->lun_mutex);
7416 	mutex_exit(&pptr->port_mutex);
7417 
7418 	mutex_enter(&ptgt->tgt_mutex);
7419 	plun->lun_state &=
7420 	    ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7421 	mutex_exit(&ptgt->tgt_mutex);
7422 
7423 	(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7424 	    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7425 
7426 	fcp_icmd_free(pptr, icmd);
7427 }
7428 
7429 /*
7430  *     Function: fcp_handle_inquiry
7431  *
7432  *  Description: Called by fcp_scsi_callback to handle the response to an
7433  *		 INQUIRY request.
7434  *
7435  *     Argument: *fpkt	FC packet used to convey the command.
7436  *		 *icmd	Original fcp_ipkt structure.
7437  *
7438  * Return Value: None
7439  */
7440 static void
7441 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7442 {
7443 	struct fcp_port	*pptr;
7444 	struct fcp_lun	*plun;
7445 	struct fcp_tgt	*ptgt;
7446 	uchar_t		dtype;
7447 	uchar_t		pqual;
7448 	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
7449 
7450 	ASSERT(icmd != NULL && fpkt != NULL);
7451 
7452 	pptr = icmd->ipkt_port;
7453 	ptgt = icmd->ipkt_tgt;
7454 	plun = icmd->ipkt_lun;
7455 
7456 	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7457 	    sizeof (struct scsi_inquiry));
7458 
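	/*
	 * Byte 0 of the standard INQUIRY data packs the peripheral
	 * qualifier into bits 7-5 and the peripheral device type into
	 * bits 4-0; split the two fields apart here.
	 */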
7459 	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7460 	pqual = plun->lun_inq.inq_dtype >> 5;
7461 
7462 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7463 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7464 	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7465 	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7466 	    plun->lun_num, dtype, pqual);
7467 
7468 	if (pqual != 0) {
7469 		/*
7470 		 * Non-zero peripheral qualifier
7471 		 */
7472 		fcp_log(CE_CONT, pptr->port_dip,
7473 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7474 		    "Device type=0x%x Peripheral qual=0x%x\n",
7475 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7476 
7477 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7478 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7479 		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7480 		    "Device type=0x%x Peripheral qual=0x%x\n",
7481 		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7482 
7483 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7484 
7485 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7486 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7487 		fcp_icmd_free(pptr, icmd);
7488 		return;
7489 	}
7490 
7491 	/*
7492 	 * If the device is already initialized, check the dtype
7493 	 * for a change. If it has changed, then update the flags
7494 	 * so that fcp_create_luns() will offline the old device and
7495 	 * create the new device. Refer to bug 4764752.
7496 	 */
7497 	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7498 		plun->lun_state |= FCP_LUN_CHANGED;
7499 	}
7500 	plun->lun_type = plun->lun_inq.inq_dtype;
7501 
7502 	/*
7503 	 * This code sets/initializes throttling in the FCA driver: the
7504 	 * notify command packs FC_NOTIFY_THROTTLE and FCP_SVE_THROTTLE.
7505 	 */
7506 	mutex_enter(&pptr->port_mutex);
7507 	if (!pptr->port_notify) {
7508 		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7509 			uint32_t cmd = 0;
7510 			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7511 			    ((cmd & 0xFFFFFF00 >> 8) |
7512 			    FCP_SVE_THROTTLE << 8));
7513 			pptr->port_notify = 1;
7514 			mutex_exit(&pptr->port_mutex);
7515 			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7516 			mutex_enter(&pptr->port_mutex);
7517 		}
7518 	}
7519 
7520 	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7521 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7522 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
7523 		    "fcp_handle_inquiry,1: state change occurred"
7524 		    " for D_ID=0x%x", ptgt->tgt_d_id);
7525 		mutex_exit(&pptr->port_mutex);
7526 
7527 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7528 		(void) fcp_call_finish_init(pptr, ptgt,
7529 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7530 		    icmd->ipkt_cause);
7531 		fcp_icmd_free(pptr, icmd);
7532 		return;
7533 	}
7534 	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7535 	mutex_exit(&pptr->port_mutex);
7536 
7537 	/* Retrieve the rscn count (if a valid one exists) */
7538 	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7539 		rscn_count = ((fc_ulp_rscn_info_t *)
7540 		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7541 	} else {
7542 		rscn_count = FC_INVALID_RSCN_COUNT;
7543 	}
7544 
7545 	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7546 	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
7547 	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7549 		fcp_log(CE_WARN, NULL, "!failed to send page 83");
7550 		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7551 		(void) fcp_call_finish_init(pptr, ptgt,
7552 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7553 		    icmd->ipkt_cause);
7554 	}
7555 
7556 	/*
7557 	 * Read Inquiry VPD Page 0x83 to uniquely
7558 	 * identify this logical unit.
7559 	 */
7560 	fcp_icmd_free(pptr, icmd);
7561 }
7562 
7563 /*
7564  *     Function: fcp_handle_reportlun
7565  *
7566  *  Description: Called by fcp_scsi_callback to handle the response to a
7567  *		 REPORT_LUN request.
7568  *
7569  *     Argument: *fpkt	FC packet used to convey the command.
7570  *		 *icmd	Original fcp_ipkt structure.
7571  *
7572  * Return Value: None
7573  */
7574 static void
7575 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7576 {
7577 	int				i;
7578 	int				nluns_claimed;
7579 	int				nluns_bufmax;
7580 	int				len;
7581 	uint16_t			lun_num;
7582 	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
7583 	struct fcp_port			*pptr;
7584 	struct fcp_tgt			*ptgt;
7585 	struct fcp_lun			*plun;
7586 	struct fcp_reportlun_resp	*report_lun;
7587 
7588 	pptr = icmd->ipkt_port;
7589 	ptgt = icmd->ipkt_tgt;
7590 	len = fpkt->pkt_datalen;
7591 
7592 	if ((len < FCP_LUN_HEADER) ||
7593 	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7594 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7595 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7596 		fcp_icmd_free(pptr, icmd);
7597 		return;
7598 	}
7599 
7600 	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7601 	    fpkt->pkt_datalen);
7602 
7603 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7604 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7605 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7606 	    pptr->port_instance, ptgt->tgt_d_id);
7607 
7608 	/*
7609 	 * Get the number of LUNs the device claims it has. The response
7610 	 * header supplies the LUN list length in bytes (8 bytes per entry).
7611 	 */
7612 	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7613 
7614 	/*
7615 	 * Get the maximum number of luns the buffer submitted can hold.
7616 	 */
7617 	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7618 
7619 	/*
7620 	 * Due to limitations of certain hardware, we support only 16 bit LUNs
7621 	 */
7622 	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7623 		kmem_free(report_lun, len);
7624 
7625 		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7626 		    " 0x%x number of LUNs for target=%x", nluns_claimed,
7627 		    ptgt->tgt_d_id);
7628 
7629 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7630 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7631 		fcp_icmd_free(pptr, icmd);
7632 		return;
7633 	}
7634 
7635 	/*
7636 	 * If there are more LUNs than we have allocated memory for,
7637 	 * allocate more space and send down yet another report lun if
7638 	 * the maximum number of attempts hasn't been reached.
7639 	 */
7640 	mutex_enter(&ptgt->tgt_mutex);
7641 
7642 	if ((nluns_claimed > nluns_bufmax) &&
7643 	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7644 
7645 		struct fcp_lun *plun;
7646 
7647 		ptgt->tgt_report_lun_cnt++;
7648 		plun = ptgt->tgt_lun;
7649 		ASSERT(plun != NULL);
7650 		mutex_exit(&ptgt->tgt_mutex);
7651 
7652 		kmem_free(report_lun, len);
7653 
7654 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7655 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7656 		    "!Dynamically discovered %d LUNs for D_ID=%x",
7657 		    nluns_claimed, ptgt->tgt_d_id);
7658 
7659 		/* Retrieve the rscn count (if a valid one exists) */
7660 		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7661 			rscn_count = ((fc_ulp_rscn_info_t *)
7662 			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7663 			    ulp_rscn_count;
7664 		} else {
7665 			rscn_count = FC_INVALID_RSCN_COUNT;
7666 		}
7667 
7668 		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7669 		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7670 		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7671 		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7672 			(void) fcp_call_finish_init(pptr, ptgt,
7673 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7674 			    icmd->ipkt_cause);
7675 		}
7676 
7677 		fcp_icmd_free(pptr, icmd);
7678 		return;
7679 	}
7680 
7681 	if (nluns_claimed > nluns_bufmax) {
7682 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7683 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7684 		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7685 		    "	 Number of LUNs lost=%x",
7686 		    ptgt->tgt_port_wwn.raw_wwn[0],
7687 		    ptgt->tgt_port_wwn.raw_wwn[1],
7688 		    ptgt->tgt_port_wwn.raw_wwn[2],
7689 		    ptgt->tgt_port_wwn.raw_wwn[3],
7690 		    ptgt->tgt_port_wwn.raw_wwn[4],
7691 		    ptgt->tgt_port_wwn.raw_wwn[5],
7692 		    ptgt->tgt_port_wwn.raw_wwn[6],
7693 		    ptgt->tgt_port_wwn.raw_wwn[7],
7694 		    nluns_claimed - nluns_bufmax);
7695 
7696 		nluns_claimed = nluns_bufmax;
7697 	}
7698 	ptgt->tgt_lun_cnt = nluns_claimed;
7699 
7700 	/*
7701 	 * Identify missing LUNs and print warning messages
7702 	 */
7703 	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7704 		int offline;
7705 		int exists = 0;
7706 
7707 		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7708 
7709 		for (i = 0; i < nluns_claimed && exists == 0; i++) {
7710 			uchar_t		*lun_string;
7711 
7712 			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7713 
7714 			switch (lun_string[0] & 0xC0) {
7715 			case FCP_LUN_ADDRESSING:
7716 			case FCP_PD_ADDRESSING:
7717 			case FCP_VOLUME_ADDRESSING:
7718 				lun_num = ((lun_string[0] & 0x3F) << 8) |
7719 				    lun_string[1];
7720 				if (plun->lun_num == lun_num) {
7721 					exists++;
7722 					break;
7723 				}
7724 				break;
7725 
7726 			default:
7727 				break;
7728 			}
7729 		}
7730 
7731 		if (!exists && !offline) {
7732 			mutex_exit(&ptgt->tgt_mutex);
7733 
7734 			mutex_enter(&pptr->port_mutex);
7735 			mutex_enter(&ptgt->tgt_mutex);
7736 			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7737 				/*
7738 				 * set disappear flag when device was connected
7739 				 */
7740 				if (!(plun->lun_state &
7741 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7742 					plun->lun_state |= FCP_LUN_DISAPPEARED;
7743 				}
7744 				mutex_exit(&ptgt->tgt_mutex);
7745 				mutex_exit(&pptr->port_mutex);
7746 				if (!(plun->lun_state &
7747 				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
7748 					fcp_log(CE_NOTE, pptr->port_dip,
7749 					    "!Lun=%x for target=%x disappeared",
7750 					    plun->lun_num, ptgt->tgt_d_id);
7751 				}
7752 				mutex_enter(&ptgt->tgt_mutex);
7753 			} else {
7754 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
7755 				    fcp_trace, FCP_BUF_LEVEL_5, 0,
7756 				    "fcp_handle_reportlun,1: state change"
7757 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7758 				mutex_exit(&ptgt->tgt_mutex);
7759 				mutex_exit(&pptr->port_mutex);
7760 				kmem_free(report_lun, len);
7761 				(void) fcp_call_finish_init(pptr, ptgt,
7762 				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7763 				    icmd->ipkt_cause);
7764 				fcp_icmd_free(pptr, icmd);
7765 				return;
7766 			}
7767 		} else if (exists) {
7768 			/*
7769 			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7770 			 * actually exists in REPORT_LUN response
7771 			 */
7772 			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7773 				plun->lun_state &=
7774 				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
7775 			}
7776 			if (offline || plun->lun_num == 0) {
7777 				if (plun->lun_state & FCP_LUN_DISAPPEARED)  {
7778 					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7779 					mutex_exit(&ptgt->tgt_mutex);
7780 					fcp_log(CE_NOTE, pptr->port_dip,
7781 					    "!Lun=%x for target=%x reappeared",
7782 					    plun->lun_num, ptgt->tgt_d_id);
7783 					mutex_enter(&ptgt->tgt_mutex);
7784 				}
7785 			}
7786 		}
7787 	}
7788 
7789 	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7790 	mutex_exit(&ptgt->tgt_mutex);
7791 
7792 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7793 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7794 	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7795 	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7796 
7797 	/* scan each lun */
7798 	for (i = 0; i < nluns_claimed; i++) {
7799 		uchar_t	*lun_string;
7800 
7801 		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7802 
7803 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
7804 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
7805 		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7806 		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7807 		    lun_string[0]);
7808 
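		/*
		 * Bits 7-6 of the first LUN byte select the SAM addressing
		 * method; for the methods handled here the 16-bit LUN number
		 * comes from bits 5-0 of byte 0 and all of byte 1 (e.g. under
		 * flat-space addressing, LUN 5 is encoded as 0x40 0x05
		 * followed by zeros).
		 */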
7809 		switch (lun_string[0] & 0xC0) {
7810 		case FCP_LUN_ADDRESSING:
7811 		case FCP_PD_ADDRESSING:
7812 		case FCP_VOLUME_ADDRESSING:
7813 			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7814 
7815 			/* We will skip masked LUNs because of the blacklist. */
7816 			if (fcp_lun_blacklist != NULL) {
7817 				mutex_enter(&ptgt->tgt_mutex);
7818 				if (fcp_should_mask(&ptgt->tgt_port_wwn,
7819 				    lun_num) == TRUE) {
7820 					ptgt->tgt_lun_cnt--;
7821 					mutex_exit(&ptgt->tgt_mutex);
7822 					break;
7823 				}
7824 				mutex_exit(&ptgt->tgt_mutex);
7825 			}
7826 
7827 			/* see if this LUN is already allocated */
7828 			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7829 				plun = fcp_alloc_lun(ptgt);
7830 				if (plun == NULL) {
7831 					fcp_log(CE_NOTE, pptr->port_dip,
7832 					    "!Lun allocation failed"
7833 					    " target=%x lun=%x",
7834 					    ptgt->tgt_d_id, lun_num);
7835 					break;
7836 				}
7837 			}
7838 
7839 			mutex_enter(&plun->lun_tgt->tgt_mutex);
7840 			/* convert to LUN */
7841 			plun->lun_addr.ent_addr_0 =
7842 			    BE_16(*(uint16_t *)&(lun_string[0]));
7843 			plun->lun_addr.ent_addr_1 =
7844 			    BE_16(*(uint16_t *)&(lun_string[2]));
7845 			plun->lun_addr.ent_addr_2 =
7846 			    BE_16(*(uint16_t *)&(lun_string[4]));
7847 			plun->lun_addr.ent_addr_3 =
7848 			    BE_16(*(uint16_t *)&(lun_string[6]));
7849 
7850 			plun->lun_num = lun_num;
7851 			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7852 			plun->lun_state &= ~FCP_LUN_OFFLINE;
7853 			mutex_exit(&plun->lun_tgt->tgt_mutex);
7854 
7855 			/* Retrieve the rscn count (if a valid one exists) */
7856 			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7857 				rscn_count = ((fc_ulp_rscn_info_t *)
7858 				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7859 				    ulp_rscn_count;
7860 			} else {
7861 				rscn_count = FC_INVALID_RSCN_COUNT;
7862 			}
7863 
7864 			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7865 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7866 			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7867 				mutex_enter(&pptr->port_mutex);
7868 				mutex_enter(&plun->lun_tgt->tgt_mutex);
7869 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7870 					fcp_log(CE_NOTE, pptr->port_dip,
7871 					    "!failed to send INQUIRY"
7872 					    " target=%x lun=%x",
7873 					    ptgt->tgt_d_id, plun->lun_num);
7874 				} else {
7875 					FCP_TRACE(fcp_logq,
7876 					    pptr->port_instbuf, fcp_trace,
7877 					    FCP_BUF_LEVEL_5, 0,
7878 					    "fcp_handle_reportlun,2: state"
7879 					    " change occurred for D_ID=0x%x",
7880 					    ptgt->tgt_d_id);
7881 				}
7882 				mutex_exit(&plun->lun_tgt->tgt_mutex);
7883 				mutex_exit(&pptr->port_mutex);
7884 			} else {
7885 				continue;
7886 			}
7887 			break;
7888 
7889 		default:
7890 			fcp_log(CE_WARN, NULL,
7891 			    "!Unsupported LUN Addressing method %x "
7892 			    "in response to REPORT_LUN", lun_string[0]);
7893 			break;
7894 		}
7895 
7896 		/*
7897 		 * each time through this loop we should decrement
7898 		 * the tmp_cnt by one -- since we go through this loop
7899 		 * one time for each LUN, the tmp_cnt should never be <=0
7900 		 */
7901 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7902 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7903 	}
7904 
7905 	if (i == 0) {
7906 		fcp_log(CE_WARN, pptr->port_dip,
7907 		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7908 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7909 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
7910 	}
7911 
7912 	kmem_free(report_lun, len);
7913 	fcp_icmd_free(pptr, icmd);
7914 }
7915 
7916 
7917 /*
7918  * called internally to return a LUN given a target and a LUN number
7919  */
7920 static struct fcp_lun *
7921 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7922 {
7923 	struct fcp_lun	*plun;
7924 
7925 	mutex_enter(&ptgt->tgt_mutex);
7926 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7927 		if (plun->lun_num == lun_num) {
7928 			mutex_exit(&ptgt->tgt_mutex);
7929 			return (plun);
7930 		}
7931 	}
7932 	mutex_exit(&ptgt->tgt_mutex);
7933 
7934 	return (NULL);
7935 }
7936 
7937 
7938 /*
7939  * handle finishing one target for fcp_finish_init
7940  *
7941  * return true (non-zero) if we want finish_init to continue with the
7942  * next target
7943  *
7944  * called with the port mutex held
7945  */
7946 /*ARGSUSED*/
7947 static int
7948 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7949     int link_cnt, int tgt_cnt, int cause)
7950 {
7951 	int	rval = 1;
7952 	ASSERT(pptr != NULL);
7953 	ASSERT(ptgt != NULL);
7954 
7955 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
7956 	    fcp_trace, FCP_BUF_LEVEL_5, 0,
7957 	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7958 	    ptgt->tgt_state);
7959 
7960 	ASSERT(mutex_owned(&pptr->port_mutex));
7961 
7962 	if ((pptr->port_link_cnt != link_cnt) ||
7963 	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7964 		/*
7965 		 * oh oh -- another link reset or target change
7966 		 * must have occurred while we are in here
7967 		 */
7968 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7969 
7970 		return (0);
7971 	} else {
7972 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7973 	}
7974 
7975 	mutex_enter(&ptgt->tgt_mutex);
7976 
7977 	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7978 		/*
7979 		 * tgt is not offline -- is it marked (i.e. needs
7980 		 * to be offlined) ??
7981 		 */
7982 		if (ptgt->tgt_state & FCP_TGT_MARK) {
7983 			/*
7984 			 * this target not offline *and*
7985 			 * marked
7986 			 */
7987 			ptgt->tgt_state &= ~FCP_TGT_MARK;
7988 			rval = fcp_offline_target(pptr, ptgt, link_cnt,
7989 			    tgt_cnt, 0, 0);
7990 		} else {
7991 			ptgt->tgt_state &= ~FCP_TGT_BUSY;
7992 
7993 			/* create the LUNs */
7994 			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7995 				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7996 				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7997 				    cause);
7998 				ptgt->tgt_device_created = 1;
7999 			} else {
8000 				fcp_update_tgt_state(ptgt, FCP_RESET,
8001 				    FCP_LUN_BUSY);
8002 			}
8003 		}
8004 	}
8005 
8006 	mutex_exit(&ptgt->tgt_mutex);
8007 
8008 	return (rval);
8009 }
8010 
8011 
8012 /*
8013  * this routine is called to finish port initialization
8014  *
8015  * Each port has a "temp" counter -- when a state change happens (e.g.
8016  * port online), the temp count is set to the number of devices in the map.
8017  * Then, as each device gets "discovered", the temp counter is decremented
8018  * by one.  When this count reaches zero we know that all of the devices
8019  * in the map have been discovered (or an error has occurred), so we can
8020  * then finish initialization -- which is done by this routine (well, this
8021  * and fcp_finish_tgt())
8022  *
8023  * acquires and releases the global mutex
8024  *
8025  * called with the port mutex owned
8026  */
8027 static void
8028 fcp_finish_init(struct fcp_port *pptr)
8029 {
8030 #ifdef	DEBUG
8031 	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8032 	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8033 	    FCP_STACK_DEPTH);
8034 #endif /* DEBUG */
8035 
8036 	ASSERT(mutex_owned(&pptr->port_mutex));
8037 
8038 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8039 	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8040 	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8041 
8042 	if ((pptr->port_state & FCP_STATE_ONLINING) &&
8043 	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
8044 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8045 		pptr->port_state &= ~FCP_STATE_ONLINING;
8046 		pptr->port_state |= FCP_STATE_ONLINE;
8047 	}
8048 
8049 	/* Wake up threads waiting on config done */
8050 	cv_broadcast(&pptr->port_config_cv);
8051 }
8052 
8053 
8054 /*
8055  * called from fcp_finish_init to create the LUNs for a target
8056  *
8057  * called with the port mutex owned
8058  */
8059 static void
8060 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8061 {
8062 	struct fcp_lun	*plun;
8063 	struct fcp_port	*pptr;
8064 	child_info_t		*cip = NULL;
8065 
8066 	ASSERT(ptgt != NULL);
8067 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8068 
8069 	pptr = ptgt->tgt_port;
8070 
8071 	ASSERT(pptr != NULL);
8072 
8073 	/* scan all LUNs for this target */
8074 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8075 		if (plun->lun_state & FCP_LUN_OFFLINE) {
8076 			continue;
8077 		}
8078 
8079 		if (plun->lun_state & FCP_LUN_MARK) {
8080 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8081 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8082 			    "fcp_create_luns: offlining marked LUN!");
8083 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8084 			continue;
8085 		}
8086 
8087 		plun->lun_state &= ~FCP_LUN_BUSY;
8088 
8089 		/*
8090 		 * There are conditions in which the FCP_LUN_INIT flag is
8091 		 * cleared but we still have a valid plun->lun_cip. To cover
8092 		 * that case, also CLEAR_BUSY whenever lun_cip is valid.
8093 		 */
8094 		if (plun->lun_mpxio && plun->lun_cip &&
8095 		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8096 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8097 		    0, 0))) {
8098 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
8099 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
8100 			    "fcp_create_luns: enable lun %p failed!",
8101 			    plun);
8102 		}
8103 
8104 		if (plun->lun_state & FCP_LUN_INIT &&
8105 		    !(plun->lun_state & FCP_LUN_CHANGED)) {
8106 			continue;
8107 		}
8108 
8109 		if (cause == FCP_CAUSE_USER_CREATE) {
8110 			continue;
8111 		}
8112 
8113 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
8114 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
8115 		    "create_luns: passing ONLINE elem to HP thread");
8116 
8117 		/*
8118 		 * If the lun has changed, prepare for offlining the old path.
8119 		 * Do not offline the old path right now, since it may still
8120 		 * be open.
8121 		 */
8122 		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8123 			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8124 		}
8125 
8126 		/* pass an ONLINE element to the hotplug thread */
8127 		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8128 		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8129 
8130 			/*
8131 			 * We cannot attach synchronously (i.e. pass
8132 			 * NDI_ONLINE_ATTACH) here, as we might be
8133 			 * coming from an interrupt or callback
8134 			 * thread.
8135 			 */
8136 			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8137 			    link_cnt, tgt_cnt, 0, 0)) {
8138 				fcp_log(CE_CONT, pptr->port_dip,
8139 				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8140 				    plun->lun_tgt->tgt_d_id, plun->lun_num);
8141 			}
8142 		}
8143 	}
8144 }
8145 
8146 
8147 /*
8148  * function to online/offline devices
8149  */
8150 static int
8151 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8152     int online, int lcount, int tcount, int flags)
8153 {
8154 	int			rval = NDI_FAILURE;
8155 	boolean_t		enteredv;
8156 	child_info_t		*ccip;
8157 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
8158 	int			is_mpxio = pptr->port_mpxio;
8159 	dev_info_t		*cdip, *pdip;
8160 	char			*devname;
8161 
8162 	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8163 		/*
8164 		 * By the time this event gets serviced, lun_cip and lun_mpxio
8165 		 * have changed, so the event should be invalidated now.
8166 		 */
8167 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8168 		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8169 		    "plun: %p, cip: %p, what:%d", plun, cip, online);
8170 		return (rval);
8171 	}
8172 
8173 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
8174 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
8175 	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8176 	    "flags=%x mpxio=%x\n",
8177 	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8178 	    plun->lun_mpxio);
8179 
8180 	/*
8181 	 * lun_mpxio needs checking here because we can end up in a race
8182 	 * condition where this task has been dispatched while lun_mpxio is
8183 	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8184 	 * enable MPXIO for the LUN, was unable to, and hence cleared
8185 	 * the flag. We rely on the serialization of the tasks here. We return
8186 	 * NDI_SUCCESS so any callers continue without reporting spurious
8187 	 * errors, and they still think we're an MPXIO LUN.
8188 	 */
8189 
8190 	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8191 	    online == FCP_MPXIO_PATH_SET_BUSY) {
8192 		if (plun->lun_mpxio) {
8193 			rval = fcp_update_mpxio_path(plun, cip, online);
8194 		} else {
8195 			rval = NDI_SUCCESS;
8196 		}
8197 		return (rval);
8198 	}
8199 
8200 	/*
8201 	 * Explicit devfs_clean() due to ndi_devi_offline() not
8202 	 * executing devfs_clean() if parent lock is held.
8203 	 */
8204 	ASSERT(!servicing_interrupt());
8205 	if (online == FCP_OFFLINE) {
8206 		if (plun->lun_mpxio == 0) {
8207 			if (plun->lun_cip == cip) {
8208 				cdip = DIP(plun->lun_cip);
8209 			} else {
8210 				cdip = DIP(cip);
8211 			}
8212 		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
8213 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8214 		} else if ((plun->lun_cip != cip) && cip) {
8215 			/*
8216 			 * This means a DTYPE/GUID change; we shall get the
8217 			 * dip of the old cip instead of the current lun_cip.
8218 			 */
8219 			cdip = mdi_pi_get_client(PIP(cip));
8220 		}
8221 		if (cdip) {
8222 			if (i_ddi_devi_attached(cdip)) {
8223 				pdip = ddi_get_parent(cdip);
8224 				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8225 				ndi_devi_enter(pdip);
8226 				(void) ddi_deviname(cdip, devname);
8227 				/*
8228 				 * Release parent lock before calling
8229 				 * devfs_clean().
8230 				 */
8231 				ndi_devi_exit(pdip);
8232 				(void) devfs_clean(pdip, devname + 1,
8233 				    DV_CLEAN_FORCE);
8234 				kmem_free(devname, MAXNAMELEN + 1);
8235 			}
8236 		}
8237 	}
8238 
8239 	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8240 		return (NDI_FAILURE);
8241 	}
8242 
8243 	if (is_mpxio) {
8244 		mdi_devi_enter(pptr->port_dip, &enteredv);
8245 	} else {
8246 		ndi_devi_enter(pptr->port_dip);
8247 	}
8248 
8249 	mutex_enter(&pptr->port_mutex);
8250 	mutex_enter(&plun->lun_mutex);
8251 
8252 	if (online == FCP_ONLINE) {
8253 		ccip = fcp_get_cip(plun, cip, lcount, tcount);
8254 		if (ccip == NULL) {
8255 			goto fail;
8256 		}
8257 	} else {
8258 		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8259 			goto fail;
8260 		}
8261 		ccip = cip;
8262 	}
8263 
8264 	if (online == FCP_ONLINE) {
8265 		rval = fcp_online_child(plun, ccip, lcount, tcount, flags);
8266 		fc_ulp_log_device_event(pptr->port_fp_handle,
8267 		    FC_ULP_DEVICE_ONLINE);
8268 	} else {
8269 		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags);
8270 		fc_ulp_log_device_event(pptr->port_fp_handle,
8271 		    FC_ULP_DEVICE_OFFLINE);
8272 	}
8273 
8274 fail:	mutex_exit(&plun->lun_mutex);
8275 	mutex_exit(&pptr->port_mutex);
8276 
8277 	if (is_mpxio) {
8278 		mdi_devi_exit(pptr->port_dip, enteredv);
8279 	} else {
8280 		ndi_devi_exit(pptr->port_dip);
8281 	}
8282 
8283 	fc_ulp_idle_port(pptr->port_fp_handle);
8284 
8285 	return (rval);
8286 }
8287 
8288 
8289 /*
8290  * take a target offline by taking all of its LUNs offline
8291  */
8292 /*ARGSUSED*/
8293 static int
8294 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8295     int link_cnt, int tgt_cnt, int nowait, int flags)
8296 {
8297 	struct fcp_tgt_elem	*elem;
8298 
8299 	ASSERT(mutex_owned(&pptr->port_mutex));
8300 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8301 
8302 	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8303 
8304 	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8305 	    ptgt->tgt_change_cnt)) {
8306 		mutex_exit(&ptgt->tgt_mutex);
8307 		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8308 		mutex_enter(&ptgt->tgt_mutex);
8309 
8310 		return (0);
8311 	}
8312 
8313 	ptgt->tgt_pd_handle = NULL;
8314 	mutex_exit(&ptgt->tgt_mutex);
8315 	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8316 	mutex_enter(&ptgt->tgt_mutex);
8317 
8318 	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8319 
8320 	if (ptgt->tgt_tcap &&
8321 	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8322 		elem->flags = flags;
8323 		elem->time = fcp_watchdog_time;
8324 		if (nowait == 0) {
8325 			elem->time += fcp_offline_delay;
8326 		}
8327 		elem->ptgt = ptgt;
8328 		elem->link_cnt = link_cnt;
8329 		elem->tgt_cnt = tgt_cnt;
8330 		elem->next = pptr->port_offline_tgts;
8331 		pptr->port_offline_tgts = elem;
8332 	} else {
8333 		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8334 	}
8335 
8336 	return (1);
8337 }
8338 
8339 
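/*
 * Offline a target immediately: re-enable relogin for its port WWN, mark
 * the target offline and offline all of its LUNs.
 *
 * Called with both the port mutex and the target mutex held.
 */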
8340 static void
8341 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8342     int link_cnt, int tgt_cnt, int flags)
8343 {
8344 	ASSERT(mutex_owned(&pptr->port_mutex));
8345 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8346 
8347 	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8348 	ptgt->tgt_state = FCP_TGT_OFFLINE;
8349 	ptgt->tgt_pd_handle = NULL;
8350 	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8351 }
8352 
8353 
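/*
 * Offline every LUN of the given target that is not already offline.
 *
 * Called with both the port mutex and the target mutex held.
 */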
8354 static void
8355 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8356     int flags)
8357 {
8358 	struct	fcp_lun	*plun;
8359 
8360 	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8361 	ASSERT(mutex_owned(&ptgt->tgt_mutex));
8362 
8363 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8364 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8365 			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8366 		}
8367 	}
8368 }
8369 
8370 
8371 /*
8372  * take a LUN offline
8373  *
8374  * enters and leaves with the target mutex held, releasing it in the process
8375  *
8376  * allocates memory in non-sleep mode
8377  */
8378 static void
8379 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8380     int nowait, int flags)
8381 {
8382 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
8383 	struct fcp_lun_elem	*elem;
8384 
8385 	ASSERT(plun != NULL);
8386 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8387 
8388 	if (nowait) {
8389 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8390 		return;
8391 	}
8392 
8393 	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8394 		elem->flags = flags;
8395 		elem->time = fcp_watchdog_time;
8396 		if (nowait == 0) {
8397 			elem->time += fcp_offline_delay;
8398 		}
8399 		elem->plun = plun;
8400 		elem->link_cnt = link_cnt;
8401 		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8402 		elem->next = pptr->port_offline_luns;
8403 		pptr->port_offline_luns = elem;
8404 	} else {
8405 		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8406 	}
8407 }
8408 
8409 
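/*
 * Abort any commands still queued for the LUN and, for an MPxIO LUN,
 * clear the path-busy state and disable the path ahead of the offline.
 *
 * Called with the target mutex held; drops and reacquires it.
 */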
8410 static void
8411 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8412 {
8413 	struct fcp_pkt	*head = NULL;
8414 
8415 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8416 
8417 	mutex_exit(&LUN_TGT->tgt_mutex);
8418 
8419 	head = fcp_scan_commands(plun);
8420 	if (head != NULL) {
8421 		fcp_abort_commands(head, LUN_PORT);
8422 	}
8423 
8424 	mutex_enter(&LUN_TGT->tgt_mutex);
8425 
8426 	if (plun->lun_cip && plun->lun_mpxio) {
8427 		/*
8428 		 * Inform MPxIO that the lun busy state is cleared
8429 		 */
8430 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8431 		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8432 		    0, 0)) {
8433 			fcp_log(CE_NOTE, LUN_PORT->port_dip,
8434 			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8435 			    LUN_TGT->tgt_d_id, plun->lun_num);
8436 		}
8437 		/*
8438 		 * Inform MPxIO that the lun is now marked for offline
8439 		 */
8440 		mutex_exit(&LUN_TGT->tgt_mutex);
8441 		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8442 		mutex_enter(&LUN_TGT->tgt_mutex);
8443 	}
8444 }
8445 
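/*
 * Do the actual offline of a LUN: update its offline flags, abort any
 * outstanding commands and pass an OFFLINE element to the hotplug thread.
 *
 * Called with the target mutex held.
 */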
8446 static void
8447 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8448     int flags)
8449 {
8450 	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8451 
8452 	mutex_exit(&LUN_TGT->tgt_mutex);
8453 	fcp_update_offline_flags(plun);
8454 	mutex_enter(&LUN_TGT->tgt_mutex);
8455 
8456 	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8457 
8458 	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8459 	    fcp_trace, FCP_BUF_LEVEL_4, 0,
8460 	    "offline_lun: passing OFFLINE elem to HP thread");
8461 
8462 	if (plun->lun_cip) {
8463 		fcp_log(CE_NOTE, LUN_PORT->port_dip,
8464 		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8465 		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8466 		    LUN_TGT->tgt_trace);
8467 
8468 		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8469 		    link_cnt, tgt_cnt, flags, 0)) {
8470 			fcp_log(CE_CONT, LUN_PORT->port_dip,
8471 			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8472 			    LUN_TGT->tgt_d_id, plun->lun_num);
8473 		}
8474 	}
8475 }
8476 
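/*
 * Walk the port's list of deferred LUN offlines and offline each entry
 * whose delay has expired, unless a link or target state change has made
 * the entry stale.  Expired entries are unlinked and freed.
 *
 * Called with the port mutex held.
 */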
8477 static void
8478 fcp_scan_offline_luns(struct fcp_port *pptr)
8479 {
8480 	struct fcp_lun_elem	*elem;
8481 	struct fcp_lun_elem	*prev;
8482 	struct fcp_lun_elem	*next;
8483 
8484 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8485 
8486 	prev = NULL;
8487 	elem = pptr->port_offline_luns;
8488 	while (elem) {
8489 		next = elem->next;
8490 		if (elem->time <= fcp_watchdog_time) {
8491 			int			changed = 1;
8492 			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;
8493 
8494 			mutex_enter(&ptgt->tgt_mutex);
8495 			if (pptr->port_link_cnt == elem->link_cnt &&
8496 			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
8497 				changed = 0;
8498 			}
8499 
8500 			if (!changed &&
8501 			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8502 				fcp_offline_lun_now(elem->plun,
8503 				    elem->link_cnt, elem->tgt_cnt, elem->flags);
8504 			}
8505 			mutex_exit(&ptgt->tgt_mutex);
8506 
8507 			kmem_free(elem, sizeof (*elem));
8508 
8509 			if (prev) {
8510 				prev->next = next;
8511 			} else {
8512 				pptr->port_offline_luns = next;
8513 			}
8514 		} else {
8515 			prev = elem;
8516 		}
8517 		elem = next;
8518 	}
8519 }
8520 
8521 
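/*
 * Walk the port's list of deferred target offlines and offline each entry
 * whose delay has expired, unless intervening state changes have made the
 * entry stale.  Expired entries are unlinked and freed.
 *
 * Called with the port mutex held.
 */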
8522 static void
8523 fcp_scan_offline_tgts(struct fcp_port *pptr)
8524 {
8525 	struct fcp_tgt_elem	*elem;
8526 	struct fcp_tgt_elem	*prev;
8527 	struct fcp_tgt_elem	*next;
8528 
8529 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
8530 
8531 	prev = NULL;
8532 	elem = pptr->port_offline_tgts;
8533 	while (elem) {
8534 		next = elem->next;
8535 		if (elem->time <= fcp_watchdog_time) {
8536 			int		outdated = 1;
8537 			struct fcp_tgt	*ptgt = elem->ptgt;
8538 
8539 			mutex_enter(&ptgt->tgt_mutex);
8540 
8541 			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8542 				/* No change on tgt since elem was created. */
8543 				outdated = 0;
8544 			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8545 			    pptr->port_link_cnt == elem->link_cnt + 1 &&
8546 			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8547 				/*
8548 				 * Exactly one thing happened to the target
8549 				 * in between: the local port went offline.
8550 				 * For fp the remote port is already gone so
8551 				 * it will not tell us again to offline the
8552 				 * target. We must offline it now.
8553 				 */
8554 				outdated = 0;
8555 			}
8556 
8557 			if (!outdated && !(ptgt->tgt_state &
8558 			    FCP_TGT_OFFLINE)) {
8559 				fcp_offline_target_now(pptr,
8560 				    ptgt, elem->link_cnt, elem->tgt_cnt,
8561 				    elem->flags);
8562 			}
8563 
8564 			mutex_exit(&ptgt->tgt_mutex);
8565 
8566 			kmem_free(elem, sizeof (*elem));
8567 
8568 			if (prev) {
8569 				prev->next = next;
8570 			} else {
8571 				pptr->port_offline_tgts = next;
8572 			}
8573 		} else {
8574 			prev = elem;
8575 		}
8576 		elem = next;
8577 	}
8578 }
8579 
8580 
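/*
 * Mark a LUN offline in its state flags and, if a child devinfo node
 * exists for it, run the FCAL remove event callbacks against that node.
 */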
8581 static void
8582 fcp_update_offline_flags(struct fcp_lun *plun)
8583 {
8584 	struct fcp_port	*pptr = LUN_PORT;
8585 	ASSERT(plun != NULL);
8586 
8587 	mutex_enter(&LUN_TGT->tgt_mutex);
8588 	plun->lun_state |= FCP_LUN_OFFLINE;
8589 	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8590 
8591 	mutex_enter(&plun->lun_mutex);
8592 	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8593 		dev_info_t *cdip = NULL;
8594 
8595 		mutex_exit(&LUN_TGT->tgt_mutex);
8596 
8597 		if (plun->lun_mpxio == 0) {
8598 			cdip = DIP(plun->lun_cip);
8599 		} else if (plun->lun_cip) {
8600 			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8601 		}
8602 
8603 		mutex_exit(&plun->lun_mutex);
8604 		if (cdip) {
8605 			(void) ndi_event_retrieve_cookie(
8606 			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8607 			    &fcp_remove_eid, NDI_EVENT_NOPASS);
8608 			(void) ndi_event_run_callbacks(
8609 			    pptr->port_ndi_event_hdl, cdip,
8610 			    fcp_remove_eid, NULL);
8611 		}
8612 	} else {
8613 		mutex_exit(&plun->lun_mutex);
8614 		mutex_exit(&LUN_TGT->tgt_mutex);
8615 	}
8616 }
8617 
8618 
8619 /*
8620  * Scan all of the command pkts for this port, moving pkts that
8621  * match our LUN onto our own list (headed by "head")
8622  */
8623 static struct fcp_pkt *
8624 fcp_scan_commands(struct fcp_lun *plun)
8625 {
8626 	struct fcp_port	*pptr = LUN_PORT;
8627 
8628 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8629 	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8630 	struct fcp_pkt	*pcmd = NULL;	/* the previous command */
8631 
8632 	struct fcp_pkt	*head = NULL;	/* head of our list */
8633 	struct fcp_pkt	*tail = NULL;	/* tail of our list */
8634 
8635 	int			cmds_found = 0;
8636 
8637 	mutex_enter(&pptr->port_pkt_mutex);
8638 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8639 		struct fcp_lun *tlun =
8640 		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8641 
8642 		ncmd = cmd->cmd_next;	/* set next command */
8643 
8644 		/*
8645 		 * if this pkt is for a different LUN or the
8646 		 * command has already been sent down, skip it.
8647 		 */
8648 		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8649 		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8650 			pcmd = cmd;
8651 			continue;
8652 		}
8653 		cmds_found++;
8654 		if (pcmd != NULL) {
8655 			ASSERT(pptr->port_pkt_head != cmd);
8656 			pcmd->cmd_next = cmd->cmd_next;
8657 		} else {
8658 			ASSERT(cmd == pptr->port_pkt_head);
8659 			pptr->port_pkt_head = cmd->cmd_next;
8660 		}
8661 
8662 		if (cmd == pptr->port_pkt_tail) {
8663 			pptr->port_pkt_tail = pcmd;
8664 			if (pcmd) {
8665 				pcmd->cmd_next = NULL;
8666 			}
8667 		}
8668 
8669 		if (head == NULL) {
8670 			head = tail = cmd;
8671 		} else {
8672 			ASSERT(tail != NULL);
8673 
8674 			tail->cmd_next = cmd;
8675 			tail = cmd;
8676 		}
8677 		cmd->cmd_next = NULL;
8678 	}
8679 	mutex_exit(&pptr->port_pkt_mutex);
8680 
8681 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8682 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
8683 	    "scan commands: %d cmd(s) found", cmds_found);
8684 
8685 	return (head);
8686 }
8687 
8688 
8689 /*
8690  * Abort all the commands in the command queue
8691  */
8692 static void
8693 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8694 {
8695 	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
8696 	struct	fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
8697 
8698 	ASSERT(mutex_owned(&pptr->port_mutex));
8699 
8700 	/* scan through the pkts and invalidate them */
8701 	for (cmd = head; cmd != NULL; cmd = ncmd) {
8702 		struct scsi_pkt *pkt = cmd->cmd_pkt;
8703 
8704 		ncmd = cmd->cmd_next;
8705 		ASSERT(pkt != NULL);
8706 
8707 		/*
8708 		 * The lun is going to be marked offline. Tell the
8709 		 * target driver not to requeue or retry this command,
8710 		 * as the device is going to be offlined soon.
8711 		 */
8712 		pkt->pkt_reason = CMD_DEV_GONE;
8713 		pkt->pkt_statistics = 0;
8714 		pkt->pkt_state = 0;
8715 
8716 		/* reset cmd flags/state */
8717 		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8718 		cmd->cmd_state = FCP_PKT_IDLE;
8719 
8720 		/*
8721 		 * ensure we have a packet completion routine,
8722 		 * then call it.
8723 		 */
8724 		ASSERT(pkt->pkt_comp != NULL);
8725 
8726 		mutex_exit(&pptr->port_mutex);
8727 		fcp_post_callback(cmd);
8728 		mutex_enter(&pptr->port_mutex);
8729 	}
8730 }
8731 
8732 
8733 /*
8734  * the pkt_comp callback for command packets
8735  */
8736 static void
8737 fcp_cmd_callback(fc_packet_t *fpkt)
8738 {
8739 	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8740 	struct scsi_pkt *pkt = cmd->cmd_pkt;
8741 	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8742 
8743 	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8744 
8745 	if (cmd->cmd_state == FCP_PKT_IDLE) {
8746 		cmn_err(CE_PANIC, "Packet already completed %p",
8747 		    (void *)cmd);
8748 	}
8749 
8750 	/*
8751 	 * Watch thread should be freeing the packet, ignore the pkt.
8752 	 */
8753 	if (cmd->cmd_state == FCP_PKT_ABORTING) {
8754 		fcp_log(CE_CONT, pptr->port_dip,
8755 		    "!FCP: Pkt completed while aborting\n");
8756 		return;
8757 	}
8758 	cmd->cmd_state = FCP_PKT_IDLE;
8759 
8760 	fcp_complete_pkt(fpkt);
8761 
8762 #ifdef	DEBUG
8763 	mutex_enter(&pptr->port_pkt_mutex);
8764 	pptr->port_npkts--;
8765 	mutex_exit(&pptr->port_pkt_mutex);
8766 #endif /* DEBUG */
8767 
8768 	fcp_post_callback(cmd);
8769 }
8770 
8771 
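/*
 * Translate a completed FC packet into the scsi_pkt handed back to the
 * target driver: copy in the FCP response, set pkt_state and pkt_resid,
 * and map FCP protocol errors and autosense data into the scsi_pkt.
 */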
8772 static void
8773 fcp_complete_pkt(fc_packet_t *fpkt)
8774 {
8775 	int			error = 0;
8776 	struct fcp_pkt	*cmd = (struct fcp_pkt *)
8777 	    fpkt->pkt_ulp_private;
8778 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
8779 	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
8780 	struct fcp_lun	*plun;
8781 	struct fcp_tgt	*ptgt;
8782 	struct fcp_rsp		*rsp;
8783 	struct scsi_address	save;
8784 
8785 #ifdef	DEBUG
8786 	save = pkt->pkt_address;
8787 #endif /* DEBUG */
8788 
8789 	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8790 
8791 	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8792 		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8793 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8794 			    sizeof (struct fcp_rsp));
8795 		}
8796 
8797 		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8798 		    STATE_SENT_CMD | STATE_GOT_STATUS;
8799 
8800 		pkt->pkt_resid = 0;
8801 
8802 		if (fpkt->pkt_datalen) {
8803 			pkt->pkt_state |= STATE_XFERRED_DATA;
8804 			if (fpkt->pkt_data_resid) {
8805 				error++;
8806 			}
8807 		}
8808 
8809 		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8810 		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8811 			/*
8812 			 * If the command came back with a check condition
8813 			 * but the response carries neither FCP response
8814 			 * info nor sense data, clear the transfer state and
8815 			 * set the residual so the command will be retried.
8816 			 */
8817 			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8818 			    !rsp->fcp_u.fcp_status.sense_len_set) {
8819 				pkt->pkt_state &= ~STATE_XFERRED_DATA;
8820 				pkt->pkt_resid = cmd->cmd_dmacount;
8821 			}
8822 		}
8823 
8824 		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8825 			return;
8826 		}
8827 
8828 		plun = ADDR2LUN(&pkt->pkt_address);
8829 		ptgt = plun->lun_tgt;
8830 		ASSERT(ptgt != NULL);
8831 
8832 		/*
8833 		 * Update the transfer resid, if appropriate
8834 		 */
8835 		if (rsp->fcp_u.fcp_status.resid_over ||
8836 		    rsp->fcp_u.fcp_status.resid_under) {
8837 			pkt->pkt_resid = rsp->fcp_resid;
8838 		}
8839 
8840 		/*
8841 		 * First see if we got a FCP protocol error.
8842 		 */
8843 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
8844 			struct fcp_rsp_info	*bep;
8845 			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8846 			    sizeof (struct fcp_rsp));
8847 
8848 			if (fcp_validate_fcp_response(rsp, pptr) !=
8849 			    FC_SUCCESS) {
8850 				pkt->pkt_reason = CMD_CMPLT;
8851 				*(pkt->pkt_scbp) = STATUS_CHECK;
8852 
8853 				fcp_log(CE_WARN, pptr->port_dip,
8854 				    "!SCSI command to d_id=0x%x lun=0x%x"
8855 				    " failed, Bad FCP response values:"
8856 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8857 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8858 				    ptgt->tgt_d_id, plun->lun_num,
8859 				    rsp->reserved_0, rsp->reserved_1,
8860 				    rsp->fcp_u.fcp_status.reserved_0,
8861 				    rsp->fcp_u.fcp_status.reserved_1,
8862 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8863 
8864 				return;
8865 			}
8866 
8867 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8868 				FCP_CP_IN(fpkt->pkt_resp +
8869 				    sizeof (struct fcp_rsp), bep,
8870 				    fpkt->pkt_resp_acc,
8871 				    sizeof (struct fcp_rsp_info));
8872 			}
8873 
8874 			if (bep->rsp_code != FCP_NO_FAILURE) {
8875 				child_info_t	*cip;
8876 
8877 				pkt->pkt_reason = CMD_TRAN_ERR;
8878 
8879 				mutex_enter(&plun->lun_mutex);
8880 				cip = plun->lun_cip;
8881 				mutex_exit(&plun->lun_mutex);
8882 
8883 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
8884 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
8885 				    "FCP response error on cmd=%p"
8886 				    " target=0x%x, cip=%p", cmd,
8887 				    ptgt->tgt_d_id, cip);
8888 			}
8889 		}
8890 
8891 		/*
8892 		 * See if we got a SCSI error with sense data
8893 		 */
8894 		if (rsp->fcp_u.fcp_status.sense_len_set) {
8895 			uchar_t				rqlen;
8896 			caddr_t				sense_from;
8897 			child_info_t			*cip;
8898 			timeout_id_t			tid;
8899 			struct scsi_arq_status		*arq;
8900 			struct scsi_extended_sense	*sense_to;
8901 
8902 			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8903 			sense_to = &arq->sts_sensedata;
8904 
8905 			rqlen = (uchar_t)min(rsp->fcp_sense_len,
8906 			    sizeof (struct scsi_extended_sense));
8907 
8908 			sense_from = (caddr_t)fpkt->pkt_resp +
8909 			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8910 
8911 			if (fcp_validate_fcp_response(rsp, pptr) !=
8912 			    FC_SUCCESS) {
8913 				pkt->pkt_reason = CMD_CMPLT;
8914 				*(pkt->pkt_scbp) = STATUS_CHECK;
8915 
8916 				fcp_log(CE_WARN, pptr->port_dip,
8917 				    "!SCSI command to d_id=0x%x lun=0x%x"
8918 				    " failed, Bad FCP response values:"
8919 				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8920 				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8921 				    ptgt->tgt_d_id, plun->lun_num,
8922 				    rsp->reserved_0, rsp->reserved_1,
8923 				    rsp->fcp_u.fcp_status.reserved_0,
8924 				    rsp->fcp_u.fcp_status.reserved_1,
8925 				    rsp->fcp_response_len, rsp->fcp_sense_len);
8926 
8927 				return;
8928 			}
8929 
8930 			/*
8931 			 * copy in sense information
8932 			 */
8933 			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8934 				FCP_CP_IN(sense_from, sense_to,
8935 				    fpkt->pkt_resp_acc, rqlen);
8936 			} else {
8937 				bcopy(sense_from, sense_to, rqlen);
8938 			}
8939 
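			/*
			 * If the sense data indicates that the LUN inventory
			 * changed or the LUN is no longer supported, schedule
			 * fcp_reconfigure_luns() to rediscover this target's
			 * LUNs, unless a rediscovery is already pending.
			 */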
8940 			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8941 			    (FCP_SENSE_NO_LUN(sense_to))) {
8942 				mutex_enter(&ptgt->tgt_mutex);
8943 				if (ptgt->tgt_tid == NULL) {
8944 					/*
8945 					 * Kick off rediscovery
8946 					 */
8947 					tid = timeout(fcp_reconfigure_luns,
8948 					    (caddr_t)ptgt, drv_usectohz(1));
8949 
8950 					ptgt->tgt_tid = tid;
8951 					ptgt->tgt_state |= FCP_TGT_BUSY;
8952 				}
8953 				mutex_exit(&ptgt->tgt_mutex);
8954 				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8955 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8956 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8957 					    "!FCP: Report Lun Has Changed"
8958 					    " target=%x", ptgt->tgt_d_id);
8959 				} else if (FCP_SENSE_NO_LUN(sense_to)) {
8960 					FCP_TRACE(fcp_logq, pptr->port_instbuf,
8961 					    fcp_trace, FCP_BUF_LEVEL_3, 0,
8962 					    "!FCP: LU Not Supported"
8963 					    " target=%x", ptgt->tgt_d_id);
8964 				}
8965 			}
8966 			ASSERT(pkt->pkt_scbp != NULL);
8967 
8968 			pkt->pkt_state |= STATE_ARQ_DONE;
8969 
8970 			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8971 
8972 			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8973 			arq->sts_rqpkt_reason = 0;
8974 			arq->sts_rqpkt_statistics = 0;
8975 
8976 			arq->sts_rqpkt_state = STATE_GOT_BUS |
8977 			    STATE_GOT_TARGET | STATE_SENT_CMD |
8978 			    STATE_GOT_STATUS | STATE_ARQ_DONE |
8979 			    STATE_XFERRED_DATA;
8980 
8981 			mutex_enter(&plun->lun_mutex);
8982 			cip = plun->lun_cip;
8983 			mutex_exit(&plun->lun_mutex);
8984 
8985 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8986 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
8987 			    "SCSI Check condition on cmd=%p target=0x%x"
8988 			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8989 			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8990 			    cmd->cmd_fcp_cmd.fcp_cdb[0],
8991 			    rsp->fcp_u.fcp_status.scsi_status,
8992 			    sense_to->es_key, sense_to->es_add_code,
8993 			    sense_to->es_qual_code);
8994 		}
8995 	} else {
8996 		plun = ADDR2LUN(&pkt->pkt_address);
8997 		ptgt = plun->lun_tgt;
8998 		ASSERT(ptgt != NULL);
8999 
9000 		/*
9001 		 * Work harder to translate errors into target driver
9002 		 * understandable ones. Note with despair that the target
9003 		 * drivers don't decode pkt_state and pkt_reason exhaustively
9004 		 * drivers don't decode pkt_state and pkt_reason exhaustively.
9005 		 * They resort to using the big hammer most often, which
9006 		 * may not get fixed in the lifetime of this driver.
9007 		pkt->pkt_state = 0;
9008 		pkt->pkt_statistics = 0;
9009 
9010 		switch (fpkt->pkt_state) {
9011 		case FC_PKT_TRAN_ERROR:
9012 			switch (fpkt->pkt_reason) {
9013 			case FC_REASON_OVERRUN:
9014 				pkt->pkt_reason = CMD_CMD_OVR;
9015 				pkt->pkt_statistics |= STAT_ABORTED;
9016 				break;
9017 
9018 			case FC_REASON_XCHG_BSY: {
9019 				caddr_t ptr;
9020 
9021 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9022 
9023 				ptr = (caddr_t)pkt->pkt_scbp;
9024 				if (ptr) {
9025 					*ptr = STATUS_BUSY;
9026 				}
9027 				break;
9028 			}
9029 
9030 			case FC_REASON_ABORTED:
9031 				pkt->pkt_reason = CMD_TRAN_ERR;
9032 				pkt->pkt_statistics |= STAT_ABORTED;
9033 				break;
9034 
9035 			case FC_REASON_ABORT_FAILED:
9036 				pkt->pkt_reason = CMD_ABORT_FAIL;
9037 				break;
9038 
9039 			case FC_REASON_NO_SEQ_INIT:
9040 			case FC_REASON_CRC_ERROR:
9041 				pkt->pkt_reason = CMD_TRAN_ERR;
9042 				pkt->pkt_statistics |= STAT_ABORTED;
9043 				break;
9044 			default:
9045 				pkt->pkt_reason = CMD_TRAN_ERR;
9046 				break;
9047 			}
9048 			break;
9049 
9050 		case FC_PKT_PORT_OFFLINE: {
9051 			dev_info_t	*cdip = NULL;
9052 			caddr_t		ptr;
9053 
9054 			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9055 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9056 				    fcp_trace, FCP_BUF_LEVEL_8, 0,
9057 				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9058 				    ptgt->tgt_d_id);
9059 			}
9060 
9061 			mutex_enter(&plun->lun_mutex);
9062 			if (plun->lun_mpxio == 0) {
9063 				cdip = DIP(plun->lun_cip);
9064 			} else if (plun->lun_cip) {
9065 				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9066 			}
9067 
9068 			mutex_exit(&plun->lun_mutex);
9069 
9070 			if (cdip) {
9071 				(void) ndi_event_retrieve_cookie(
9072 				    pptr->port_ndi_event_hdl, cdip,
9073 				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
9074 				    NDI_EVENT_NOPASS);
9075 				(void) ndi_event_run_callbacks(
9076 				    pptr->port_ndi_event_hdl, cdip,
9077 				    fcp_remove_eid, NULL);
9078 			}
9079 
9080 			/*
9081 			 * If the link goes off-line for a LIP, this
9082 			 * will cause an error to the ST, SG and SGEN
9083 			 * drivers. By setting BUSY we give the drivers
9084 			 * the chance to retry before they give up on
9085 			 * the job. ST will remember how many times it
9086 			 * has retried.
9087 			 */
9088 
9089 			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9090 			    (plun->lun_type == DTYPE_CHANGER)) {
9091 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9092 				ptr = (caddr_t)pkt->pkt_scbp;
9093 				if (ptr) {
9094 					*ptr = STATUS_BUSY;
9095 				}
9096 			} else {
9097 				pkt->pkt_reason = CMD_TRAN_ERR;
9098 				pkt->pkt_statistics |= STAT_BUS_RESET;
9099 			}
9100 			break;
9101 		}
9102 
9103 		case FC_PKT_TRAN_BSY:
9104 			/*
9105 			 * Use the ssd Qfull handling here.
9106 			 */
9107 			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
9108 			pkt->pkt_state = STATE_GOT_BUS;
9109 			break;
9110 
9111 		case FC_PKT_TIMEOUT:
9112 			pkt->pkt_reason = CMD_TIMEOUT;
9113 			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9114 				pkt->pkt_statistics |= STAT_TIMEOUT;
9115 			} else {
9116 				pkt->pkt_statistics |= STAT_ABORTED;
9117 			}
9118 			break;
9119 
9120 		case FC_PKT_LOCAL_RJT:
9121 			switch (fpkt->pkt_reason) {
9122 			case FC_REASON_OFFLINE: {
9123 				dev_info_t	*cdip = NULL;
9124 
9125 				mutex_enter(&plun->lun_mutex);
9126 				if (plun->lun_mpxio == 0) {
9127 					cdip = DIP(plun->lun_cip);
9128 				} else if (plun->lun_cip) {
9129 					cdip = mdi_pi_get_client(
9130 					    PIP(plun->lun_cip));
9131 				}
9132 				mutex_exit(&plun->lun_mutex);
9133 
9134 				if (cdip) {
9135 					(void) ndi_event_retrieve_cookie(
9136 					    pptr->port_ndi_event_hdl, cdip,
9137 					    FCAL_REMOVE_EVENT,
9138 					    &fcp_remove_eid,
9139 					    NDI_EVENT_NOPASS);
9140 					(void) ndi_event_run_callbacks(
9141 					    pptr->port_ndi_event_hdl,
9142 					    cdip, fcp_remove_eid, NULL);
9143 				}
9144 
9145 				pkt->pkt_reason = CMD_TRAN_ERR;
9146 				pkt->pkt_statistics |= STAT_BUS_RESET;
9147 
9148 				break;
9149 			}
9150 
9151 			case FC_REASON_NOMEM:
9152 			case FC_REASON_QFULL: {
9153 				caddr_t ptr;
9154 
9155 				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
9156 				ptr = (caddr_t)pkt->pkt_scbp;
9157 				if (ptr) {
9158 					*ptr = STATUS_BUSY;
9159 				}
9160 				break;
9161 			}
9162 
9163 			case FC_REASON_DMA_ERROR:
9164 				pkt->pkt_reason = CMD_DMA_DERR;
9165 				pkt->pkt_statistics |= STAT_ABORTED;
9166 				break;
9167 
9168 			case FC_REASON_CRC_ERROR:
9169 			case FC_REASON_UNDERRUN: {
9170 				uchar_t		status;
9171 				/*
9172 				 * Workaround for Bugid 4240945: the IB on
9173 				 * the A5k doesn't set the underrun bit in
9174 				 * the FCP status when it transfers less than
9175 				 * the requested amount of data. Work around
9176 				 * the ses problem to keep luxadm happy until
9177 				 * the IB firmware is fixed.
9178 				 */
9179 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9180 					FCP_CP_IN(fpkt->pkt_resp, rsp,
9181 					    fpkt->pkt_resp_acc,
9182 					    sizeof (struct fcp_rsp));
9183 				}
9184 				status = rsp->fcp_u.fcp_status.scsi_status;
9185 				if (((plun->lun_type & DTYPE_MASK) ==
9186 				    DTYPE_ESI) && (status == STATUS_GOOD)) {
9187 					pkt->pkt_reason = CMD_CMPLT;
9188 					*pkt->pkt_scbp = status;
9189 					pkt->pkt_resid = 0;
9190 				} else {
9191 					pkt->pkt_reason = CMD_TRAN_ERR;
9192 					pkt->pkt_statistics |= STAT_ABORTED;
9193 				}
9194 				break;
9195 			}
9196 
9197 			case FC_REASON_NO_CONNECTION:
9198 			case FC_REASON_UNSUPPORTED:
9199 			case FC_REASON_ILLEGAL_REQ:
9200 			case FC_REASON_BAD_SID:
9201 			case FC_REASON_DIAG_BUSY:
9202 			case FC_REASON_FCAL_OPN_FAIL:
9203 			case FC_REASON_BAD_XID:
9204 			default:
9205 				pkt->pkt_reason = CMD_TRAN_ERR;
9206 				pkt->pkt_statistics |= STAT_ABORTED;
9207 				break;
9208 
9209 			}
9210 			break;
9211 
9212 		case FC_PKT_NPORT_RJT:
9213 		case FC_PKT_FABRIC_RJT:
9214 		case FC_PKT_NPORT_BSY:
9215 		case FC_PKT_FABRIC_BSY:
9216 		default:
9217 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9218 			    fcp_trace, FCP_BUF_LEVEL_8, 0,
9219 			    "FC Status 0x%x, reason 0x%x",
9220 			    fpkt->pkt_state, fpkt->pkt_reason);
9221 			pkt->pkt_reason = CMD_TRAN_ERR;
9222 			pkt->pkt_statistics |= STAT_ABORTED;
9223 			break;
9224 		}
9225 
9226 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9227 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
9228 		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9229 		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9230 		    fpkt->pkt_reason);
9231 	}
9232 
9233 	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9234 }
9235 
9236 
9237 static int
9238 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9239 {
9240 	if (rsp->reserved_0 || rsp->reserved_1 ||
9241 	    rsp->fcp_u.fcp_status.reserved_0 ||
9242 	    rsp->fcp_u.fcp_status.reserved_1) {
9243 		/*
9244 		 * These reserved fields should ideally be zero. FCP-2 does say
9245 		 * that the recipient need not check for reserved fields to be
9246 		 * zero. If they are not zero, we will not make a fuss about it
9247 		 * - just log it (in debug to both trace buffer and messages
9248 		 * file and to trace buffer only in non-debug) and move on.
9249 		 *
9250 		 * Non-zero reserved fields were seen with minnows.
9251 		 *
9252 		 * qlc takes care of some of this but we cannot assume that all
9253 		 * FCAs will do so.
9254 		 */
9255 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9256 		    FCP_BUF_LEVEL_5, 0,
9257 		    "Got fcp response packet with non-zero reserved fields "
9258 		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9259 		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
9260 		    rsp->reserved_0, rsp->reserved_1,
9261 		    rsp->fcp_u.fcp_status.reserved_0,
9262 		    rsp->fcp_u.fcp_status.reserved_1);
9263 	}
9264 
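	/*
	 * Reject responses whose response info or sense data lengths
	 * cannot fit within FCP_MAX_RSP_IU_SIZE.
	 */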
9265 	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9266 	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9267 		return (FC_FAILURE);
9268 	}
9269 
9270 	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9271 	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9272 	    sizeof (struct fcp_rsp))) {
9273 		return (FC_FAILURE);
9274 	}
9275 
9276 	return (FC_SUCCESS);
9277 }
9278 
9279 
9280 /*
9281  * This is called when there is a change in the device state. The case we're
9282  * handling here is: if the d_id does not match, offline this tgt and online
9283  * a new tgt with the new d_id.	 Called from fcp_handle_devices with
9284  * port_mutex held.
9285  */
9286 static int
9287 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9288     fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9289 {
9290 	ASSERT(mutex_owned(&pptr->port_mutex));
9291 
9292 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
9293 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
9294 	    "Starting fcp_device_changed...");
9295 
9296 	/*
9297 	 * The two cases where this routine is called are when the device
9298 	 * changes its d_id or its hard address.
9299 	 */
9300 	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9301 	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
9302 	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9303 
9304 		/* offline this target */
9305 		mutex_enter(&ptgt->tgt_mutex);
9306 		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9307 			(void) fcp_offline_target(pptr, ptgt, link_cnt,
9308 			    0, 1, NDI_DEVI_REMOVE);
9309 		}
9310 		mutex_exit(&ptgt->tgt_mutex);
9311 
9312 		fcp_log(CE_NOTE, pptr->port_dip,
9313 		    "Change in target properties: Old D_ID=%x New D_ID=%x"
9314 		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9315 		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9316 		    map_entry->map_hard_addr.hard_addr);
9317 	}
9318 
9319 	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9320 	    link_cnt, tgt_cnt, cause));
9321 }
9322 
9323 /*
9324  *     Function: fcp_alloc_lun
9325  *
9326  *  Description: Creates a new lun structure and adds it to the list
9327  *		 of luns of the target.
9328  *
9329  *     Argument: ptgt		Target the lun will belong to.
9330  *
9331  * Return Value: NULL		Failed
9332  *		 Not NULL	Succeeded
9333  *
9334  *	Context: Kernel context
9335  */
9336 static struct fcp_lun *
9337 fcp_alloc_lun(struct fcp_tgt *ptgt)
9338 {
9339 	struct fcp_lun *plun;
9340 
9341 	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9342 	if (plun != NULL) {
9343 		/*
9344 		 * Initialize the mutex before putting the lun in the target
9345 		 * list, especially before releasing the target mutex.
9346 		 */
9347 		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9348 		plun->lun_tgt = ptgt;
9349 
9350 		mutex_enter(&ptgt->tgt_mutex);
9351 		plun->lun_next = ptgt->tgt_lun;
9352 		ptgt->tgt_lun = plun;
9353 		plun->lun_old_guid = NULL;
9354 		plun->lun_old_guid_size = 0;
9355 		mutex_exit(&ptgt->tgt_mutex);
9356 	}
9357 
9358 	return (plun);
9359 }
9360 
9361 /*
9362  *     Function: fcp_dealloc_lun
9363  *
9364  *  Description: Frees the LUN structure passed by the caller.
9365  *
9366  *     Argument: plun		LUN structure to free.
9367  *
9368  * Return Value: None
9369  *
9370  *	Context: Kernel context.
9371  */
9372 static void
9373 fcp_dealloc_lun(struct fcp_lun *plun)
9374 {
9375 	mutex_enter(&plun->lun_mutex);
9376 	if (plun->lun_cip) {
9377 		fcp_remove_child(plun);
9378 	}
9379 	mutex_exit(&plun->lun_mutex);
9380 
9381 	mutex_destroy(&plun->lun_mutex);
9382 	if (plun->lun_guid) {
9383 		kmem_free(plun->lun_guid, plun->lun_guid_size);
9384 	}
9385 	if (plun->lun_old_guid) {
9386 		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9387 	}
9388 	kmem_free(plun, sizeof (*plun));
9389 }
9390 
9391 /*
9392  *     Function: fcp_alloc_tgt
9393  *
9394  *  Description: Creates a new target structure and adds it to the port
9395  *		 hash list.
9396  *
9397  *     Argument: pptr		fcp port structure
9398  *		 *map_entry	entry describing the target to create
9399  *		 link_cnt	Link state change counter
9400  *
9401  * Return Value: NULL		Failed
9402  *		 Not NULL	Succeeded
9403  *
9404  *	Context: Kernel context.
9405  */
9406 static struct fcp_tgt *
9407 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9408 {
9409 	int			hash;
9410 	uchar_t			*wwn;
9411 	struct fcp_tgt	*ptgt;
9412 
9413 	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9414 	if (ptgt != NULL) {
9415 		mutex_enter(&pptr->port_mutex);
9416 		if (link_cnt != pptr->port_link_cnt) {
9417 			/*
9418 			 * oh oh -- another link reset
9419 			 * in progress -- give up
9420 			 */
9421 			mutex_exit(&pptr->port_mutex);
9422 			kmem_free(ptgt, sizeof (*ptgt));
9423 			ptgt = NULL;
9424 		} else {
9425 			/*
9426 			 * initialize the mutex before putting the target in
9427 			 * the port wwn list, especially before releasing the port
9428 			 * mutex.
9429 			 */
9430 			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9431 
9432 			/* add new target entry to the port's hash list */
9433 			wwn = (uchar_t *)&map_entry->map_pwwn;
9434 			hash = FCP_HASH(wwn);
9435 
9436 			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9437 			pptr->port_tgt_hash_table[hash] = ptgt;
9438 
9439 			/* save cross-ptr */
9440 			ptgt->tgt_port = pptr;
9441 
9442 			ptgt->tgt_change_cnt = 1;
9443 
9444 			/* initialize the target manual_config_only flag */
9445 			if (fcp_enable_auto_configuration) {
9446 				ptgt->tgt_manual_config_only = 0;
9447 			} else {
9448 				ptgt->tgt_manual_config_only = 1;
9449 			}
9450 
9451 			mutex_exit(&pptr->port_mutex);
9452 		}
9453 	}
9454 
9455 	return (ptgt);
9456 }
9457 
9458 /*
9459  *     Function: fcp_dealloc_tgt
9460  *
9461  *  Description: Frees the target structure passed by the caller.
9462  *
9463  *     Argument: ptgt		Target structure to free.
9464  *
9465  * Return Value: None
9466  *
9467  *	Context: Kernel context.
9468  */
9469 static void
9470 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9471 {
9472 	mutex_destroy(&ptgt->tgt_mutex);
9473 	kmem_free(ptgt, sizeof (*ptgt));
9474 }
9475 
9476 
9477 /*
9478  * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9479  *
9480  *	Device discovery commands will not be retried forever, as
9481  *	this will have repercussions on other devices that need to
9482  *	be submitted to the hotplug thread. After a quick glance
9483  *	at the SCSI-3 spec, it was found that the spec doesn't
9484  *	mandate a forever retry, but rather recommends a delayed retry.
9485  *
9486  *	Since the Photon IB is single threaded, STATUS_BUSY is common
9487  *	in a 4+ initiator environment. Make sure the total time
9488  *	spent on retries (including the command timeout) does not
9489  *	exceed 60 seconds.
9490  */
9491 static void
9492 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9493 {
9494 	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9495 	struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9496 
9497 	mutex_enter(&pptr->port_mutex);
9498 	mutex_enter(&ptgt->tgt_mutex);
9499 	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9500 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
9501 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
9502 		    "fcp_queue_ipkt,1:state change occurred"
9503 		    " for D_ID=0x%x", ptgt->tgt_d_id);
9504 		mutex_exit(&ptgt->tgt_mutex);
9505 		mutex_exit(&pptr->port_mutex);
9506 		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9507 		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
9508 		fcp_icmd_free(pptr, icmd);
9509 		return;
9510 	}
9511 	mutex_exit(&ptgt->tgt_mutex);
9512 
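	/*
	 * Back off the retry: each successive retry of this internal
	 * packet is pushed one more watchdog tick into the future.
	 */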
9513 	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9514 
9515 	if (pptr->port_ipkt_list != NULL) {
9516 		/* add pkt to front of doubly-linked list */
9517 		pptr->port_ipkt_list->ipkt_prev = icmd;
9518 		icmd->ipkt_next = pptr->port_ipkt_list;
9519 		pptr->port_ipkt_list = icmd;
9520 		icmd->ipkt_prev = NULL;
9521 	} else {
9522 		/* this is the first/only pkt on the list */
9523 		pptr->port_ipkt_list = icmd;
9524 		icmd->ipkt_next = NULL;
9525 		icmd->ipkt_prev = NULL;
9526 	}
9527 	mutex_exit(&pptr->port_mutex);
9528 }
9529 
9530 /*
9531  *     Function: fcp_transport
9532  *
9533  *  Description: This function submits the Fibre Channel packet to the transport
9534  *		 layer by calling fc_ulp_transport().  If fc_ulp_transport()
9535  *		 fails the submission, the treatment depends on the value of
9536  *		 the variable internal.
9537  *
9538  *     Argument: port_handle	fp/fctl port handle.
9539  *		 *fpkt		Packet to submit to the transport layer.
9540  *		 internal	Not zero when it's an internal packet.
9541  *
9542  * Return Value: FC_TRAN_BUSY
9543  *		 FC_STATEC_BUSY
9544  *		 FC_OFFLINE
9545  *		 FC_LOGINREQ
9546  *		 FC_DEVICE_BUSY
9547  *		 FC_SUCCESS
9548  */
9549 static int
9550 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9551 {
9552 	int	rval;
9553 
9554 	rval = fc_ulp_transport(port_handle, fpkt);
9555 	if (rval == FC_SUCCESS) {
9556 		return (rval);
9557 	}
9558 
9559 	/*
9560 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to
9561 	 * transport a command. If the underlying modules see a state
9562 	 * change, or if a port is OFFLINE, that state change hasn't
9563 	 * reached FCP yet, so re-queue the command for deferred
9564 	 * submission.
9565 	 */
9566 	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9567 	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9568 	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9569 		/*
9570 		 * Defer packet re-submission. An indefinite hang is possible
9571 		 * on internal commands if the port driver sends FC_STATEC_BUSY
9572 		 * forever, but that shouldn't happen in a good environment.
9573 		 * Limiting re-transport for internal commands is probably a
9574 		 * good idea.
9575 		 * A race condition can happen when a port sees a barrage of
9576 		 * link transitions offline to online. If the FCTL has
9577 		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9578 		 * internal commands should be queued to do the discovery.
9579 		 * The race condition is when an online comes and FCP starts
9580 		 * its internal discovery and the link goes offline. It is
9581 		 * possible that the statec_callback has not reached FCP
9582 		 * and FCP is carrying on with its internal discovery.
9583 		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9584 		 * that the link has gone offline. At this point FCP should
9585 		 * drop all the internal commands and wait for the
9586 		 * statec_callback. It will be facilitated by incrementing
9587 		 * port_link_cnt.
9588 		 *
9589 		 * For external commands, the (FC) pkt_timeout is decremented
9590 		 * by the queue delay added by our driver. Care is taken to
9591 		 * ensure that it doesn't become zero (zero means no timeout).
9592 		 * If the time expires while the command is still in the driver
9593 		 * queue, the watch thread will return it to the original caller
9594 		 * indicating that the command has timed out.
9595 		 */
9596 		if (internal) {
9597 			char			*op;
9598 			struct fcp_ipkt	*icmd;
9599 
9600 			icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9601 			switch (icmd->ipkt_opcode) {
9602 			case SCMD_REPORT_LUN:
9603 				op = "REPORT LUN";
9604 				break;
9605 
9606 			case SCMD_INQUIRY:
9607 				op = "INQUIRY";
9608 				break;
9609 
9610 			case SCMD_INQUIRY_PAGE83:
9611 				op = "INQUIRY-83";
9612 				break;
9613 
9614 			default:
9615 				op = "Internal SCSI COMMAND";
9616 				break;
9617 			}
9618 
9619 			if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9620 			    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9621 				rval = FC_SUCCESS;
9622 			}
9623 		} else {
9624 			struct fcp_pkt *cmd;
9625 			struct fcp_port *pptr;
9626 
9627 			cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9628 			cmd->cmd_state = FCP_PKT_IDLE;
9629 			pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9630 
9631 			if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9632 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9633 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
9634 				    "fcp_transport: xport busy for pkt %p",
9635 				    cmd->cmd_pkt);
9636 				rval = FC_TRAN_BUSY;
9637 			} else {
9638 				fcp_queue_pkt(pptr, cmd);
9639 				rval = FC_SUCCESS;
9640 			}
9641 		}
9642 	}
9643 
9644 	return (rval);
9645 }
9646 
9647 /*VARARGS3*/
9648 static void
9649 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9650 {
9651 	char		buf[256];
9652 	va_list		ap;
9653 
9654 	if (dip == NULL) {
9655 		dip = fcp_global_dip;
9656 	}
9657 
9658 	va_start(ap, fmt);
9659 	(void) vsprintf(buf, fmt, ap);
9660 	va_end(ap);
9661 
9662 	scsi_log(dip, "fcp", level, buf);
9663 }
9664 
9665 /*
9666  * This function retries name server registration of the FC4 type.
9667  * It assumes that port_mutex is held.
9668  * The function does nothing if the topology is not fabric, so the
9669  * topology has to be set before this function can be called.
9670  */
9671 static void
9672 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9673 {
9674 	int	rval;
9675 
9676 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
9677 
9678 	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9679 	    ((pptr->port_topology != FC_TOP_FABRIC) &&
9680 	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9681 		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9682 			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9683 		}
9684 		return;
9685 	}
9686 	mutex_exit(&pptr->port_mutex);
9687 	rval = fcp_do_ns_registry(pptr, s_id);
9688 	mutex_enter(&pptr->port_mutex);
9689 
9690 	if (rval == 0) {
9691 		/* Registry successful. Reset flag */
9692 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9693 	}
9694 }
9695 
9696 /*
9697  * This function registers the ULP with the switch by calling the transport i/f.
9698  */
9699 static int
9700 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9701 {
9702 	fc_ns_cmd_t		ns_cmd;
9703 	ns_rfc_type_t		rfc;
9704 	uint32_t		types[8];
9705 
9706 	/*
9707 	 * Prepare the Name server structure to
9708 	 * register with the transport in case of
9709 	 * Fabric configuration.
9710 	 */
9711 	bzero(&rfc, sizeof (rfc));
9712 	bzero(types, sizeof (types));
9713 
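	/*
	 * Set the FC-4 type bit for SCSI-FCP in the RFT_ID types bitmap.
	 */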
9714 	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9715 	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9716 
9717 	rfc.rfc_port_id.port_id = s_id;
9718 	bcopy(types, rfc.rfc_types, sizeof (types));
9719 
9720 	ns_cmd.ns_flags = 0;
9721 	ns_cmd.ns_cmd = NS_RFT_ID;
9722 	ns_cmd.ns_req_len = sizeof (rfc);
9723 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
9724 	ns_cmd.ns_resp_len = 0;
9725 	ns_cmd.ns_resp_payload = NULL;
9726 
9727 	/*
9728 	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9729 	 */
9730 	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9731 		fcp_log(CE_WARN, pptr->port_dip,
9732 		    "!ns_registry: failed name server registration");
9733 		return (1);
9734 	}
9735 
9736 	return (0);
9737 }
9738 
9739 /*
9740  *     Function: fcp_handle_port_attach
9741  *
9742  *  Description: This function is called from fcp_port_attach() to attach a
9743  *		 new port. This routine does the following:
9744  *
9745  *		1) Allocates an fcp_port structure and initializes it.
9746  *		2) Tries to register the new FC-4 (FCP) capability with the name
9747  *		   server.
9748  *		3) Kicks off the enumeration of the targets/luns visible
9749  *		   through this new port.  That is done by calling
9750  *		   fcp_statec_callback() if the port is online.
9751  *
9752  *     Argument: ulph		fp/fctl port handle.
9753  *		 *pinfo		Port information.
9754  *		 s_id		Port ID.
9755  *		 instance	Device instance number for the local port
9756  *				(returned by ddi_get_instance()).
9757  *
9758  * Return Value: DDI_SUCCESS
9759  *		 DDI_FAILURE
9760  *
9761  *	Context: User and Kernel context.
9762  */
9763 /*ARGSUSED*/
9764 int
9765 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9766     uint32_t s_id, int instance)
9767 {
9768 	int			res = DDI_FAILURE;
9769 	scsi_hba_tran_t		*tran;
9770 	int			mutex_initted = FALSE;
9771 	int			hba_attached = FALSE;
9772 	int			soft_state_linked = FALSE;
9773 	int			event_bind = FALSE;
9774 	struct fcp_port		*pptr;
9775 	fc_portmap_t		*tmp_list = NULL;
9776 	uint32_t		max_cnt, alloc_cnt;
9777 	uchar_t			*boot_wwn = NULL;
9778 	uint_t			nbytes;
9779 	int			manual_cfg;
9780 
9781 	/*
9782 	 * this port instance is attaching for the first time (or after
9783 	 * having been detached earlier)
9784 	 */
9785 	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9786 	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9787 
9788 	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9789 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed; "
9790 		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9791 		    instance);
9792 		return (res);
9793 	}
9794 
9795 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9796 		/* this shouldn't happen */
9797 		ddi_soft_state_free(fcp_softstate, instance);
9798 		cmn_err(CE_WARN, "fcp: bad soft state");
9799 		return (res);
9800 	}
9801 
9802 	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9803 
9804 	/*
9805 	 * Make a copy of ulp_port_info as fctl allocates
9806 	 * a temp struct.
9807 	 */
9808 	(void) fcp_cp_pinfo(pptr, pinfo);
9809 
9810 	/*
9811 	 * Check for manual_configuration_only property.
9812 	 * Enable manual configuration if the property is
9813 	 * set to 1, otherwise disable manual configuration.
9814 	 */
9815 	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9816 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9817 	    MANUAL_CFG_ONLY,
9818 	    -1)) != -1) {
9819 		if (manual_cfg == 1) {
9820 			char	*pathname;
9821 			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9822 			(void) ddi_pathname(pptr->port_dip, pathname);
9823 			cmn_err(CE_NOTE,
9824 			    "%s (%s%d) %s is enabled via %s.conf.",
9825 			    pathname,
9826 			    ddi_driver_name(pptr->port_dip),
9827 			    ddi_get_instance(pptr->port_dip),
9828 			    MANUAL_CFG_ONLY,
9829 			    ddi_driver_name(pptr->port_dip));
9830 			fcp_enable_auto_configuration = 0;
9831 			kmem_free(pathname, MAXPATHLEN);
9832 		}
9833 	}
9834 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9835 	pptr->port_link_cnt = 1;
9836 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9837 	pptr->port_id = s_id;
9838 	pptr->port_instance = instance;
9839 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9840 	pptr->port_state = FCP_STATE_INIT;
9841 	if (pinfo->port_acc_attr == NULL) {
9842 		/*
9843 		 * The corresponding FCA doesn't support DMA at all
9844 		 */
9845 		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9846 	}
9847 
9848 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9849 
9850 	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9851 		/*
9852 		 * If FCA supports DMA in SCSI data phase, we need preallocate
9853 		 * dma cookie, so stash the cookie size
9854 		 */
9855 		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9856 		    pptr->port_data_dma_attr.dma_attr_sgllen;
9857 	}
9858 
9859 	/*
9860 	 * The two mutexes of fcp_port are initialized.	 The variable
9861 	 * mutex_initted is incremented to remember that fact.	That variable
9862 	 * is checked when the routine fails and the mutexes have to be
9863 	 * destroyed.
9864 	 */
9865 	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9866 	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9867 	mutex_initted++;
9868 
9869 	/*
9870 	 * The SCSI tran structure is allocated and initialized now.
9871 	 */
9872 	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9873 		fcp_log(CE_WARN, pptr->port_dip,
9874 		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
9875 		goto fail;
9876 	}
9877 
9878 	/* link in the transport structure then fill it in */
9879 	pptr->port_tran = tran;
9880 	tran->tran_hba_private		= pptr;
9881 	tran->tran_tgt_init		= fcp_scsi_tgt_init;
9882 	tran->tran_tgt_probe		= NULL;
9883 	tran->tran_tgt_free		= fcp_scsi_tgt_free;
9884 	tran->tran_start		= fcp_scsi_start;
9885 	tran->tran_reset		= fcp_scsi_reset;
9886 	tran->tran_abort		= fcp_scsi_abort;
9887 	tran->tran_getcap		= fcp_scsi_getcap;
9888 	tran->tran_setcap		= fcp_scsi_setcap;
9889 	tran->tran_init_pkt		= NULL;
9890 	tran->tran_destroy_pkt		= NULL;
9891 	tran->tran_dmafree		= NULL;
9892 	tran->tran_sync_pkt		= NULL;
9893 	tran->tran_reset_notify		= fcp_scsi_reset_notify;
9894 	tran->tran_get_bus_addr		= fcp_scsi_get_bus_addr;
9895 	tran->tran_get_name		= fcp_scsi_get_name;
9896 	tran->tran_clear_aca		= NULL;
9897 	tran->tran_clear_task_set	= NULL;
9898 	tran->tran_terminate_task	= NULL;
9899 	tran->tran_get_eventcookie	= fcp_scsi_bus_get_eventcookie;
9900 	tran->tran_add_eventcall	= fcp_scsi_bus_add_eventcall;
9901 	tran->tran_remove_eventcall	= fcp_scsi_bus_remove_eventcall;
9902 	tran->tran_post_event		= fcp_scsi_bus_post_event;
9903 	tran->tran_quiesce		= NULL;
9904 	tran->tran_unquiesce		= NULL;
9905 	tran->tran_bus_reset		= NULL;
9906 	tran->tran_bus_config		= fcp_scsi_bus_config;
9907 	tran->tran_bus_unconfig		= fcp_scsi_bus_unconfig;
9908 	tran->tran_bus_power		= NULL;
9909 	tran->tran_interconnect_type	= INTERCONNECT_FABRIC;
9910 
9911 	tran->tran_pkt_constructor	= fcp_kmem_cache_constructor;
9912 	tran->tran_pkt_destructor	= fcp_kmem_cache_destructor;
9913 	tran->tran_setup_pkt		= fcp_pkt_setup;
9914 	tran->tran_teardown_pkt		= fcp_pkt_teardown;
9915 	tran->tran_hba_len		= pptr->port_priv_pkt_len +
9916 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9917 	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9918 		/*
9919 		 * If the FCA doesn't support DMA, then we use different vectors
9920 		 * to minimize the effects on the DMA code flow path.
9921 		 */
9922 		tran->tran_start	   = fcp_pseudo_start;
9923 		tran->tran_init_pkt	   = fcp_pseudo_init_pkt;
9924 		tran->tran_destroy_pkt	   = fcp_pseudo_destroy_pkt;
9925 		tran->tran_sync_pkt	   = fcp_pseudo_sync_pkt;
9926 		tran->tran_dmafree	   = fcp_pseudo_dmafree;
9927 		tran->tran_setup_pkt	   = NULL;
9928 		tran->tran_teardown_pkt	   = NULL;
9929 		tran->tran_pkt_constructor = NULL;
9930 		tran->tran_pkt_destructor  = NULL;
9931 		pptr->port_data_dma_attr   = pseudo_fca_dma_attr;
9932 	}
9933 
9934 	/*
9935 	 * Allocate an ndi event handle
9936 	 */
9937 	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9938 	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9939 
9940 	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9941 	    sizeof (fcp_ndi_event_defs));
9942 
9943 	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9944 	    &pptr->port_ndi_event_hdl, NDI_SLEEP);
9945 
9946 	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9947 	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9948 	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9949 
9950 	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9951 	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9952 	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9953 		goto fail;
9954 	}
9955 	event_bind++;	/* Checked in fail case */
9956 
9957 	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9958 	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9959 	    != DDI_SUCCESS) {
9960 		fcp_log(CE_WARN, pptr->port_dip,
9961 		    "!fcp%d: scsi_hba_attach_setup failed", instance);
9962 		goto fail;
9963 	}
9964 	hba_attached++;	/* Checked in fail case */
9965 
9966 	pptr->port_mpxio = 0;
9967 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9968 	    MDI_SUCCESS) {
9969 		pptr->port_mpxio++;
9970 	}
9971 
9972 	/*
9973 	 * The following code is putting the new port structure in the global
9974 	 * list of ports and, if it is the first port to attach, it starts the
9975 	 * fcp_watchdog_tick.
9976 	 *
9977 	 * Why put this new port in the global before we are done attaching it?
9978 	 * We are actually making the structure globally known before we are
9979 	 * done attaching it.  The reason for that is: because of the code that
9980 	 * follows.  At this point the resources to handle the port are
9981 	 * allocated.  This function is now going to do the following:
9982 	 *
9983 	 *   1) It is going to try to register with the name server advertising
9984 	 *	the new FCP capability of the port.
9985 	 *   2) It is going to play the role of the fp/fctl layer by building
9986 	 *	a list of worldwide names reachable through this port and call
9987 	 *	itself on fcp_statec_callback().  That requires the port to
9988 	 *	be part of the global list.
9989 	 */
9990 	mutex_enter(&fcp_global_mutex);
9991 	if (fcp_port_head == NULL) {
9992 		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9993 	}
9994 	pptr->port_next = fcp_port_head;
9995 	fcp_port_head = pptr;
9996 	soft_state_linked++;
9997 
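	/*
	 * fcp_watchdog_init counts attached ports; the first port to
	 * attach starts the global watchdog timer.
	 */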
9998 	if (fcp_watchdog_init++ == 0) {
9999 		fcp_watchdog_tick = fcp_watchdog_timeout *
10000 		    drv_usectohz(1000000);
10001 		fcp_watchdog_id = timeout(fcp_watch, NULL,
10002 		    fcp_watchdog_tick);
10003 	}
10004 	mutex_exit(&fcp_global_mutex);
10005 
10006 	/*
10007 	 * Here an attempt is made to register with the name server, the new
10008 	 * FCP capability.  That is done using an RFT_ID to the name server.
10009 	 * It is done synchronously.  The function fcp_do_ns_registry()
10010 	 * doesn't return till the name server has responded.
10011 	 * On failures, just ignore it for now and it will get retried during
10012 	 * state change callbacks. We'll set a flag to record the failure.
10013 	 */
10014 	if (fcp_do_ns_registry(pptr, s_id)) {
10015 		mutex_enter(&pptr->port_mutex);
10016 		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10017 		mutex_exit(&pptr->port_mutex);
10018 	} else {
10019 		mutex_enter(&pptr->port_mutex);
10020 		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10021 		mutex_exit(&pptr->port_mutex);
10022 	}
10023 
10024 	/*
10025 	 * Look up the boot WWN property.
10026 	 */
10027 	if (modrootloaded != 1) {
10028 		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10029 		    ddi_get_parent(pinfo->port_dip),
10030 		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10031 		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10032 		    (nbytes == FC_WWN_SIZE)) {
10033 			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10034 		}
10035 		if (boot_wwn) {
10036 			ddi_prop_free(boot_wwn);
10037 		}
10038 	}
10039 
10040 	/*
10041 	 * Handle various topologies and link states.
10042 	 */
10043 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10044 	case FC_STATE_OFFLINE:
10045 
10046 		/*
10047 		 * we're attaching a port where the link is offline
10048 		 *
10049 		 * Wait for ONLINE, at which time a state
10050 		 * change will cause a statec_callback
10051 		 *
10052 		 * in the meantime, do not do anything
10053 		 */
10054 		res = DDI_SUCCESS;
10055 		pptr->port_state |= FCP_STATE_OFFLINE;
10056 		break;
10057 
10058 	case FC_STATE_ONLINE: {
10059 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
10060 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10061 			res = DDI_SUCCESS;
10062 			break;
10063 		}
10064 		/*
10065 		 * discover devices and create nodes (a private
10066 		 * loop or point-to-point)
10067 		 */
10068 		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10069 
10070 		/*
10071 		 * At this point we are going to build a list of all the ports
10072 		 * that	can be reached through this local port.	 It looks like
10073 		 * we cannot handle more than FCP_MAX_DEVICES per local port
10074 		 * (128).
10075 		 */
10076 		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10077 		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10078 		    KM_NOSLEEP)) == NULL) {
10079 			fcp_log(CE_WARN, pptr->port_dip,
10080 			    "!fcp%d: failed to allocate portmap",
10081 			    instance);
10082 			goto fail;
10083 		}
10084 
10085 		/*
10086 		 * fc_ulp_getportmap() is going to provide us with the list of
10087 		 * remote ports in the buffer we just allocated.  The way the
10088 		 * list is going to be retrieved depends on the topology.
10089 		 * However, if we are connected to a Fabric, a name server
10090 		 * request may be sent to get the list of FCP capable ports.
10091 		 * It should be noted that is the case the request is
10092 		 * It should be noted that in that case the request is
10093 		 * synchronous.	 This means we are stuck here till the name
10094 		 * server replies.  A lot of things can change during that
10095 		 * time, possibly including being called on
10096 		 * fcp_statec_callback() for different reasons. I'm not sure
10097 		 */
10098 		max_cnt = FCP_MAX_DEVICES;
10099 		alloc_cnt = FCP_MAX_DEVICES;
10100 		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10101 		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10102 		    FC_SUCCESS) {
10103 			caddr_t msg;
10104 
10105 			(void) fc_ulp_error(res, &msg);
10106 
10107 			/*
10108 			 * This just means the transport is busy,
10109 			 * perhaps building a portmap, so for now
10110 			 * succeed this port attach.  When the
10111 			 * transport has a new map, it'll send us
10112 			 * a state change then.
10113 			 */
10114 			fcp_log(CE_WARN, pptr->port_dip,
10115 			    "!failed to get port map : %s", msg);
10116 
10117 			res = DDI_SUCCESS;
10118 			break;	/* go return result */
10119 		}
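		/*
		 * fc_ulp_getportmap() may have handed back a larger map and
		 * updated max_cnt; remember the larger count in alloc_cnt so
		 * the buffer is freed with the correct size below.
		 */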
10120 		if (max_cnt > alloc_cnt) {
10121 			alloc_cnt = max_cnt;
10122 		}
10123 
10124 		/*
10125 		 * We are now going to call fcp_statec_callback() ourselves.
10126 		 * By issuing this call we are trying to kick off the enumera-
10127 		 * tion process.
10128 		 */
10129 		/*
10130 		 * let the state change callback do the SCSI device
10131 		 * discovery and create the devinfos
10132 		 */
10133 		fcp_statec_callback(ulph, pptr->port_fp_handle,
10134 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
10135 		    max_cnt, pptr->port_id);
10136 
10137 		res = DDI_SUCCESS;
10138 		break;
10139 	}
10140 
10141 	default:
10142 		/* unknown port state */
10143 		fcp_log(CE_WARN, pptr->port_dip,
10144 		    "!fcp%d: invalid port state at attach=0x%x",
10145 		    instance, pptr->port_phys_state);
10146 
10147 		mutex_enter(&pptr->port_mutex);
10148 		pptr->port_phys_state = FCP_STATE_OFFLINE;
10149 		mutex_exit(&pptr->port_mutex);
10150 
10151 		res = DDI_SUCCESS;
10152 		break;
10153 	}
10154 
10155 	/* free temp list if used */
10156 	if (tmp_list != NULL) {
10157 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10158 	}
10159 
10160 	/* note the attach time */
10161 	pptr->port_attach_time = ddi_get_lbolt64();
10162 
10163 	/* all done */
10164 	return (res);
10165 
10166 	/* a failure we have to clean up after */
10167 fail:
10168 	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10169 
10170 	if (soft_state_linked) {
10171 		/* remove this fcp_port from the linked list */
10172 		(void) fcp_soft_state_unlink(pptr);
10173 	}
10174 
10175 	/* unbind and free event set */
10176 	if (pptr->port_ndi_event_hdl) {
10177 		if (event_bind) {
10178 			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10179 			    &pptr->port_ndi_events, NDI_SLEEP);
10180 		}
10181 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10182 	}
10183 
10184 	if (pptr->port_ndi_event_defs) {
10185 		(void) kmem_free(pptr->port_ndi_event_defs,
10186 		    sizeof (fcp_ndi_event_defs));
10187 	}
10188 
10189 	/*
10190 	 * Clean up mpxio stuff
10191 	 */
10192 	if (pptr->port_mpxio) {
10193 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10194 		pptr->port_mpxio--;
10195 	}
10196 
10197 	/* undo SCSI HBA setup */
10198 	if (hba_attached) {
10199 		(void) scsi_hba_detach(pptr->port_dip);
10200 	}
10201 	if (pptr->port_tran != NULL) {
10202 		scsi_hba_tran_free(pptr->port_tran);
10203 	}
10204 
10205 	mutex_enter(&fcp_global_mutex);
10206 
10207 	/*
10208 	 * We check soft_state_linked, because it is incremented right before
10209 	 * we increment fcp_watchdog_init.	 Therefore, we know if
10210 	 * soft_state_linked is still FALSE, we do not want to decrement
10211 	 * fcp_watchdog_init or possibly call untimeout.
10212 	 */
10213 
10214 	if (soft_state_linked) {
10215 		if (--fcp_watchdog_init == 0) {
10216 			timeout_id_t	tid = fcp_watchdog_id;
10217 
10218 			mutex_exit(&fcp_global_mutex);
10219 			(void) untimeout(tid);
10220 		} else {
10221 			mutex_exit(&fcp_global_mutex);
10222 		}
10223 	} else {
10224 		mutex_exit(&fcp_global_mutex);
10225 	}
10226 
10227 	if (mutex_initted) {
10228 		mutex_destroy(&pptr->port_mutex);
10229 		mutex_destroy(&pptr->port_pkt_mutex);
10230 	}
10231 
10232 	if (tmp_list != NULL) {
10233 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10234 	}
10235 
10236 	/* this makes pptr invalid */
10237 	ddi_soft_state_free(fcp_softstate, instance);
10238 
10239 	return (DDI_FAILURE);
10240 }
10241 
10242 
10243 static int
10244 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10245 {
10246 	int count = 0;
10247 
10248 	mutex_enter(&pptr->port_mutex);
10249 
10250 	/*
10251 	 * if the port is powered down or suspended, nothing else
10252 	 * to do; just return.
10253 	 */
10254 	if (flag != FCP_STATE_DETACHING) {
10255 		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10256 		    FCP_STATE_SUSPENDED)) {
10257 			pptr->port_state |= flag;
10258 			mutex_exit(&pptr->port_mutex);
10259 			return (FC_SUCCESS);
10260 		}
10261 	}
10262 
10263 	if (pptr->port_state & FCP_STATE_IN_MDI) {
10264 		mutex_exit(&pptr->port_mutex);
10265 		return (FC_FAILURE);
10266 	}
10267 
10268 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
10269 	    fcp_trace, FCP_BUF_LEVEL_2, 0,
10270 	    "fcp_handle_port_detach: port is detaching");
10271 
10272 	pptr->port_state |= flag;
10273 
10274 	/*
10275 	 * Wait for any ongoing reconfig/ipkt to complete; that
10276 	 * ensures the freeing of targets/luns is safe.
10277 	 * No more ref to this port should happen from statec/ioctl
10278 	 * after that as it was removed from the global port list.
10279 	 */
10280 	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10281 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10282 		/*
10283 		 * Let's give sufficient time for reconfig/ipkt
10284 		 * to complete.
10285 		 */
10286 		if (count++ >= FCP_ICMD_DEADLINE) {
10287 			break;
10288 		}
10289 		mutex_exit(&pptr->port_mutex);
10290 		delay(drv_usectohz(1000000));
10291 		mutex_enter(&pptr->port_mutex);
10292 	}
10293 
10294 	/*
10295 	 * if the driver is still busy then fail to
10296 	 * suspend/power down.
10297 	 */
10298 	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10299 	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10300 		pptr->port_state &= ~flag;
10301 		mutex_exit(&pptr->port_mutex);
10302 		return (FC_FAILURE);
10303 	}
10304 
10305 	if (flag == FCP_STATE_DETACHING) {
10306 		pptr = fcp_soft_state_unlink(pptr);
10307 		ASSERT(pptr != NULL);
10308 	}
10309 
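	/*
	 * Bump port_link_cnt so any in-flight internal discovery notices
	 * the change, mark the port offline, and mark the luns busy for
	 * the pending detach, suspend or power-down.
	 */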
10310 	pptr->port_link_cnt++;
10311 	pptr->port_state |= FCP_STATE_OFFLINE;
10312 	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10313 
10314 	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10315 	    FCP_CAUSE_LINK_DOWN);
10316 	mutex_exit(&pptr->port_mutex);
10317 
10318 	/* kill watch dog timer if we're the last */
10319 	mutex_enter(&fcp_global_mutex);
10320 	if (--fcp_watchdog_init == 0) {
10321 		timeout_id_t	tid = fcp_watchdog_id;
10322 		mutex_exit(&fcp_global_mutex);
10323 		(void) untimeout(tid);
10324 	} else {
10325 		mutex_exit(&fcp_global_mutex);
10326 	}
10327 
10328 	/* clean up the port structures */
10329 	if (flag == FCP_STATE_DETACHING) {
10330 		fcp_cleanup_port(pptr, instance);
10331 	}
10332 
10333 	return (FC_SUCCESS);
10334 }
10335 
10336 
10337 static void
10338 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10339 {
10340 	ASSERT(pptr != NULL);
10341 
10342 	/* unbind and free event set */
10343 	if (pptr->port_ndi_event_hdl) {
10344 		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10345 		    &pptr->port_ndi_events, NDI_SLEEP);
10346 		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10347 	}
10348 
10349 	if (pptr->port_ndi_event_defs) {
10350 		(void) kmem_free(pptr->port_ndi_event_defs,
10351 		    sizeof (fcp_ndi_event_defs));
10352 	}
10353 
10354 	/* free the lun/target structures and devinfos */
10355 	fcp_free_targets(pptr);
10356 
10357 	/*
10358 	 * Clean up mpxio stuff
10359 	 */
10360 	if (pptr->port_mpxio) {
10361 		(void) mdi_phci_unregister(pptr->port_dip, 0);
10362 		pptr->port_mpxio--;
10363 	}
10364 
10365 	/* clean up SCSA stuff */
10366 	(void) scsi_hba_detach(pptr->port_dip);
10367 	if (pptr->port_tran != NULL) {
10368 		scsi_hba_tran_free(pptr->port_tran);
10369 	}
10370 
10371 #ifdef	KSTATS_CODE
10372 	/* clean up kstats */
10373 	if (pptr->fcp_ksp != NULL) {
10374 		kstat_delete(pptr->fcp_ksp);
10375 	}
10376 #endif
10377 
10378 	/* clean up soft state mutexes/condition variables */
10379 	mutex_destroy(&pptr->port_mutex);
10380 	mutex_destroy(&pptr->port_pkt_mutex);
10381 
10382 	/* all done with soft state */
10383 	ddi_soft_state_free(fcp_softstate, instance);
10384 }
10385 
10386 /*
10387  *     Function: fcp_kmem_cache_constructor
10388  *
10389  *  Description: This function allocates and initializes the resources required
10390  *		 to build a scsi_pkt structure for the target driver.  The result
10391  *		 of the allocation and initialization will be cached in the
10392  *		 memory cache.	As DMA resources may be allocated here, that
10393  *		 means DMA resources will be tied up in the cache manager.
10394  *		 This is a tradeoff that has been made for performance reasons.
10395  *
10396  *     Argument: *buf		Memory to preinitialize.
10397  *		 *arg		FCP port structure (fcp_port).
10398  *		 kmflags	Value passed to kmem_cache_alloc() and
10399  *				propagated to the constructor.
10400  *
10401  * Return Value: 0	Allocation/Initialization was successful.
10402  *		 -1	Allocation or Initialization failed.
10403  *
10404  *
10405  * If the returned value is 0, the buffer is initialized like this:
10406  *
10407  *		    +================================+
10408  *	     +----> |	      struct scsi_pkt	     |
10409  *	     |	    |				     |
10410  *	     | +--- | pkt_ha_private		     |
10411  *	     | |    |				     |
10412  *	     | |    +================================+
10413  *	     | |
10414  *	     | |    +================================+
10415  *	     | +--> |	    struct fcp_pkt	     | <---------+
10416  *	     |	    |				     |		 |
10417  *	     +----- | cmd_pkt			     |		 |
10418  *		    |			  cmd_fp_pkt | ---+	 |
10419  *	  +-------->| cmd_fcp_rsp[]		     |	  |	 |
10420  *	  |    +--->| cmd_fcp_cmd[]		     |	  |	 |
10421  *	  |    |    |--------------------------------|	  |	 |
10422  *	  |    |    |	      struct fc_packet	     | <--+	 |
10423  *	  |    |    |				     |		 |
10424  *	  |    |    |		     pkt_ulp_private | ----------+
10425  *	  |    |    |		     pkt_fca_private | -----+
10426  *	  |    |    |		     pkt_data_cookie | ---+ |
10427  *	  |    |    | pkt_cmdlen		     |	  | |
10428  *	  |    |(a) | pkt_rsplen		     |	  | |
10429  *	  |    +----| .......... pkt_cmd ........... | ---|-|---------------+
10430  *	  |	(b) |		      pkt_cmd_cookie | ---|-|----------+    |
10431  *	  +---------| .......... pkt_resp .......... | ---|-|------+   |    |
10432  *		    |		     pkt_resp_cookie | ---|-|--+   |   |    |
10433  *		    | pkt_cmd_dma		     |	  | |  |   |   |    |
10434  *		    | pkt_cmd_acc		     |	  | |  |   |   |    |
10435  *		    +================================+	  | |  |   |   |    |
10436  *		    |	      dma_cookies	     | <--+ |  |   |   |    |
10437  *		    |				     |	    |  |   |   |    |
10438  *		    +================================+	    |  |   |   |    |
10439  *		    |	      fca_private	     | <----+  |   |   |    |
10440  *		    |				     |	       |   |   |    |
10441  *		    +================================+	       |   |   |    |
10442  *							       |   |   |    |
10443  *							       |   |   |    |
10444  *		    +================================+	 (d)   |   |   |    |
10445  *		    |	     fcp_resp cookies	     | <-------+   |   |    |
10446  *		    |				     |		   |   |    |
10447  *		    +================================+		   |   |    |
10448  *								   |   |    |
10449  *		    +================================+	 (d)	   |   |    |
10450  *		    |		fcp_resp	     | <-----------+   |    |
10451  *		    |	(DMA resources associated)   |		       |    |
10452  *		    +================================+		       |    |
10453  *								       |    |
10454  *								       |    |
10455  *								       |    |
10456  *		    +================================+	 (c)	       |    |
10457  *		    |	     fcp_cmd cookies	     | <---------------+    |
10458  *		    |				     |			    |
10459  *		    +================================+			    |
10460  *									    |
10461  *		    +================================+	 (c)		    |
10462  *		    |		 fcp_cmd	     | <--------------------+
10463  *		    |	(DMA resources associated)   |
10464  *		    +================================+
10465  *
10466  * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10467  * (b) Only if DMA is NOT used for the FCP_RESP buffer
10468  * (c) Only if DMA is used for the FCP_CMD buffer.
10469  * (d) Only if DMA is used for the FCP_RESP buffer
10470  */
10471 static int
10472 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10473     int kmflags)
10474 {
10475 	struct fcp_pkt	*cmd;
10476 	struct fcp_port	*pptr;
10477 	fc_packet_t	*fpkt;
10478 
10479 	pptr = (struct fcp_port *)tran->tran_hba_private;
10480 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10481 	bzero(cmd, tran->tran_hba_len);
10482 
10483 	cmd->cmd_pkt = pkt;
10484 	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10485 	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10486 	cmd->cmd_fp_pkt = fpkt;
10487 
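	/*
	 * Per the layout diagram above: the array of data DMA cookies sits
	 * immediately after the fcp_pkt, and the FCA private area follows
	 * that cookie array.
	 */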
10488 	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10489 	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10490 	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10491 	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10492 
10493 	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10494 	    sizeof (struct fcp_pkt));
10495 
10496 	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10497 	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10498 
10499 	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10500 		/*
10501 		 * The underlying HBA doesn't want to DMA the fcp_cmd or
10502 		 * fcp_resp.  The transfer of information will be done by
10503 		 * bcopy.
10504 		 * The naming of the flag (which is actually a value) is
10505 		 * unfortunate.	 FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10506 		 * DMA" but instead "NO DMA".
10507 		 */
10508 		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10509 		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10510 		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10511 	} else {
10512 		/*
10513 		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
10514 		 * buffers.  A buffer is allocated for each one using the
10515 		 * ddi_dma_* interfaces.
10516 		 */
10517 		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10518 			return (-1);
10519 		}
10520 	}
10521 
10522 	return (0);
10523 }
10524 
10525 /*
10526  *     Function: fcp_kmem_cache_destructor
10527  *
10528  *  Description: Called by the destructor of the cache managed by SCSA.
10529  *		 All the resources pre-allocated and all the data
10530  *		 pre-initialized in fcp_pkt_constructor are freed and
10531  *		 uninitialized here.
10532  *
10533  *     Argument: *pkt		Packet whose resources are uninitialized.
10534  *		 *tran		SCSA transport handle (holds the fcp_port).
10535  *
10536  * Return Value: None
10537  *
10538  *	Context: kernel
10539  */
10540 static void
10541 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10542 {
10543 	struct fcp_pkt	*cmd;
10544 	struct fcp_port	*pptr;
10545 
10546 	pptr = (struct fcp_port *)(tran->tran_hba_private);
10547 	cmd = pkt->pkt_ha_private;
10548 
10549 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10550 		/*
10551 		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10552 		 * buffer and DMA resources allocated to do so are released.
10553 		 */
10554 		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10555 	}
10556 }
10557 
10558 /*
10559  *     Function: fcp_alloc_cmd_resp
10560  *
10561  *  Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10562  *		 will be DMAed by the HBA.  The buffers are allocated according
10563  *		 to the DMA requirements of the HBA.  The buffers allocated will
10564  *		 also be bound.	 DMA resources are allocated in the process.
10565  *		 They will be released by fcp_free_cmd_resp().
10566  *
10567  *     Argument: *pptr	FCP port.
10568  *		 *fpkt	fc packet for which the cmd and resp packet should be
10569  *			allocated.
10570  *		 flags	Allocation flags.
10571  *
10572  * Return Value: FC_FAILURE
10573  *		 FC_SUCCESS
10574  *
10575  *	Context: User or Kernel context only if flags == KM_SLEEP.
10576  *		 Interrupt context if KM_SLEEP is not specified.
10577  */
10578 static int
10579 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10580 {
10581 	int			rval;
10582 	int			cmd_len;
10583 	int			resp_len;
10584 	ulong_t			real_len;
10585 	int			(*cb) (caddr_t);
10586 	ddi_dma_cookie_t	pkt_cookie;
10587 	ddi_dma_cookie_t	*cp;
10588 	uint32_t		cnt;
10589 
10590 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10591 
10592 	cmd_len = fpkt->pkt_cmdlen;
10593 	resp_len = fpkt->pkt_rsplen;
10594 
10595 	ASSERT(fpkt->pkt_cmd_dma == NULL);
10596 
10597 	/* Allocation of a DMA handle used in subsequent calls. */
10598 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10599 	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10600 		return (FC_FAILURE);
10601 	}
10602 
10603 	/* A buffer is allocated that satisfies the DMA requirements. */
10604 	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10605 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10606 	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10607 
10608 	if (rval != DDI_SUCCESS) {
10609 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10610 		return (FC_FAILURE);
10611 	}
10612 
10613 	if (real_len < cmd_len) {
10614 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10615 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10616 		return (FC_FAILURE);
10617 	}
10618 
10619 	/* The buffer allocated is DMA bound. */
10620 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10621 	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10622 	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10623 
10624 	if (rval != DDI_DMA_MAPPED) {
10625 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10626 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10627 		return (FC_FAILURE);
10628 	}
10629 
10630 	if (fpkt->pkt_cmd_cookie_cnt >
10631 	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10632 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10633 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10634 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10635 		return (FC_FAILURE);
10636 	}
10637 
10638 	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10639 
10640 	/*
10641 	 * The buffer where the scatter/gather list is going to be built is
10642 	 * allocated.
10643 	 */
10644 	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10645 	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10646 	    KM_NOSLEEP);
10647 
10648 	if (cp == NULL) {
10649 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10650 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10651 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10652 		return (FC_FAILURE);
10653 	}
10654 
10655 	/*
10656 	 * The scatter/gather list for the buffer we just allocated is built
10657 	 * here.
10658 	 */
10659 	*cp = pkt_cookie;
10660 	cp++;
10661 
10662 	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10663 		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10664 		    &pkt_cookie);
10665 		*cp = pkt_cookie;
10666 	}
10667 
10668 	ASSERT(fpkt->pkt_resp_dma == NULL);
10669 	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10670 	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10671 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10672 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10673 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10674 		return (FC_FAILURE);
10675 	}
10676 
10677 	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10678 	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10679 	    (caddr_t *)&fpkt->pkt_resp, &real_len,
10680 	    &fpkt->pkt_resp_acc);
10681 
10682 	if (rval != DDI_SUCCESS) {
10683 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10684 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10685 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10686 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10687 		kmem_free(fpkt->pkt_cmd_cookie,
10688 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10689 		return (FC_FAILURE);
10690 	}
10691 
10692 	if (real_len < resp_len) {
10693 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10694 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10695 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10696 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10697 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10698 		kmem_free(fpkt->pkt_cmd_cookie,
10699 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10700 		return (FC_FAILURE);
10701 	}
10702 
10703 	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10704 	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10705 	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10706 
10707 	if (rval != DDI_DMA_MAPPED) {
10708 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10709 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10710 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10711 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10712 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10713 		kmem_free(fpkt->pkt_cmd_cookie,
10714 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10715 		return (FC_FAILURE);
10716 	}
10717 
10718 	if (fpkt->pkt_resp_cookie_cnt >
10719 	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
10720 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10721 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10722 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10723 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10724 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10725 		kmem_free(fpkt->pkt_cmd_cookie,
10726 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10727 		return (FC_FAILURE);
10728 	}
10729 
10730 	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10731 
10732 	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10733 	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10734 	    KM_NOSLEEP);
10735 
10736 	if (cp == NULL) {
10737 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10738 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10739 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10740 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10741 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10742 		kmem_free(fpkt->pkt_cmd_cookie,
10743 		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10744 		return (FC_FAILURE);
10745 	}
10746 
10747 	*cp = pkt_cookie;
10748 	cp++;
10749 
10750 	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10751 		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10752 		    &pkt_cookie);
10753 		*cp = pkt_cookie;
10754 	}
10755 
10756 	return (FC_SUCCESS);
10757 }
10758 
10759 /*
10760  *     Function: fcp_free_cmd_resp
10761  *
10762  *  Description: This function releases the FCP_CMD and FCP_RESP buffer
10763  *		 allocated by fcp_alloc_cmd_resp() and all the resources
10764  *		 associated with them.	That includes the DMA resources and the
10765  *		 buffer allocated for the cookies of each one of them.
10766  *
10767  *     Argument: *pptr		FCP port context.
10768  *		 *fpkt		fc packet containing the cmd and resp packet
10769  *				to be released.
10770  *
10771  * Return Value: None
10772  *
10773  *	Context: Interrupt, User and Kernel context.
10774  */
10775 /* ARGSUSED */
10776 static void
10777 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10778 {
10779 	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10780 
10781 	if (fpkt->pkt_resp_dma) {
10782 		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10783 		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10784 		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10785 	}
10786 
10787 	if (fpkt->pkt_resp_cookie) {
10788 		kmem_free(fpkt->pkt_resp_cookie,
10789 		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10790 		fpkt->pkt_resp_cookie = NULL;
10791 	}
10792 
10793 	if (fpkt->pkt_cmd_dma) {
10794 		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10795 		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10796 		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10797 	}
10798 
10799 	if (fpkt->pkt_cmd_cookie) {
10800 		kmem_free(fpkt->pkt_cmd_cookie,
10801 		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10802 		fpkt->pkt_cmd_cookie = NULL;
10803 	}
10804 }
10805 
10806 
10807 /*
10808  * called by the transport to do our own target initialization
10809  *
10810  * can acquire and release the global mutex
10811  */
10812 /* ARGSUSED */
10813 static int
10814 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10815     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10816 {
10817 	uchar_t			*bytes;
10818 	uint_t			nbytes;
10819 	uint16_t		lun_num;
10820 	struct fcp_tgt	*ptgt;
10821 	struct fcp_lun	*plun;
10822 	struct fcp_port	*pptr = (struct fcp_port *)
10823 	    hba_tran->tran_hba_private;
10824 
10825 	ASSERT(pptr != NULL);
10826 
10827 	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10828 	    FCP_BUF_LEVEL_8, 0,
10829 	    "fcp_phys_tgt_init: called for %s (instance %d)",
10830 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10831 
10832 	/* get our port WWN property */
10833 	bytes = NULL;
10834 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10835 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10836 	    (nbytes != FC_WWN_SIZE)) {
10837 		/* no port WWN property */
10838 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10839 		    FCP_BUF_LEVEL_8, 0,
10840 		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10841 		    " for %s (instance %d): bytes=%p nbytes=%x",
10842 		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10843 		    nbytes);
10844 
10845 		if (bytes != NULL) {
10846 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10847 		}
10848 
10849 		return (DDI_NOT_WELL_FORMED);
10850 	}
10851 	ASSERT(bytes != NULL);
10852 
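	/*
	 * Fetch the LUN number property; the 0xFFFF default is returned
	 * when the property is absent.
	 */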
10853 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10854 	    LUN_PROP, 0xFFFF);
10855 	if (lun_num == 0xFFFF) {
10856 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10857 		    FCP_BUF_LEVEL_8, 0,
10858 		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10859 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10860 		    ddi_get_instance(tgt_dip));
10861 
10862 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10863 		return (DDI_NOT_WELL_FORMED);
10864 	}
10865 
10866 	mutex_enter(&pptr->port_mutex);
10867 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10868 		mutex_exit(&pptr->port_mutex);
10869 		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10870 		    FCP_BUF_LEVEL_8, 0,
10871 		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10872 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10873 		    ddi_get_instance(tgt_dip));
10874 
10875 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10876 		return (DDI_FAILURE);
10877 	}
10878 
10879 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10880 	    FC_WWN_SIZE) == 0);
10881 	ASSERT(plun->lun_num == lun_num);
10882 
10883 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10884 
10885 	ptgt = plun->lun_tgt;
10886 
10887 	mutex_enter(&ptgt->tgt_mutex);
10888 	plun->lun_tgt_count++;
10889 	scsi_device_hba_private_set(sd, plun);
10890 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10891 	plun->lun_sd = sd;
10892 	mutex_exit(&ptgt->tgt_mutex);
10893 	mutex_exit(&pptr->port_mutex);
10894 
10895 	return (DDI_SUCCESS);
10896 }
10897 
10898 /*ARGSUSED*/
10899 static int
10900 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10901     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10902 {
10903 	uchar_t			*bytes;
10904 	uint_t			nbytes;
10905 	uint16_t		lun_num;
10906 	struct fcp_tgt	*ptgt;
10907 	struct fcp_lun	*plun;
10908 	struct fcp_port	*pptr = (struct fcp_port *)
10909 	    hba_tran->tran_hba_private;
10910 	child_info_t		*cip;
10911 
10912 	ASSERT(pptr != NULL);
10913 
10914 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10915 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
10916 	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10917 	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
10918 	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10919 
10920 	cip = (child_info_t *)sd->sd_pathinfo;
10921 	if (cip == NULL) {
10922 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10923 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10924 		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10925 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10926 		    ddi_get_instance(tgt_dip));
10927 
10928 		return (DDI_NOT_WELL_FORMED);
10929 	}
10930 
10931 	/* get our port WWN property */
10932 	bytes = NULL;
10933 	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10934 	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10935 	    (nbytes != FC_WWN_SIZE)) {
10936 		if (bytes) {
10937 			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10938 		}
10939 		return (DDI_NOT_WELL_FORMED);
10940 	}
10941 
10942 	ASSERT(bytes != NULL);
10943 
10944 	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10945 	    LUN_PROP, 0xFFFF);
10946 	if (lun_num == 0xFFFF) {
10947 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10948 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10949 		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10950 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10951 		    ddi_get_instance(tgt_dip));
10952 
10953 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10954 		return (DDI_NOT_WELL_FORMED);
10955 	}
10956 
10957 	mutex_enter(&pptr->port_mutex);
10958 	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10959 		mutex_exit(&pptr->port_mutex);
10960 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10961 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
10962 		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10963 		    " for %s (instance %d)", ddi_get_name(tgt_dip),
10964 		    ddi_get_instance(tgt_dip));
10965 
10966 		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10967 		return (DDI_FAILURE);
10968 	}
10969 
10970 	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10971 	    FC_WWN_SIZE) == 0);
10972 	ASSERT(plun->lun_num == lun_num);
10973 
10974 	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10975 
10976 	ptgt = plun->lun_tgt;
10977 
10978 	mutex_enter(&ptgt->tgt_mutex);
10979 	plun->lun_tgt_count++;
10980 	scsi_device_hba_private_set(sd, plun);
10981 	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10982 	plun->lun_sd = sd;
10983 	mutex_exit(&ptgt->tgt_mutex);
10984 	mutex_exit(&pptr->port_mutex);
10985 
10986 	return (DDI_SUCCESS);
10987 }
10988 
10989 
10990 /*
10991  * called by the transport to do our own target initialization
10992  *
10993  * can acquire and release the global mutex
10994  */
10995 /* ARGSUSED */
10996 static int
10997 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10998     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10999 {
11000 	struct fcp_port	*pptr = (struct fcp_port *)
11001 	    hba_tran->tran_hba_private;
11002 	int			rval;
11003 
11004 	ASSERT(pptr != NULL);
11005 
11006 	/*
11007 	 * Child node is getting initialized.  Look at the mpxio component
11008 	 * type on the child device to see if this device is mpxio managed
11009 	 * or not.
11010 	 */
11011 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11012 		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11013 	} else {
11014 		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11015 	}
11016 
11017 	return (rval);
11018 }
11019 
11020 
11021 /* ARGSUSED */
11022 static void
11023 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11024     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11025 {
11026 	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
11027 	struct fcp_tgt	*ptgt;
11028 
11029 	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11030 	    fcp_trace, FCP_BUF_LEVEL_8, 0,
11031 	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11032 	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11033 	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11034 
11035 	if (plun == NULL) {
11036 		return;
11037 	}
11038 	ptgt = plun->lun_tgt;
11039 
11040 	ASSERT(ptgt != NULL);
11041 
11042 	mutex_enter(&ptgt->tgt_mutex);
11043 	ASSERT(plun->lun_tgt_count > 0);
11044 
11045 	if (--plun->lun_tgt_count == 0) {
11046 		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11047 	}
11048 	plun->lun_sd = NULL;
11049 	mutex_exit(&ptgt->tgt_mutex);
11050 }
11051 
11052 /*
11053  *     Function: fcp_scsi_start
11054  *
11055  *  Description: This function is called by the target driver to request a
11056  *		 command to be sent.
11057  *
11058  *     Argument: *ap		SCSI address of the device.
11059  *		 *pkt		SCSI packet containing the cmd to send.
11060  *
11061  * Return Value: TRAN_ACCEPT
11062  *		 TRAN_BUSY
11063  *		 TRAN_BADPKT
11064  *		 TRAN_FATAL_ERROR
11065  */
11066 static int
11067 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11068 {
11069 	struct fcp_port	*pptr = ADDR2FCP(ap);
11070 	struct fcp_lun	*plun = ADDR2LUN(ap);
11071 	struct fcp_pkt	*cmd = PKT2CMD(pkt);
11072 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11073 	int			rval;
11074 
11075 	/* ensure command isn't already issued */
11076 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11077 
11078 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11079 	    fcp_trace, FCP_BUF_LEVEL_9, 0,
11080 	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11081 
11082 	/*
11083 	 * It is strange that we enter the fcp_port mutex and the target
11084 	 * mutex to check the lun state (which has a mutex of its own).
11085 	 */
11086 	mutex_enter(&pptr->port_mutex);
11087 	mutex_enter(&ptgt->tgt_mutex);
11088 
11089 	/*
11090 	 * If the device is offline and is not in the process of coming
11091 	 * online, fail the request.
11092 	 */
11093 
11094 	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11095 	    !(plun->lun_state & FCP_LUN_ONLINING)) {
11096 		mutex_exit(&ptgt->tgt_mutex);
11097 		mutex_exit(&pptr->port_mutex);
11098 
11099 		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11100 			pkt->pkt_reason = CMD_DEV_GONE;
11101 		}
11102 
11103 		return (TRAN_FATAL_ERROR);
11104 	}
11105 	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11106 
11107 	/*
11108 	 * If we are suspended or the kernel is trying to dump, don't
11109 	 * block, fail or defer requests - send them down right away.
11110 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11111 	 * assume we have been suspended.  There is hardware such as
11112 	 * the v880 that doesn't do PM.	 Thus, the check for
11113 	 * ddi_in_panic.
11114 	 *
11115 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11116 	 * of changing.	 So, if we can queue the packet, do it.	 Eventually,
11117 	 * either the device will have gone away or changed and we can fail
11118 	 * the request, or we can proceed if the device didn't change.
11119 	 *
11120 	 * If the pd in the target or the packet is NULL it's probably
11121 	 * because the device has gone away; we allow the request to be
11122 	 * put on the internal queue here in case the device comes back within
11123 	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11124 	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11125 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL. pkt_pd
11126 	 * packet initialization.
11127 	 */
11128 
11129 	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11130 	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11131 	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11132 	    (ptgt->tgt_pd_handle == NULL) ||
11133 	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11134 		/*
11135 		 * If ((LUN is busy AND
11136 		 *	LUN not suspended AND
11137 		 *	The system is not in panic state) OR
11138 		 *	(The port is coming up))
11139 		 *
11140 		 * We check to see if any of the flags FLAG_NOINTR or
11141 		 * FLAG_NOQUEUE is set.	 If one of them is set the value
11142 		 * returned will be TRAN_BUSY.	If not, the request is queued.
11143 		 */
11144 		mutex_exit(&ptgt->tgt_mutex);
11145 		mutex_exit(&pptr->port_mutex);
11146 
11147 		/* see if using interrupts is allowed (so queueing'll work) */
11148 		if (pkt->pkt_flags & FLAG_NOINTR) {
11149 			pkt->pkt_resid = 0;
11150 			return (TRAN_BUSY);
11151 		}
11152 		if (pkt->pkt_flags & FLAG_NOQUEUE) {
11153 			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11154 			    fcp_trace, FCP_BUF_LEVEL_9, 0,
11155 			    "fcp_scsi_start: lun busy for pkt %p", pkt);
11156 			return (TRAN_BUSY);
11157 		}
11158 #ifdef	DEBUG
11159 		mutex_enter(&pptr->port_pkt_mutex);
11160 		pptr->port_npkts++;
11161 		mutex_exit(&pptr->port_pkt_mutex);
11162 #endif /* DEBUG */
11163 
11164 		/* go queue up the pkt for later */
11165 		fcp_queue_pkt(pptr, cmd);
11166 		return (TRAN_ACCEPT);
11167 	}
11168 	cmd->cmd_state = FCP_PKT_ISSUED;
11169 
11170 	mutex_exit(&ptgt->tgt_mutex);
11171 	mutex_exit(&pptr->port_mutex);
11172 
11173 	/*
11174 	 * Now that we released the mutexes, what was protected by them can
11175 	 * change.
11176 	 */
11177 
11178 	/*
11179 	 * If there is a reconfiguration in progress, wait for it to complete.
11180 	 */
11181 	fcp_reconfig_wait(pptr);
11182 
11183 	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11184 	    pkt->pkt_time : 0;
11185 
11186 	/* prepare the packet */
11187 
11188 	fcp_prepare_pkt(pptr, cmd, plun);
11189 
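	/*
	 * Use the timeout requested by the target driver if one was given;
	 * otherwise fall back to a long default.
	 */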
11190 	if (cmd->cmd_pkt->pkt_time) {
11191 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11192 	} else {
11193 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11194 	}
11195 
11196 	/*
11197 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
11198 	 * have to do polled I/O
11199 	 */
11200 	if (pkt->pkt_flags & FLAG_NOINTR) {
11201 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
11202 		return (fcp_dopoll(pptr, cmd));
11203 	}
11204 
11205 #ifdef	DEBUG
11206 	mutex_enter(&pptr->port_pkt_mutex);
11207 	pptr->port_npkts++;
11208 	mutex_exit(&pptr->port_pkt_mutex);
11209 #endif /* DEBUG */
11210 
11211 	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11212 	if (rval == FC_SUCCESS) {
11213 		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11214 		    fcp_trace, FCP_BUF_LEVEL_9, 0,
11215 		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11216 		return (TRAN_ACCEPT);
11217 	}
11218 
11219 	cmd->cmd_state = FCP_PKT_IDLE;
11220 
11221 #ifdef	DEBUG
11222 	mutex_enter(&pptr->port_pkt_mutex);
11223 	pptr->port_npkts--;
11224 	mutex_exit(&pptr->port_pkt_mutex);
11225 #endif /* DEBUG */
11226 
11227 	/*
11228 	 * For lack of clearer definitions, choose
11229 	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11230 	 */
11231 
11232 	if (rval == FC_TRAN_BUSY) {
11233 		pkt->pkt_resid = 0;
11234 		rval = TRAN_BUSY;
11235 	} else {
11236 		mutex_enter(&ptgt->tgt_mutex);
11237 		if (plun->lun_state & FCP_LUN_OFFLINE) {
11238 			child_info_t	*cip;
11239 
11240 			mutex_enter(&plun->lun_mutex);
11241 			cip = plun->lun_cip;
11242 			mutex_exit(&plun->lun_mutex);
11243 
11244 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11245 			    fcp_trace, FCP_BUF_LEVEL_6, 0,
11246 			    "fcp_transport failed 2 for %x: %x; dip=%p",
11247 			    plun->lun_tgt->tgt_d_id, rval, cip);
11248 
11249 			rval = TRAN_FATAL_ERROR;
11250 		} else {
11251 			if (pkt->pkt_flags & FLAG_NOQUEUE) {
11252 				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11253 				    fcp_trace, FCP_BUF_LEVEL_9, 0,
11254 				    "fcp_scsi_start: FC_BUSY for pkt %p",
11255 				    pkt);
11256 				rval = TRAN_BUSY;
11257 			} else {
11258 				rval = TRAN_ACCEPT;
11259 				fcp_queue_pkt(pptr, cmd);
11260 			}
11261 		}
11262 		mutex_exit(&ptgt->tgt_mutex);
11263 	}
11264 
11265 	return (rval);
11266 }
11267 
11268 /*
11269  * called by the transport to abort a packet
11270  */
11271 /*ARGSUSED*/
11272 static int
11273 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11274 {
11275 	int tgt_cnt;
11276 	struct fcp_port		*pptr = ADDR2FCP(ap);
11277 	struct fcp_lun	*plun = ADDR2LUN(ap);
11278 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11279 
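	/*
	 * A NULL pkt is a request to abort all outstanding commands for
	 * this target.  Aborting an individual packet is not supported,
	 * so anything else returns FALSE.
	 */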
11280 	if (pkt == NULL) {
11281 		if (ptgt) {
11282 			mutex_enter(&ptgt->tgt_mutex);
11283 			tgt_cnt = ptgt->tgt_change_cnt;
11284 			mutex_exit(&ptgt->tgt_mutex);
11285 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11286 			return (TRUE);
11287 		}
11288 	}
11289 	return (FALSE);
11290 }
11291 
11292 
11293 /*
11294  * Perform reset
11295  */
11296 int
11297 fcp_scsi_reset(struct scsi_address *ap, int level)
11298 {
11299 	int			rval = 0;
11300 	struct fcp_port		*pptr = ADDR2FCP(ap);
11301 	struct fcp_lun	*plun = ADDR2LUN(ap);
11302 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11303 
11304 	if (level == RESET_ALL) {
11305 		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11306 			rval = 1;
11307 		}
11308 	} else if (level == RESET_TARGET || level == RESET_LUN) {
11309 		/*
11310 		 * If we are in the middle of discovery, return
11311 		 * SUCCESS as this target will be rediscovered
11312 		 * anyway
11313 		 */
11314 		mutex_enter(&ptgt->tgt_mutex);
11315 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11316 			mutex_exit(&ptgt->tgt_mutex);
11317 			return (1);
11318 		}
11319 		mutex_exit(&ptgt->tgt_mutex);
11320 
11321 		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11322 			rval = 1;
11323 		}
11324 	}
11325 	return (rval);
11326 }
11327 
11328 
11329 /*
11330  * called by the framework to get a SCSI capability
11331  */
11332 static int
11333 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11334 {
11335 	return (fcp_commoncap(ap, cap, 0, whom, 0));
11336 }
11337 
11338 
11339 /*
11340  * called by the framework to set a SCSI capability
11341  */
11342 static int
11343 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11344 {
11345 	return (fcp_commoncap(ap, cap, value, whom, 1));
11346 }
11347 
11348 /*
11349  *     Function: fcp_pkt_setup
11350  *
11351  *  Description: This function sets up the scsi_pkt structure passed by the
11352  *		 caller. This function assumes fcp_pkt_constructor has been
11353  *		 called previously for the packet passed by the caller.	 If
11354  *		 successful this call will have the following results:
11355  *
11356  *		   - The resources needed that will be constant throughout
11357  *		     the whole transaction are allocated.
11358  *		   - The fields that will be constant throughout the whole
11359  *		     transaction are initialized.
11360  *		   - The scsi packet will be linked to the LUN structure
11361  *		     addressed by the transaction.
11362  *
11363  *     Argument:
11364  *		 *pkt		Pointer to a scsi_pkt structure.
11365  *		 callback
11366  *		 arg
11367  *
11368  * Return Value: 0	Success
11369  *		 !0	Failure
11370  *
11371  *	Context: Kernel context or interrupt context
11372  */
11373 /* ARGSUSED */
11374 static int
11375 fcp_pkt_setup(struct scsi_pkt *pkt,
11376     int (*callback)(caddr_t arg),
11377     caddr_t arg)
11378 {
11379 	struct fcp_pkt	*cmd;
11380 	struct fcp_port	*pptr;
11381 	struct fcp_lun	*plun;
11382 	struct fcp_tgt	*ptgt;
11383 	int		kf;
11384 	fc_packet_t	*fpkt;
11385 	fc_frame_hdr_t	*hp;
11386 
11387 	pptr = ADDR2FCP(&pkt->pkt_address);
11388 	plun = ADDR2LUN(&pkt->pkt_address);
11389 	ptgt = plun->lun_tgt;
11390 
11391 	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11392 	fpkt = cmd->cmd_fp_pkt;
11393 
11394 	/*
11395 	 * this request is for dma allocation only
11396 	 */
11397 	/*
11398 	 * First step of fcp_scsi_init_pkt: pkt allocation
11399 	 * We determine if the caller is willing to wait for the
11400 	 * resources.
11401 	 */
11402 	kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11403 
11404 	/*
11405 	 * Selective zeroing of the pkt.
11406 	 */
11407 	cmd->cmd_back = NULL;
11408 	cmd->cmd_next = NULL;
11409 
11410 	/*
11411 	 * Zero out fcp command
11412 	 */
11413 	bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11414 
11415 	cmd->cmd_state = FCP_PKT_IDLE;
11416 
11417 	fpkt = cmd->cmd_fp_pkt;
11418 	fpkt->pkt_data_acc = NULL;
11419 
11420 	/*
11421 	 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11422 	 * could be destroyed.	We need to fail pkt_setup.
11423 	 */
11424 	if (pptr->port_state & FCP_STATE_OFFLINE) {
11425 		return (-1);
11426 	}
11427 
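	/*
	 * Bind the fc packet to the target's remote port handle and have
	 * fp/fctl initialize it; the target mutex is held across the call
	 * so tgt_pd_handle stays stable.
	 */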
11428 	mutex_enter(&ptgt->tgt_mutex);
11429 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
11430 
11431 	if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11432 	    != FC_SUCCESS) {
11433 		mutex_exit(&ptgt->tgt_mutex);
11434 		return (-1);
11435 	}
11436 
11437 	mutex_exit(&ptgt->tgt_mutex);
11438 
11439 	/* Fill in the Fibre Channel header */
11440 	hp = &fpkt->pkt_cmd_fhdr;
11441 	hp->r_ctl = R_CTL_COMMAND;
11442 	hp->rsvd = 0;
11443 	hp->type = FC_TYPE_SCSI_FCP;
11444 	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11445 	hp->seq_id = 0;
11446 	hp->df_ctl  = 0;
11447 	hp->seq_cnt = 0;
11448 	hp->ox_id = 0xffff;
11449 	hp->rx_id = 0xffff;
11450 	hp->ro = 0;
11451 
11452 	/*
11453 	 * A doubly linked list (cmd_forw, cmd_back) is built
11454 	 * out of every allocated packet on a per-lun basis
11455 	 *
11456 	 * The packets are maintained in the list so as to satisfy
11457 	 * scsi_abort() requests. At present (which is unlikely to
11458 	 * change in the future) nobody performs a real scsi_abort
11459 	 * in the SCSI target drivers (as they don't keep the packets
11460 	 * after doing scsi_transport - so they don't know how to
11461 	 * abort a packet other than sending a NULL to abort all
11462 	 * outstanding packets)
11463 	 */
11464 	mutex_enter(&plun->lun_mutex);
11465 	if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11466 		plun->lun_pkt_head->cmd_back = cmd;
11467 	} else {
11468 		plun->lun_pkt_tail = cmd;
11469 	}
11470 	plun->lun_pkt_head = cmd;
11471 	mutex_exit(&plun->lun_mutex);
11472 	return (0);
11473 }
11474 
11475 /*
11476  *     Function: fcp_pkt_teardown
11477  *
11478  *  Description: This function releases a scsi_pkt structure and all the
11479  *		 resources attached to it.
11480  *
11481  *     Argument: *pkt		Pointer to a scsi_pkt structure.
11482  *
11483  * Return Value: None
11484  *
11485  *	Context: User, Kernel or Interrupt context.
11486  */
11487 static void
11488 fcp_pkt_teardown(struct scsi_pkt *pkt)
11489 {
11490 	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);
11491 	struct fcp_lun	*plun = ADDR2LUN(&pkt->pkt_address);
11492 	struct fcp_pkt	*cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11493 
11494 	/*
11495 	 * Remove the packet from the per-lun list
11496 	 */
11497 	mutex_enter(&plun->lun_mutex);
11498 	if (cmd->cmd_back) {
11499 		ASSERT(cmd != plun->lun_pkt_head);
11500 		cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11501 	} else {
11502 		ASSERT(cmd == plun->lun_pkt_head);
11503 		plun->lun_pkt_head = cmd->cmd_forw;
11504 	}
11505 
11506 	if (cmd->cmd_forw) {
11507 		cmd->cmd_forw->cmd_back = cmd->cmd_back;
11508 	} else {
11509 		ASSERT(cmd == plun->lun_pkt_tail);
11510 		plun->lun_pkt_tail = cmd->cmd_back;
11511 	}
11512 
11513 	mutex_exit(&plun->lun_mutex);
11514 
11515 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11516 }
11517 
11518 /*
11519  * Routine for reset notification setup, to register or cancel.
11520  * This function is called by SCSA
11521  */
11522 /*ARGSUSED*/
11523 static int
11524 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11525     void (*callback)(caddr_t), caddr_t arg)
11526 {
11527 	struct fcp_port *pptr = ADDR2FCP(ap);
11528 
11529 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11530 	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
11531 }
11532 
11533 
11534 static int
11535 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11536     ddi_eventcookie_t *event_cookiep)
11537 {
11538 	struct fcp_port *pptr = fcp_dip2port(dip);
11539 
11540 	if (pptr == NULL) {
11541 		return (DDI_FAILURE);
11542 	}
11543 
11544 	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11545 	    event_cookiep, NDI_EVENT_NOPASS));
11546 }
11547 
11548 
11549 static int
11550 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11551     ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11552     ddi_callback_id_t *cb_id)
11553 {
11554 	struct fcp_port *pptr = fcp_dip2port(dip);
11555 
11556 	if (pptr == NULL) {
11557 		return (DDI_FAILURE);
11558 	}
11559 
11560 	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11561 	    eventid, callback, arg, NDI_SLEEP, cb_id));
11562 }
11563 
11564 
11565 static int
11566 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11567 {
11568 
11569 	struct fcp_port *pptr = fcp_dip2port(dip);
11570 
11571 	if (pptr == NULL) {
11572 		return (DDI_FAILURE);
11573 	}
11574 	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11575 }
11576 
11577 
11578 /*
11579  * called by the transport to post an event
11580  */
11581 static int
11582 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11583     ddi_eventcookie_t eventid, void *impldata)
11584 {
11585 	struct fcp_port *pptr = fcp_dip2port(dip);
11586 
11587 	if (pptr == NULL) {
11588 		return (DDI_FAILURE);
11589 	}
11590 
11591 	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11592 	    eventid, impldata));
11593 }
11594 
11595 
11596 /*
11597  * A target in many cases in Fibre Channel has a one-to-one relation
11598  * with a port identifier (which is also known as D_ID and also as AL_PA
11599  * in private Loop).  On Fibre Channel-to-SCSI bridge boxes a target reset
11600  * will most likely result in resetting all LUNs (which means a reset will
11601  * occur on all the SCSI devices connected at the other end of the bridge).
11602  * That is a favorite topic for discussion, for one can debate it as
11603  * hotly as one likes and come up with arguably the best solution to one's
11604  * satisfaction.
11605  *
11606  * To stay on track and not digress much, here are the problems stated
11607  * briefly:
11608  *
11609  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but the
11610  *	target drivers use RESET_TARGET even if their instance is on a
11611  *	LUN. Doesn't that sound a bit broken ?
11612  *
11613  *	FCP SCSI (the current spec) only defines RESET TARGET in the
11614  *	control fields of an FCP_CMND structure. It should have been
11615  *	fixed right there, giving flexibility to the initiators to
11616  *	minimize havoc that could be caused by resetting a target.
11617  */
11618 static int
11619 fcp_reset_target(struct scsi_address *ap, int level)
11620 {
11621 	int			rval = FC_FAILURE;
11622 	char			lun_id[25];
11623 	struct fcp_port		*pptr = ADDR2FCP(ap);
11624 	struct fcp_lun	*plun = ADDR2LUN(ap);
11625 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11626 	struct scsi_pkt		*pkt;
11627 	struct fcp_pkt	*cmd;
11628 	struct fcp_rsp		*rsp;
11629 	uint32_t		tgt_cnt;
11630 	struct fcp_rsp_info	*rsp_info;
11631 	struct fcp_reset_elem	*p;
11632 	int			bval;
11633 
11634 	if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11635 	    KM_NOSLEEP)) == NULL) {
11636 		return (rval);
11637 	}
11638 
11639 	mutex_enter(&ptgt->tgt_mutex);
11640 	if (level == RESET_TARGET) {
11641 		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11642 			mutex_exit(&ptgt->tgt_mutex);
11643 			kmem_free(p, sizeof (struct fcp_reset_elem));
11644 			return (rval);
11645 		}
11646 		fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11647 		(void) strcpy(lun_id, " ");
11648 	} else {
11649 		if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11650 			mutex_exit(&ptgt->tgt_mutex);
11651 			kmem_free(p, sizeof (struct fcp_reset_elem));
11652 			return (rval);
11653 		}
11654 		fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11655 
11656 		(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11657 	}
11658 	tgt_cnt = ptgt->tgt_change_cnt;
11659 
11660 	mutex_exit(&ptgt->tgt_mutex);
11661 
11662 	if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11663 	    0, 0, NULL, 0)) == NULL) {
11664 		kmem_free(p, sizeof (struct fcp_reset_elem));
11665 		mutex_enter(&ptgt->tgt_mutex);
11666 		fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11667 		mutex_exit(&ptgt->tgt_mutex);
11668 		return (rval);
11669 	}
11670 	pkt->pkt_time = FCP_POLL_TIMEOUT;
11671 
11672 	/* fill in cmd part of packet */
11673 	cmd = PKT2CMD(pkt);
11674 	if (level == RESET_TARGET) {
11675 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11676 	} else {
11677 		cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11678 	}
11679 	cmd->cmd_fp_pkt->pkt_comp = NULL;
11680 	cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11681 
11682 	/* prepare a packet for transport */
11683 	fcp_prepare_pkt(pptr, cmd, plun);
11684 
11685 	if (cmd->cmd_pkt->pkt_time) {
11686 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11687 	} else {
11688 		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11689 	}
11690 
11691 	(void) fc_ulp_busy_port(pptr->port_fp_handle);
11692 	bval = fcp_dopoll(pptr, cmd);
11693 	fc_ulp_idle_port(pptr->port_fp_handle);
11694 
11695 	/* submit the packet */
11696 	if (bval == TRAN_ACCEPT) {
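		/*
		 * error encodes the outcome for the messages logged below:
		 * 0 = reset succeeded, 1 = the target returned a failing
		 * rsp_code, 2 = the FCP response failed validation,
		 * 3 = no valid response length was returned.
		 */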
11697 		int error = 3;
11698 
11699 		rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11700 		rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11701 		    sizeof (struct fcp_rsp));
11702 
11703 		if (rsp->fcp_u.fcp_status.rsp_len_set) {
11704 			if (fcp_validate_fcp_response(rsp, pptr) ==
11705 			    FC_SUCCESS) {
11706 				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11707 					FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11708 					    sizeof (struct fcp_rsp), rsp_info,
11709 					    cmd->cmd_fp_pkt->pkt_resp_acc,
11710 					    sizeof (struct fcp_rsp_info));
11711 				}
11712 				if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11713 					rval = FC_SUCCESS;
11714 					error = 0;
11715 				} else {
11716 					error = 1;
11717 				}
11718 			} else {
11719 				error = 2;
11720 			}
11721 		}
11722 
11723 		switch (error) {
11724 		case 0:
11725 			fcp_log(CE_WARN, pptr->port_dip,
11726 			    "!FCP: WWN 0x%08x%08x %s reset successfully",
11727 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11728 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11729 			break;
11730 
11731 		case 1:
11732 			fcp_log(CE_WARN, pptr->port_dip,
11733 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed,"
11734 			    " response code=%x",
11735 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11736 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11737 			    rsp_info->rsp_code);
11738 			break;
11739 
11740 		case 2:
11741 			fcp_log(CE_WARN, pptr->port_dip,
11742 			    "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11743 			    " Bad FCP response values: rsvd1=%x,"
11744 			    " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11745 			    " rsplen=%x, senselen=%x",
11746 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11747 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11748 			    rsp->reserved_0, rsp->reserved_1,
11749 			    rsp->fcp_u.fcp_status.reserved_0,
11750 			    rsp->fcp_u.fcp_status.reserved_1,
11751 			    rsp->fcp_response_len, rsp->fcp_sense_len);
11752 			break;
11753 
11754 		default:
11755 			fcp_log(CE_WARN, pptr->port_dip,
11756 			    "!FCP: Reset to WWN	 0x%08x%08x %s failed",
11757 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11758 			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11759 			break;
11760 		}
11761 	}
11762 	scsi_destroy_pkt(pkt);
11763 
11764 	if (rval == FC_FAILURE) {
11765 		mutex_enter(&ptgt->tgt_mutex);
11766 		if (level == RESET_TARGET) {
11767 			fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11768 		} else {
11769 			fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11770 		}
11771 		mutex_exit(&ptgt->tgt_mutex);
11772 		kmem_free(p, sizeof (struct fcp_reset_elem));
11773 		return (rval);
11774 	}
11775 
11776 	mutex_enter(&pptr->port_mutex);
11777 	if (level == RESET_TARGET) {
11778 		p->tgt = ptgt;
11779 		p->lun = NULL;
11780 	} else {
11781 		p->tgt = NULL;
11782 		p->lun = plun;
11783 	}
11784 	p->tgt = ptgt;
11785 	p->tgt_cnt = tgt_cnt;
11786 	p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11787 	p->next = pptr->port_reset_list;
11788 	pptr->port_reset_list = p;
11789 
11790 	FCP_TRACE(fcp_logq, pptr->port_instbuf,
11791 	    fcp_trace, FCP_BUF_LEVEL_3, 0,
11792 	    "Notify ssd of the reset to reinstate the reservations");
11793 
11794 	scsi_hba_reset_notify_callback(&pptr->port_mutex,
11795 	    &pptr->port_reset_notify_listf);
11796 
11797 	mutex_exit(&pptr->port_mutex);
11798 
11799 	return (rval);
11800 }
11801 
11802 
11803 /*
11804  * called by fcp_getcap and fcp_setcap to get and set (respectively)
11805  * SCSI capabilities
11806  */
11807 /* ARGSUSED */
11808 static int
11809 fcp_commoncap(struct scsi_address *ap, char *cap,
11810     int val, int tgtonly, int doset)
11811 {
11812 	struct fcp_port		*pptr = ADDR2FCP(ap);
11813 	struct fcp_lun	*plun = ADDR2LUN(ap);
11814 	struct fcp_tgt	*ptgt = plun->lun_tgt;
11815 	int			cidx;
11816 	int			rval = FALSE;
11817 
11818 	if (cap == (char *)0) {
11819 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11820 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
11821 		    "fcp_commoncap: invalid arg");
11822 		return (rval);
11823 	}
11824 
11825 	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11826 		return (UNDEFINED);
11827 	}
11828 
11829 	/*
11830 	 * Process setcap request.
11831 	 */
11832 	if (doset) {
11833 		/*
11834 		 * At present, we can only set binary (0/1) values
11835 		 */
11836 		switch (cidx) {
11837 		case SCSI_CAP_ARQ:
11838 			if (val == 0) {
11839 				rval = FALSE;
11840 			} else {
11841 				rval = TRUE;
11842 			}
11843 			break;
11844 
11845 		case SCSI_CAP_LUN_RESET:
11846 			if (val) {
11847 				plun->lun_cap |= FCP_LUN_CAP_RESET;
11848 			} else {
11849 				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11850 			}
11851 			rval = TRUE;
11852 			break;
11853 
11854 		case SCSI_CAP_SECTOR_SIZE:
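			/*
			 * The request is accepted, but no per-LUN state is
			 * updated for sector size here.
			 */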
11855 			rval = TRUE;
11856 			break;
11857 		default:
11858 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11859 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11860 			    "fcp_setcap: unsupported %d", cidx);
11861 			rval = UNDEFINED;
11862 			break;
11863 		}
11864 
11865 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11866 		    fcp_trace, FCP_BUF_LEVEL_5, 0,
11867 		    "set cap: cap=%s, val/tgtonly/doset/rval = "
11868 		    "0x%x/0x%x/0x%x/%d",
11869 		    cap, val, tgtonly, doset, rval);
11870 
11871 	} else {
11872 		/*
11873 		 * Process getcap request.
11874 		 */
11875 		switch (cidx) {
11876 		case SCSI_CAP_DMA_MAX:
11877 			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11878 
11879 			/*
11880 			 * Need to make an adjustment: qlc uses a 64-bit
11881 			 * uint_t while st uses an int, so we make the
11882 			 * adjustment here since nobody wants to touch this.
11883 			 * It still leaves the max single block length
11884 			 * of 2 gig.  This should last.
11885 			 */
11886 
11887 			if (rval == -1) {
11888 				rval = MAX_INT_DMA;
11889 			}
11890 
11891 			break;
11892 
11893 		case SCSI_CAP_INITIATOR_ID:
11894 			rval = pptr->port_id;
11895 			break;
11896 
11897 		case SCSI_CAP_ARQ:
11898 		case SCSI_CAP_RESET_NOTIFICATION:
11899 		case SCSI_CAP_TAGGED_QING:
11900 			rval = TRUE;
11901 			break;
11902 
11903 		case SCSI_CAP_SCSI_VERSION:
11904 			rval = 3;
11905 			break;
11906 
11907 		case SCSI_CAP_INTERCONNECT_TYPE:
11908 			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11909 			    (ptgt->tgt_hard_addr == 0)) {
11910 				rval = INTERCONNECT_FABRIC;
11911 			} else {
11912 				rval = INTERCONNECT_FIBRE;
11913 			}
11914 			break;
11915 
11916 		case SCSI_CAP_LUN_RESET:
11917 			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11918 			    TRUE : FALSE;
11919 			break;
11920 
11921 		default:
11922 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
11923 			    fcp_trace, FCP_BUF_LEVEL_4, 0,
11924 			    "fcp_getcap: unsupported %d", cidx);
11925 			rval = UNDEFINED;
11926 			break;
11927 		}
11928 
11929 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
11930 		    fcp_trace, FCP_BUF_LEVEL_8, 0,
11931 		    "get cap: cap=%s, val/tgtonly/doset/rval = "
11932 		    "0x%x/0x%x/0x%x/%d",
11933 		    cap, val, tgtonly, doset, rval);
11934 	}
11935 
11936 	return (rval);
11937 }
11938 
11939 /*
11940  * called by the transport to get the port-wwn and lun
11941  * properties of this device, and to create a "name" based on them
11942  *
11943  * these properties don't exist on sun4m
11944  *
11945  * return 1 for success else return 0
11946  */
11947 /* ARGSUSED */
11948 static int
11949 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11950 {
11951 	int			i;
11952 	int			*lun;
11953 	int			numChars;
11954 	uint_t			nlun;
11955 	uint_t			count;
11956 	uint_t			nbytes;
11957 	uchar_t			*bytes;
11958 	uint16_t		lun_num;
11959 	uint32_t		tgt_id;
11960 	char			**conf_wwn;
11961 	char			tbuf[(FC_WWN_SIZE << 1) + 1];
11962 	uchar_t			barray[FC_WWN_SIZE];
11963 	dev_info_t		*tgt_dip;
11964 	struct fcp_tgt	*ptgt;
11965 	struct fcp_port	*pptr;
11966 	struct fcp_lun	*plun;
11967 
11968 	ASSERT(sd != NULL);
11969 	ASSERT(name != NULL);
11970 
11971 	tgt_dip = sd->sd_dev;
11972 	pptr = ddi_get_soft_state(fcp_softstate,
11973 	    ddi_get_instance(ddi_get_parent(tgt_dip)));
11974 	if (pptr == NULL) {
11975 		return (0);
11976 	}
11977 
11978 	ASSERT(tgt_dip != NULL);
11979 
11980 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11981 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11982 	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11983 		name[0] = '\0';
11984 		return (0);
11985 	}
11986 
11987 	if (nlun == 0) {
11988 		ddi_prop_free(lun);
11989 		return (0);
11990 	}
11991 
11992 	lun_num = lun[0];
11993 	ddi_prop_free(lun);
11994 
11995 	/*
11996 	 * Lookup for .conf WWN property
11997 	 */
11998 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11999 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12000 	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12001 		ASSERT(count >= 1);
12002 
12003 		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12004 		ddi_prop_free(conf_wwn);
12005 		mutex_enter(&pptr->port_mutex);
12006 		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12007 			mutex_exit(&pptr->port_mutex);
12008 			return (0);
12009 		}
12010 		ptgt = plun->lun_tgt;
12011 		mutex_exit(&pptr->port_mutex);
12012 
12013 		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12014 		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12015 
12016 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12017 		    ptgt->tgt_hard_addr != 0) {
12018 			tgt_id = (uint32_t)fcp_alpa_to_switch[
12019 			    ptgt->tgt_hard_addr];
12020 		} else {
12021 			tgt_id = ptgt->tgt_d_id;
12022 		}
12023 
12024 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12025 		    TARGET_PROP, tgt_id);
12026 	}
12027 
12028 	/* get our port-wwn property */
12029 	bytes = NULL;
12030 	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12031 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12032 	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12033 		if (bytes != NULL) {
12034 			ddi_prop_free(bytes);
12035 		}
12036 		return (0);
12037 	}
12038 
12039 	for (i = 0; i < FC_WWN_SIZE; i++) {
12040 		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12041 	}
12042 
12043 	/* Stick in the address of the form "wWWN,LUN" */
12044 	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12045 
12046 	ASSERT(numChars < len);
12047 	if (numChars >= len) {
12048 		fcp_log(CE_WARN, pptr->port_dip,
12049 		    "!fcp_scsi_get_name: "
12050 		    "name parameter length too small, it needs to be %d",
12051 		    numChars+1);
12052 	}
12053 
12054 	ddi_prop_free(bytes);
12055 
12056 	return (1);
12057 }
12058 
12059 
12060 /*
12061  * called by the transport to get the SCSI target id value, returning
12062  * it in "name"
12063  *
12064  * this isn't needed/used on sun4m
12065  *
12066  * return 1 for success else return 0
12067  */
12068 /* ARGSUSED */
12069 static int
12070 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12071 {
12072 	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
12073 	struct fcp_tgt	*ptgt;
12074 	int    numChars;
12075 
12076 	if (plun == NULL) {
12077 		return (0);
12078 	}
12079 
12080 	if ((ptgt = plun->lun_tgt) == NULL) {
12081 		return (0);
12082 	}
12083 
12084 	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12085 
12086 	ASSERT(numChars < len);
12087 	if (numChars >= len) {
12088 		fcp_log(CE_WARN, NULL,
12089 		    "!fcp_scsi_get_bus_addr: "
12090 		    "name parameter length too small, it needs to be %d",
12091 		    numChars+1);
12092 	}
12093 
12094 	return (1);
12095 }
12096 
12097 
12098 /*
12099  * called internally to reset the link where the specified port lives
12100  */
12101 static int
12102 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12103 {
12104 	la_wwn_t		wwn;
12105 	struct fcp_lun	*plun;
12106 	struct fcp_tgt	*ptgt;
12107 
12108 	/* disable restart of lip if we're suspended */
12109 	mutex_enter(&pptr->port_mutex);
12110 
12111 	if (pptr->port_state & (FCP_STATE_SUSPENDED |
12112 	    FCP_STATE_POWER_DOWN)) {
12113 		mutex_exit(&pptr->port_mutex);
12114 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
12115 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
12116 		    "fcp_linkreset, fcp%d: link reset "
12117 		    "disabled due to DDI_SUSPEND",
12118 		    ddi_get_instance(pptr->port_dip));
12119 		return (FC_FAILURE);
12120 	}
12121 
12122 	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12123 		mutex_exit(&pptr->port_mutex);
12124 		return (FC_SUCCESS);
12125 	}
12126 
12127 	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12128 	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12129 
12130 	/*
12131 	 * If ap == NULL assume local link reset.
12132 	 */
12133 	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12134 		plun = ADDR2LUN(ap);
12135 		ptgt = plun->lun_tgt;
12136 		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12137 	} else {
12138 		bzero((caddr_t)&wwn, sizeof (wwn));
12139 	}
12140 	mutex_exit(&pptr->port_mutex);
12141 
12142 	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12143 }
12144 
12145 
12146 /*
12147  * called from fcp_port_attach() to resume a port
12148  * return DDI_* success/failure status
12149  * acquires and releases the global mutex
12150  * acquires and releases the port mutex
12151  */
12152 /*ARGSUSED*/
12153 
12154 static int
12155 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12156     uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12157 {
12158 	int			res = DDI_FAILURE; /* default result */
12159 	struct fcp_port	*pptr;		/* port state ptr */
12160 	uint32_t		alloc_cnt;
12161 	uint32_t		max_cnt;
12162 	fc_portmap_t		*tmp_list = NULL;
12163 
12164 	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12165 	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12166 	    instance);
12167 
12168 	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12169 		cmn_err(CE_WARN, "fcp: bad soft state");
12170 		return (res);
12171 	}
12172 
12173 	mutex_enter(&pptr->port_mutex);
12174 	switch (cmd) {
12175 	case FC_CMD_RESUME:
12176 		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12177 		pptr->port_state &= ~FCP_STATE_SUSPENDED;
12178 		break;
12179 
12180 	case FC_CMD_POWER_UP:
12181 		/*
12182 		 * If the port is DDI_SUSPENded, defer rediscovery
12183 		 * until DDI_RESUME occurs
12184 		 */
12185 		if (pptr->port_state & FCP_STATE_SUSPENDED) {
12186 			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12187 			mutex_exit(&pptr->port_mutex);
12188 			return (DDI_SUCCESS);
12189 		}
12190 		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12191 	}
12192 	pptr->port_id = s_id;
12193 	pptr->port_state = FCP_STATE_INIT;
12194 	mutex_exit(&pptr->port_mutex);
12195 
12196 	/*
12197 	 * Make a copy of ulp_port_info as fctl allocates
12198 	 * a temp struct.
12199 	 */
12200 	(void) fcp_cp_pinfo(pptr, pinfo);
12201 
12202 	mutex_enter(&fcp_global_mutex);
12203 	if (fcp_watchdog_init++ == 0) {
12204 		fcp_watchdog_tick = fcp_watchdog_timeout *
12205 		    drv_usectohz(1000000);
12206 		fcp_watchdog_id = timeout(fcp_watch,
12207 		    NULL, fcp_watchdog_tick);
12208 	}
12209 	mutex_exit(&fcp_global_mutex);
12210 
12211 	/*
12212 	 * Handle various topologies and link states.
12213 	 */
12214 	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12215 	case FC_STATE_OFFLINE:
12216 		/*
12217 		 * Wait for ONLINE, at which time a state
12218 		 * change will cause a statec_callback
12219 		 */
12220 		res = DDI_SUCCESS;
12221 		break;
12222 
12223 	case FC_STATE_ONLINE:
12224 
12225 		if (pptr->port_topology == FC_TOP_UNKNOWN) {
12226 			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12227 			res = DDI_SUCCESS;
12228 			break;
12229 		}
12230 
12231 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12232 		    !fcp_enable_auto_configuration) {
12233 			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12234 			if (tmp_list == NULL) {
12235 				if (!alloc_cnt) {
12236 					res = DDI_SUCCESS;
12237 				}
12238 				break;
12239 			}
12240 			max_cnt = alloc_cnt;
12241 		} else {
12242 			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12243 
12244 			alloc_cnt = FCP_MAX_DEVICES;
12245 
12246 			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12247 			    (sizeof (fc_portmap_t)) * alloc_cnt,
12248 			    KM_NOSLEEP)) == NULL) {
12249 				fcp_log(CE_WARN, pptr->port_dip,
12250 				    "!fcp%d: failed to allocate portmap",
12251 				    instance);
12252 				break;
12253 			}
12254 
12255 			max_cnt = alloc_cnt;
12256 			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12257 			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12258 			    FC_SUCCESS) {
12259 				caddr_t msg;
12260 
12261 				(void) fc_ulp_error(res, &msg);
12262 
12263 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
12264 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
12265 				    "resume failed getportmap: reason=0x%x",
12266 				    res);
12267 
12268 				fcp_log(CE_WARN, pptr->port_dip,
12269 				    "!failed to get port map : %s", msg);
12270 				break;
12271 			}
12272 			if (max_cnt > alloc_cnt) {
12273 				alloc_cnt = max_cnt;
12274 			}
12275 		}
12276 
12277 		/*
12278 		 * do the SCSI device discovery and create
12279 		 * the devinfos
12280 		 */
12281 		fcp_statec_callback(ulph, pptr->port_fp_handle,
12282 		    pptr->port_phys_state, pptr->port_topology, tmp_list,
12283 		    max_cnt, pptr->port_id);
12284 
12285 		res = DDI_SUCCESS;
12286 		break;
12287 
12288 	default:
12289 		fcp_log(CE_WARN, pptr->port_dip,
12290 		    "!fcp%d: invalid port state at attach=0x%x",
12291 		    instance, pptr->port_phys_state);
12292 
12293 		mutex_enter(&pptr->port_mutex);
12294 		pptr->port_phys_state = FCP_STATE_OFFLINE;
12295 		mutex_exit(&pptr->port_mutex);
12296 		res = DDI_SUCCESS;
12297 
12298 		break;
12299 	}
12300 
12301 	if (tmp_list != NULL) {
12302 		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12303 	}
12304 
12305 	return (res);
12306 }
12307 
12308 
12309 static void
12310 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12311 {
12312 	pptr->port_fp_modlinkage = *pinfo->port_linkage;
12313 	pptr->port_dip = pinfo->port_dip;
12314 	pptr->port_fp_handle = pinfo->port_handle;
12315 	if (pinfo->port_acc_attr != NULL) {
12316 		/*
12317 		 * FCA supports DMA
12318 		 */
12319 		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12320 		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12321 		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12322 		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12323 	}
12324 	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12325 	pptr->port_max_exch = pinfo->port_fca_max_exch;
12326 	pptr->port_phys_state = pinfo->port_state;
12327 	pptr->port_topology = pinfo->port_flags;
12328 	pptr->port_reset_action = pinfo->port_reset_action;
12329 	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12330 	pptr->port_fcp_dma = pinfo->port_fcp_dma;
12331 	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12332 	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12333 
12334 	/* Clear FMA caps to avoid fm-capability ereport */
12335 	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12336 		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12337 	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12338 		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12339 	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12340 		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12341 }
12342 
12343 /*
12344  * If the element's wait field is set to 1, another thread is waiting
12345  * for the operation to complete.  Once it is complete, the waiting
12346  * thread is signaled and the element is freed by the waiting thread
12347  * (a sketch of that waiting side appears after this function).  If
12348  * the element's wait field is set to 0, the element is freed here.
12349  */
12350 static void
12351 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12352 {
12353 	ASSERT(elem != NULL);
12354 	mutex_enter(&elem->mutex);
12355 	elem->result = result;
12356 	if (elem->wait) {
12357 		elem->wait = 0;
12358 		cv_signal(&elem->cv);
12359 		mutex_exit(&elem->mutex);
12360 	} else {
12361 		mutex_exit(&elem->mutex);
12362 		cv_destroy(&elem->cv);
12363 		mutex_destroy(&elem->mutex);
12364 		kmem_free(elem, sizeof (struct fcp_hp_elem));
12365 	}
12366 }
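
#ifdef	FCP_EXAMPLES
/*
 * Illustrative sketch only -- not part of the original driver and guarded
 * out of any build.  It shows, in simplified form, the waiting side of the
 * handshake described above fcp_process_elem(): the submitter that set
 * elem->wait = 1 blocks on elem->cv until fcp_process_elem() clears wait
 * and signals, then reads the result and frees the element itself.
 */
static int
fcp_wait_for_elem_sketch(struct fcp_hp_elem *elem)
{
	int	result;

	mutex_enter(&elem->mutex);
	while (elem->wait) {
		cv_wait(&elem->cv, &elem->mutex);
	}
	result = elem->result;
	mutex_exit(&elem->mutex);

	/* The waiting thread owns the element once it has been signaled. */
	cv_destroy(&elem->cv);
	mutex_destroy(&elem->mutex);
	kmem_free(elem, sizeof (struct fcp_hp_elem));

	return (result);
}
#endif	/* FCP_EXAMPLES */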
12367 
12368 /*
12369  * This function is invoked from the taskq thread to allocate
12370  * devinfo nodes and to online/offline them.
12371  */
12372 static void
12373 fcp_hp_task(void *arg)
12374 {
12375 	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
12376 	struct fcp_lun	*plun = elem->lun;
12377 	struct fcp_port		*pptr = elem->port;
12378 	int			result;
12379 
12380 	ASSERT(elem->what == FCP_ONLINE ||
12381 	    elem->what == FCP_OFFLINE ||
12382 	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12383 	    elem->what == FCP_MPXIO_PATH_SET_BUSY);
12384 
12385 	mutex_enter(&pptr->port_mutex);
12386 	mutex_enter(&plun->lun_mutex);
12387 	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12388 	    plun->lun_event_count != elem->event_cnt) ||
12389 	    pptr->port_state & (FCP_STATE_SUSPENDED |
12390 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12391 		mutex_exit(&plun->lun_mutex);
12392 		mutex_exit(&pptr->port_mutex);
12393 		fcp_process_elem(elem, NDI_FAILURE);
12394 		return;
12395 	}
12396 	mutex_exit(&plun->lun_mutex);
12397 	mutex_exit(&pptr->port_mutex);
12398 
12399 	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12400 	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12401 	fcp_process_elem(elem, result);
12402 }
12403 
12404 
12405 static child_info_t *
12406 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12407     int tcount)
12408 {
12409 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12410 
12411 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12412 		struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12413 
12414 		ASSERT(MUTEX_HELD(&pptr->port_mutex));
12415 		/*
12416 		 * Child has not been created yet. Create the child device
12417 		 * based on the per-Lun flags.
12418 		 */
12419 		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12420 			plun->lun_cip =
12421 			    CIP(fcp_create_dip(plun, lcount, tcount));
12422 			plun->lun_mpxio = 0;
12423 		} else {
12424 			plun->lun_cip =
12425 			    CIP(fcp_create_pip(plun, lcount, tcount));
12426 			plun->lun_mpxio = 1;
12427 		}
12428 	} else {
12429 		plun->lun_cip = cip;
12430 	}
12431 
12432 	return (plun->lun_cip);
12433 }
12434 
12435 
12436 static int
12437 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12438 {
12439 	int		rval = FC_FAILURE;
12440 	dev_info_t	*pdip;
12441 	struct dev_info	*dip;
12442 
12443 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12444 
12445 	pdip = plun->lun_tgt->tgt_port->port_dip;
12446 
12447 	if (plun->lun_cip == NULL) {
12448 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12449 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
12450 		    "fcp_is_dip_present: plun->lun_cip is NULL: "
12451 		    "plun: %p lun state: %x num: %d target state: %x",
12452 		    plun, plun->lun_state, plun->lun_num,
12453 		    plun->lun_tgt->tgt_port->port_state);
12454 		return (rval);
12455 	}
12456 	ndi_devi_enter(pdip);
12457 	dip = DEVI(pdip)->devi_child;
12458 	while (dip) {
12459 		if (dip == DEVI(cdip)) {
12460 			rval = FC_SUCCESS;
12461 			break;
12462 		}
12463 		dip = dip->devi_sibling;
12464 	}
12465 	ndi_devi_exit(pdip);
12466 	return (rval);
12467 }
12468 
12469 static int
12470 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12471 {
12472 	int		rval = FC_FAILURE;
12473 
12474 	ASSERT(plun != NULL);
12475 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12476 
12477 	if (plun->lun_mpxio == 0) {
12478 		rval = fcp_is_dip_present(plun, DIP(cip));
12479 	} else {
12480 		rval = fcp_is_pip_present(plun, PIP(cip));
12481 	}
12482 
12483 	return (rval);
12484 }
12485 
12486 /*
12487  *     Function: fcp_create_dip
12488  *
12489  *  Description: Creates a dev_info_t structure for the LUN specified by the
12490  *		 caller.
12491  *
12492  *     Argument: plun		Lun structure
12493  *		 link_cnt	Link state count.
12494  *		 tgt_cnt	Target state change count.
12495  *
12496  * Return Value: NULL if it failed
12497  *		 dev_info_t structure address if it succeeded
12498  *
12499  *	Context: Kernel context
12500  */
12501 static dev_info_t *
12502 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12503 {
12504 	int			failure = 0;
12505 	uint32_t		tgt_id;
12506 	uint64_t		sam_lun;
12507 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12508 	struct fcp_port	*pptr = ptgt->tgt_port;
12509 	dev_info_t		*pdip = pptr->port_dip;
12510 	dev_info_t		*cdip = NULL;
12511 	dev_info_t		*old_dip = DIP(plun->lun_cip);
12512 	char			*nname = NULL;
12513 	char			**compatible = NULL;
12514 	int			ncompatible;
12515 	char			*scsi_binding_set;
12516 	char			t_pwwn[17];
12517 
12518 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12519 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12520 
12521 	/* get the 'scsi-binding-set' property */
12522 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12523 	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12524 	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
12525 		scsi_binding_set = NULL;
12526 	}
12527 
12528 	/* determine the node name and compatible */
12529 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12530 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12531 	if (scsi_binding_set) {
12532 		ddi_prop_free(scsi_binding_set);
12533 	}
12534 
12535 	if (nname == NULL) {
12536 #ifdef	DEBUG
12537 		cmn_err(CE_WARN, "%s%d: no driver for "
12538 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12539 		    "	 compatible: %s",
12540 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12541 		    ptgt->tgt_port_wwn.raw_wwn[0],
12542 		    ptgt->tgt_port_wwn.raw_wwn[1],
12543 		    ptgt->tgt_port_wwn.raw_wwn[2],
12544 		    ptgt->tgt_port_wwn.raw_wwn[3],
12545 		    ptgt->tgt_port_wwn.raw_wwn[4],
12546 		    ptgt->tgt_port_wwn.raw_wwn[5],
12547 		    ptgt->tgt_port_wwn.raw_wwn[6],
12548 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12549 		    *compatible);
12550 #endif	/* DEBUG */
12551 		failure++;
12552 		goto end_of_fcp_create_dip;
12553 	}
12554 
12555 	cdip = fcp_find_existing_dip(plun, pdip, nname);
12556 
12557 	/*
12558 	 * If the old_dip does not match the cdip, some property has
12559 	 * changed.  Since we'll be using the cdip, we need to offline
12560 	 * the old_dip.  If the state contains FCP_LUN_CHANGED, the dtype
12561 	 * for the device has been updated; offline the old device and
12562 	 * create a new device with the new device type.
12563 	 * Refer to bug: 4764752
12564 	 */
12565 	if (old_dip && (cdip != old_dip ||
12566 	    plun->lun_state & FCP_LUN_CHANGED)) {
12567 		plun->lun_state &= ~(FCP_LUN_INIT);
12568 		mutex_exit(&plun->lun_mutex);
12569 		mutex_exit(&pptr->port_mutex);
12570 
12571 		mutex_enter(&ptgt->tgt_mutex);
12572 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12573 		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12574 		mutex_exit(&ptgt->tgt_mutex);
12575 
12576 #ifdef DEBUG
12577 		if (cdip != NULL) {
12578 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12579 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12580 			    "Old dip=%p; New dip=%p don't match", old_dip,
12581 			    cdip);
12582 		} else {
12583 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12584 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12585 			    "Old dip=%p; New dip=NULL don't match", old_dip);
12586 		}
12587 #endif
12588 
12589 		mutex_enter(&pptr->port_mutex);
12590 		mutex_enter(&plun->lun_mutex);
12591 	}
12592 
12593 	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12594 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12595 		if (ndi_devi_alloc(pptr->port_dip, nname,
12596 		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12597 			failure++;
12598 			goto end_of_fcp_create_dip;
12599 		}
12600 	}
12601 
12602 	/*
12603 	 * Previously all the properties for the devinfo were destroyed here
12604 	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12605 	 * the devid property (and other properties established by the target
12606 	 * driver or framework) which the code does not always recreate, this
12607 	 * call was removed.
12608 	 * This opens a theoretical possibility that we may return with a
12609 	 * stale devid on the node if the scsi entity behind the fibre channel
12610 	 * lun has changed.
12611 	 */
12612 
12613 	/* decorate the node with compatible */
12614 	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12615 	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12616 		failure++;
12617 		goto end_of_fcp_create_dip;
12618 	}
12619 
12620 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12621 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12622 		failure++;
12623 		goto end_of_fcp_create_dip;
12624 	}
12625 
12626 	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12627 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12628 		failure++;
12629 		goto end_of_fcp_create_dip;
12630 	}
12631 
12632 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12633 	t_pwwn[16] = '\0';
12634 	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12635 	    != DDI_PROP_SUCCESS) {
12636 		failure++;
12637 		goto end_of_fcp_create_dip;
12638 	}
12639 
12640 	/*
12641 	 * If there is no hard address we might have to deal with that
12642 	 * by using the WWN.  Having said that, it is important to
12643 	 * recognize this problem early so ssd can be informed of
12644 	 * the right interconnect type.
12645 	 */
12646 	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12647 		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12648 	} else {
12649 		tgt_id = ptgt->tgt_d_id;
12650 	}
12651 
12652 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12653 	    tgt_id) != DDI_PROP_SUCCESS) {
12654 		failure++;
12655 		goto end_of_fcp_create_dip;
12656 	}
12657 
12658 	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12659 	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12660 		failure++;
12661 		goto end_of_fcp_create_dip;
12662 	}
12663 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12664 	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12665 	    sam_lun) != DDI_PROP_SUCCESS) {
12666 		failure++;
12667 		goto end_of_fcp_create_dip;
12668 	}
12669 
12670 end_of_fcp_create_dip:
12671 	scsi_hba_nodename_compatible_free(nname, compatible);
12672 
12673 	if (cdip != NULL && failure) {
12674 		(void) ndi_prop_remove_all(cdip);
12675 		(void) ndi_devi_free(cdip);
12676 		cdip = NULL;
12677 	}
12678 
12679 	return (cdip);
12680 }
12681 
12682 /*
12683  *     Function: fcp_create_pip
12684  *
12685  *  Description: Creates a Path Id for the LUN specified by the caller.
12686  *
12687  *     Argument: plun		Lun structure
12688  *		 link_cnt	Link state count.
12689  *		 tgt_cnt	Target state count.
12690  *
12691  * Return Value: NULL if it failed
12692  *		 mdi_pathinfo_t structure address if it succeeded
12693  *
12694  *	Context: Kernel context
12695  */
12696 static mdi_pathinfo_t *
12697 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12698 {
12699 	int			i;
12700 	char			buf[MAXNAMELEN];
12701 	char			uaddr[MAXNAMELEN];
12702 	int			failure = 0;
12703 	uint32_t		tgt_id;
12704 	uint64_t		sam_lun;
12705 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12706 	struct fcp_port	*pptr = ptgt->tgt_port;
12707 	dev_info_t		*pdip = pptr->port_dip;
12708 	mdi_pathinfo_t		*pip = NULL;
12709 	mdi_pathinfo_t		*old_pip = PIP(plun->lun_cip);
12710 	char			*nname = NULL;
12711 	char			**compatible = NULL;
12712 	int			ncompatible;
12713 	char			*scsi_binding_set;
12714 	char			t_pwwn[17];
12715 
12716 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
12717 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
12718 
12719 	scsi_binding_set = "vhci";
12720 
12721 	/* determine the node name and compatible */
12722 	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12723 	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12724 
12725 	if (nname == NULL) {
12726 #ifdef	DEBUG
12727 		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12728 		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12729 		    "	 compatible: %s",
12730 		    ddi_driver_name(pdip), ddi_get_instance(pdip),
12731 		    ptgt->tgt_port_wwn.raw_wwn[0],
12732 		    ptgt->tgt_port_wwn.raw_wwn[1],
12733 		    ptgt->tgt_port_wwn.raw_wwn[2],
12734 		    ptgt->tgt_port_wwn.raw_wwn[3],
12735 		    ptgt->tgt_port_wwn.raw_wwn[4],
12736 		    ptgt->tgt_port_wwn.raw_wwn[5],
12737 		    ptgt->tgt_port_wwn.raw_wwn[6],
12738 		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12739 		    *compatible);
12740 #endif	/* DEBUG */
12741 		failure++;
12742 		goto end_of_fcp_create_pip;
12743 	}
12744 
12745 	pip = fcp_find_existing_pip(plun, pdip);
12746 
12747 	/*
12748 	 * If the old_pip does not match the pip, some property has
12749 	 * changed.  Since we'll be using the pip, we need to offline
12750 	 * the old_pip.  If the state contains FCP_LUN_CHANGED, the dtype
12751 	 * for the device has been updated; offline the old device and
12752 	 * create a new device with the new device type.
12753 	 * Refer to bug: 4764752
12754 	 */
12755 	if (old_pip && (pip != old_pip ||
12756 	    plun->lun_state & FCP_LUN_CHANGED)) {
12757 		plun->lun_state &= ~(FCP_LUN_INIT);
12758 		mutex_exit(&plun->lun_mutex);
12759 		mutex_exit(&pptr->port_mutex);
12760 
12761 		mutex_enter(&ptgt->tgt_mutex);
12762 		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12763 		    FCP_OFFLINE, lcount, tcount,
12764 		    NDI_DEVI_REMOVE, 0);
12765 		mutex_exit(&ptgt->tgt_mutex);
12766 
12767 		if (pip != NULL) {
12768 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12769 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12770 			    "Old pip=%p; New pip=%p don't match",
12771 			    old_pip, pip);
12772 		} else {
12773 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
12774 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
12775 			    "Old pip=%p; New pip=NULL don't match",
12776 			    old_pip);
12777 		}
12778 
12779 		mutex_enter(&pptr->port_mutex);
12780 		mutex_enter(&plun->lun_mutex);
12781 	}
12782 
12783 	/*
12784 	 * Since FC_WWN_SIZE is 8 bytes and is not like lun_guid_size,
12785 	 * which is dependent on the target, I don't believe the same
12786 	 * truncation happens here UNLESS the standards change the
12787 	 * FC_WWN_SIZE value to something larger than MAXNAMELEN
12788 	 * (currently 255 bytes).
12789 	 */
12790 
12791 	for (i = 0; i < FC_WWN_SIZE; i++) {
12792 		(void) sprintf(&buf[i << 1], "%02x",
12793 		    ptgt->tgt_port_wwn.raw_wwn[i]);
12794 	}
12795 
12796 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12797 	    buf, plun->lun_num);
12798 
12799 	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12800 		/*
12801 		 * Release the locks before calling into
12802 		 * mdi_pi_alloc_compatible() since this can result in a
12803 		 * callback into fcp, which can lead to a deadlock
12804 		 * (see bug # 4870272).
12805 		 *
12806 		 * Basically, what we are trying to avoid is the scenario where
12807 		 * one thread does ndi_devi_enter() and tries to grab
12808 		 * fcp_mutex and another does it the other way round.
12809 		 *
12810 		 * But before we do that, make sure that nobody releases the
12811 		 * port in the meantime. We can do this by setting a flag.
12812 		 */
12813 		plun->lun_state &= ~(FCP_LUN_CHANGED);
12814 		pptr->port_state |= FCP_STATE_IN_MDI;
12815 		mutex_exit(&plun->lun_mutex);
12816 		mutex_exit(&pptr->port_mutex);
12817 		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12818 		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12819 			fcp_log(CE_WARN, pptr->port_dip,
12820 			    "!path alloc failed:0x%x", plun);
12821 			mutex_enter(&pptr->port_mutex);
12822 			mutex_enter(&plun->lun_mutex);
12823 			pptr->port_state &= ~FCP_STATE_IN_MDI;
12824 			failure++;
12825 			goto end_of_fcp_create_pip;
12826 		}
12827 		mutex_enter(&pptr->port_mutex);
12828 		mutex_enter(&plun->lun_mutex);
12829 		pptr->port_state &= ~FCP_STATE_IN_MDI;
12830 	} else {
12831 		(void) mdi_prop_remove(pip, NULL);
12832 	}
12833 
12834 	mdi_pi_set_phci_private(pip, (caddr_t)plun);
12835 
12836 	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12837 	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12838 	    != DDI_PROP_SUCCESS) {
12839 		failure++;
12840 		goto end_of_fcp_create_pip;
12841 	}
12842 
12843 	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12844 	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12845 	    != DDI_PROP_SUCCESS) {
12846 		failure++;
12847 		goto end_of_fcp_create_pip;
12848 	}
12849 
12850 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12851 	t_pwwn[16] = '\0';
12852 	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12853 	    != DDI_PROP_SUCCESS) {
12854 		failure++;
12855 		goto end_of_fcp_create_pip;
12856 	}
12857 
12858 	/*
12859 	 * If there is no hard address we might have to deal with that
12860 	 * by using the WWN.  Having said that, it is important to
12861 	 * recognize this problem early so ssd can be informed of
12862 	 * the right interconnect type.
12863 	 */
12864 	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12865 	    ptgt->tgt_hard_addr != 0) {
12866 		tgt_id = (uint32_t)
12867 		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12868 	} else {
12869 		tgt_id = ptgt->tgt_d_id;
12870 	}
12871 
12872 	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12873 	    != DDI_PROP_SUCCESS) {
12874 		failure++;
12875 		goto end_of_fcp_create_pip;
12876 	}
12877 
12878 	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12879 	    != DDI_PROP_SUCCESS) {
12880 		failure++;
12881 		goto end_of_fcp_create_pip;
12882 	}
12883 	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12884 	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12885 	    != DDI_PROP_SUCCESS) {
12886 		failure++;
12887 		goto end_of_fcp_create_pip;
12888 	}
12889 
12890 end_of_fcp_create_pip:
12891 	scsi_hba_nodename_compatible_free(nname, compatible);
12892 
12893 	if (pip != NULL && failure) {
12894 		(void) mdi_prop_remove(pip, NULL);
12895 		mutex_exit(&plun->lun_mutex);
12896 		mutex_exit(&pptr->port_mutex);
12897 		(void) mdi_pi_free(pip, 0);
12898 		mutex_enter(&pptr->port_mutex);
12899 		mutex_enter(&plun->lun_mutex);
12900 		pip = NULL;
12901 	}
12902 
12903 	return (pip);
12904 }
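
#ifdef	FCP_EXAMPLES
/*
 * Illustrative sketch only -- not part of the original driver and guarded
 * out of any build.  It distills the lock-drop pattern fcp_create_pip()
 * uses around mdi_pi_alloc_compatible(): a guard bit is set while the
 * locks are still held so that no other thread can release the port, the
 * locks are dropped across the call that may re-enter fcp, and the guard
 * is cleared once the locks have been reacquired.
 */
static int
fcp_guarded_mdi_call_sketch(struct fcp_port *pptr, struct fcp_lun *plun)
{
	int	rval;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	/* Keep the port from being released while the locks are dropped. */
	pptr->port_state |= FCP_STATE_IN_MDI;
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * The re-entrant call (mdi_pi_alloc_compatible() in the real code)
	 * would be made here, with no fcp locks held.
	 */
	rval = MDI_SUCCESS;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	pptr->port_state &= ~FCP_STATE_IN_MDI;

	return (rval);
}
#endif	/* FCP_EXAMPLES */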
12905 
12906 static dev_info_t *
12907 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12908 {
12909 	uint_t			nbytes;
12910 	uchar_t			*bytes;
12911 	uint_t			nwords;
12912 	uint32_t		tgt_id;
12913 	int			*words;
12914 	dev_info_t		*cdip;
12915 	dev_info_t		*ndip;
12916 	struct fcp_tgt	*ptgt = plun->lun_tgt;
12917 	struct fcp_port	*pptr = ptgt->tgt_port;
12918 
12919 	ndi_devi_enter(pdip);
12920 
12921 	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12922 	while ((cdip = ndip) != NULL) {
12923 		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12924 
12925 		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12926 			continue;
12927 		}
12928 
12929 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12930 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12931 		    &nbytes) != DDI_PROP_SUCCESS) {
12932 			continue;
12933 		}
12934 
12935 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12936 			if (bytes != NULL) {
12937 				ddi_prop_free(bytes);
12938 			}
12939 			continue;
12940 		}
12941 		ASSERT(bytes != NULL);
12942 
12943 		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12944 			ddi_prop_free(bytes);
12945 			continue;
12946 		}
12947 
12948 		ddi_prop_free(bytes);
12949 
12950 		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12951 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12952 		    &nbytes) != DDI_PROP_SUCCESS) {
12953 			continue;
12954 		}
12955 
12956 		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12957 			if (bytes != NULL) {
12958 				ddi_prop_free(bytes);
12959 			}
12960 			continue;
12961 		}
12962 		ASSERT(bytes != NULL);
12963 
12964 		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12965 			ddi_prop_free(bytes);
12966 			continue;
12967 		}
12968 
12969 		ddi_prop_free(bytes);
12970 
12971 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12972 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12973 		    &nwords) != DDI_PROP_SUCCESS) {
12974 			continue;
12975 		}
12976 
12977 		if (nwords != 1 || words == NULL) {
12978 			if (words != NULL) {
12979 				ddi_prop_free(words);
12980 			}
12981 			continue;
12982 		}
12983 		ASSERT(words != NULL);
12984 
12985 		/*
12986 		 * If there is no hard address we might have to deal with that
12987 		 * by using the WWN.  Having said that, it is important to
12988 		 * recognize this problem early so ssd can be informed of
12989 		 * the right interconnect type.
12990 		 */
12991 		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12992 		    ptgt->tgt_hard_addr != 0) {
12993 			tgt_id =
12994 			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12995 		} else {
12996 			tgt_id = ptgt->tgt_d_id;
12997 		}
12998 
12999 		if (tgt_id != (uint32_t)*words) {
13000 			ddi_prop_free(words);
13001 			continue;
13002 		}
13003 		ddi_prop_free(words);
13004 
13005 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13006 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13007 		    &nwords) != DDI_PROP_SUCCESS) {
13008 			continue;
13009 		}
13010 
13011 		if (nwords != 1 || words == NULL) {
13012 			if (words != NULL) {
13013 				ddi_prop_free(words);
13014 			}
13015 			continue;
13016 		}
13017 		ASSERT(words != NULL);
13018 
13019 		if (plun->lun_num == (uint16_t)*words) {
13020 			ddi_prop_free(words);
13021 			break;
13022 		}
13023 		ddi_prop_free(words);
13024 	}
13025 	ndi_devi_exit(pdip);
13026 
13027 	return (cdip);
13028 }
13029 
13030 
13031 static int
13032 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13033 {
13034 	dev_info_t	*pdip;
13035 	char		buf[MAXNAMELEN];
13036 	char		uaddr[MAXNAMELEN];
13037 	int		rval = FC_FAILURE;
13038 
13039 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13040 
13041 	pdip = plun->lun_tgt->tgt_port->port_dip;
13042 
13043 	/*
13044 	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13045 	 * non-NULL even when the LUN is not there as in the case when a LUN is
13046 	 * configured and then deleted on the device end (for T3/T4 case). In
13047 	 * such cases, pip will be NULL.
13048 	 *
13049 	 * If the device generates an RSCN, the LUN will end up getting
13050 	 * offlined when it disappears and a new LUN will get created when
13051 	 * it is rediscovered on the device. If we check for lun_cip here,
13052 	 * the LUN will not end up getting onlined since this function will
13053 	 * end up returning FC_SUCCESS.
13054 	 *
13055 	 * The behavior is different on other devices. For instance, on an
13056 	 * HDS, there was no RSCN generated by the device but the next I/O
13057 	 * generated a check condition and rediscovery got triggered that
13058 	 * way. So, in such cases, this path will not be exercised.
13059 	 */
13060 	if (pip == NULL) {
13061 		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13062 		    fcp_trace, FCP_BUF_LEVEL_4, 0,
13063 		    "fcp_is_pip_present: plun->lun_cip is NULL: "
13064 		    "plun: %p lun state: %x num: %d target state: %x",
13065 		    plun, plun->lun_state, plun->lun_num,
13066 		    plun->lun_tgt->tgt_port->port_state);
13067 		return (rval);
13068 	}
13069 
13070 	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13071 
13072 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13073 
13074 	if (plun->lun_old_guid) {
13075 		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13076 			rval = FC_SUCCESS;
13077 		}
13078 	} else {
13079 		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13080 			rval = FC_SUCCESS;
13081 		}
13082 	}
13083 	return (rval);
13084 }
13085 
13086 static mdi_pathinfo_t *
13087 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13088 {
13089 	char			buf[MAXNAMELEN];
13090 	char			uaddr[MAXNAMELEN];
13091 	mdi_pathinfo_t		*pip;
13092 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13093 	struct fcp_port	*pptr = ptgt->tgt_port;
13094 
13095 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13096 
13097 	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13098 	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13099 
13100 	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13101 
13102 	return (pip);
13103 }
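
#ifdef	FCP_EXAMPLES
/*
 * Illustrative sketch only -- not part of the original driver and guarded
 * out of any build.  It shows how the MPxIO unit address used by
 * fcp_create_pip(), fcp_is_pip_present() and fcp_find_existing_pip() is
 * built: the ASCII form of the remote port WWN prefixed with 'w', a comma
 * and the LUN number in hex, e.g. "w21000004cf8c1234,0".  It assumes
 * fcp_wwn_to_ascii() (defined elsewhere in this file) NUL-terminates buf.
 */
static void
fcp_build_uaddr_sketch(struct fcp_lun *plun, char *uaddr, size_t len)
{
	char	buf[MAXNAMELEN];

	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
	(void) snprintf(uaddr, len, "w%s,%x", buf, plun->lun_num);
}
#endif	/* FCP_EXAMPLES */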
13104 
13105 
13106 static int
13107 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13108     int tcount, int flags)
13109 {
13110 	int			rval;
13111 	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
13112 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13113 	dev_info_t		*cdip = NULL;
13114 
13115 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13116 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13117 
13118 	if (plun->lun_cip == NULL) {
13119 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13120 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13121 		    "fcp_online_child: plun->lun_cip is NULL: "
13122 		    "plun: %p state: %x num: %d target state: %x",
13123 		    plun, plun->lun_state, plun->lun_num,
13124 		    plun->lun_tgt->tgt_port->port_state);
13125 		return (NDI_FAILURE);
13126 	}
13127 again:
13128 	if (plun->lun_mpxio == 0) {
13129 		cdip = DIP(cip);
13130 		mutex_exit(&plun->lun_mutex);
13131 		mutex_exit(&pptr->port_mutex);
13132 
13133 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13134 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13135 		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13136 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13137 
13138 		/*
13139 		 * We could check for FCP_LUN_INIT here, but the chances
13140 		 * of getting here when it's already in FCP_LUN_INIT
13141 		 * are rare and a duplicate ndi_devi_online wouldn't
13142 		 * hurt either (as the node would already have been
13143 		 * in CF2).
13144 		 */
13145 		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13146 			rval = ndi_devi_bind_driver(cdip, flags);
13147 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13148 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13149 			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13150 		} else {
13151 			rval = ndi_devi_online(cdip, flags);
13152 		}
13153 
13154 		/*
13155 		 * We log the message into the trace buffer if the device
13156 		 * is "ses" and into syslog for any other device
13157 		 * type. This is to prevent the ndi_devi_online failure
13158 		 * message that appears for V880/A5K ses devices.
13159 		 */
13160 		if (rval == NDI_SUCCESS) {
13161 			mutex_enter(&ptgt->tgt_mutex);
13162 			plun->lun_state |= FCP_LUN_INIT;
13163 			mutex_exit(&ptgt->tgt_mutex);
13164 		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13165 			fcp_log(CE_NOTE, pptr->port_dip,
13166 			    "!ndi_devi_online:"
13167 			    " failed for %s: target=%x lun=%x %x",
13168 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13169 			    plun->lun_num, rval);
13170 		} else {
13171 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13172 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13173 			    " !ndi_devi_online:"
13174 			    " failed for %s: target=%x lun=%x %x",
13175 			    ddi_get_name(cdip), ptgt->tgt_d_id,
13176 			    plun->lun_num, rval);
13177 		}
13178 	} else {
13179 		cdip = mdi_pi_get_client(PIP(cip));
13180 		mutex_exit(&plun->lun_mutex);
13181 		mutex_exit(&pptr->port_mutex);
13182 
13183 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13184 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13185 		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13186 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13187 
13188 		/*
13189 		 * Hold path and exit phci to avoid deadlock with power
13190 		 * management code during mdi_pi_online.
13191 		 */
13192 		mdi_hold_path(PIP(cip));
13193 		mdi_devi_exit_phci(pptr->port_dip);
13194 
13195 		rval = mdi_pi_online(PIP(cip), flags);
13196 
13197 		mdi_devi_enter_phci(pptr->port_dip);
13198 		mdi_rele_path(PIP(cip));
13199 
13200 		if (rval == MDI_SUCCESS) {
13201 			mutex_enter(&ptgt->tgt_mutex);
13202 			plun->lun_state |= FCP_LUN_INIT;
13203 			mutex_exit(&ptgt->tgt_mutex);
13204 
13205 			/*
13206 			 * Clear MPxIO path permanent disable in case
13207 			 * fcp hotplug dropped the offline event.
13208 			 */
13209 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13210 
13211 		} else if (rval == MDI_NOT_SUPPORTED) {
13212 			child_info_t	*old_cip = cip;
13213 
13214 			/*
13215 			 * MPxIO does not support this device yet.
13216 			 * Enumerate in legacy mode.
13217 			 */
13218 			mutex_enter(&pptr->port_mutex);
13219 			mutex_enter(&plun->lun_mutex);
13220 			plun->lun_mpxio = 0;
13221 			plun->lun_cip = NULL;
13222 			cdip = fcp_create_dip(plun, lcount, tcount);
13223 			plun->lun_cip = cip = CIP(cdip);
13224 			if (cip == NULL) {
13225 				fcp_log(CE_WARN, pptr->port_dip,
13226 				    "!fcp_online_child: "
13227 				    "Create devinfo failed for LU=%p", plun);
13228 				mutex_exit(&plun->lun_mutex);
13229 
13230 				mutex_enter(&ptgt->tgt_mutex);
13231 				plun->lun_state |= FCP_LUN_OFFLINE;
13232 				mutex_exit(&ptgt->tgt_mutex);
13233 
13234 				mutex_exit(&pptr->port_mutex);
13235 
13236 				/*
13237 				 * free the mdi_pathinfo node
13238 				 */
13239 				(void) mdi_pi_free(PIP(old_cip), 0);
13240 			} else {
13241 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13242 				    fcp_trace, FCP_BUF_LEVEL_3, 0,
13243 				    "fcp_online_child: creating devinfo "
13244 				    "node 0x%p for plun 0x%p",
13245 				    cip, plun);
13246 				mutex_exit(&plun->lun_mutex);
13247 				mutex_exit(&pptr->port_mutex);
13248 				/*
13249 				 * free the mdi_pathinfo node
13250 				 */
13251 				(void) mdi_pi_free(PIP(old_cip), 0);
13252 				mutex_enter(&pptr->port_mutex);
13253 				mutex_enter(&plun->lun_mutex);
13254 				goto again;
13255 			}
13256 		} else {
13257 			if (cdip) {
13258 				fcp_log(CE_NOTE, pptr->port_dip,
13259 				    "!fcp_online_child: mdi_pi_online:"
13260 				    " failed for %s: target=%x lun=%x %x",
13261 				    ddi_get_name(cdip), ptgt->tgt_d_id,
13262 				    plun->lun_num, rval);
13263 			}
13264 		}
13265 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13266 	}
13267 
13268 	if (rval == NDI_SUCCESS) {
13269 		if (cdip) {
13270 			(void) ndi_event_retrieve_cookie(
13271 			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13272 			    &fcp_insert_eid, NDI_EVENT_NOPASS);
13273 			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13274 			    cdip, fcp_insert_eid, NULL);
13275 		}
13276 	}
13277 	mutex_enter(&pptr->port_mutex);
13278 	mutex_enter(&plun->lun_mutex);
13279 	return (rval);
13280 }
13281 
13282 /* ARGSUSED */
13283 static int
13284 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13285     int tcount, int flags)
13286 {
13287 	int		rval;
13288 	int		lun_mpxio;
13289 	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
13290 	struct fcp_tgt	*ptgt = plun->lun_tgt;
13291 	dev_info_t	*cdip;
13292 
13293 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13294 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
13295 
13296 	if (plun->lun_cip == NULL) {
13297 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13298 		    fcp_trace, FCP_BUF_LEVEL_3, 0,
13299 		    "fcp_offline_child: plun->lun_cip is NULL: "
13300 		    "plun: %p lun state: %x num: %d target state: %x",
13301 		    plun, plun->lun_state, plun->lun_num,
13302 		    plun->lun_tgt->tgt_port->port_state);
13303 		return (NDI_FAILURE);
13304 	}
13305 
13306 	/*
13307 	 * We will use this value twice. Make a copy to be sure we use
13308 	 * the same value in both places.
13309 	 */
13310 	lun_mpxio = plun->lun_mpxio;
13311 
13312 	if (lun_mpxio == 0) {
13313 		cdip = DIP(cip);
13314 		mutex_exit(&plun->lun_mutex);
13315 		mutex_exit(&pptr->port_mutex);
13316 		rval = ndi_devi_offline(DIP(cip), flags);
13317 		if (rval != NDI_SUCCESS) {
13318 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13319 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13320 			    "fcp_offline_child: ndi_devi_offline failed "
13321 			    "rval=%x cip=%p", rval, cip);
13322 		}
13323 	} else {
13324 		cdip = mdi_pi_get_client(PIP(cip));
13325 		mutex_exit(&plun->lun_mutex);
13326 		mutex_exit(&pptr->port_mutex);
13327 
13328 		/*
13329 		 * Exit phci to avoid deadlock with power management code
13330 		 * during mdi_pi_offline
13331 		 */
13332 		mdi_hold_path(PIP(cip));
13333 		mdi_devi_exit_phci(pptr->port_dip);
13334 
13335 		rval = mdi_pi_offline(PIP(cip), flags);
13336 
13337 		mdi_devi_enter_phci(pptr->port_dip);
13338 		mdi_rele_path(PIP(cip));
13339 
13340 		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13341 	}
13342 
13343 	mutex_enter(&ptgt->tgt_mutex);
13344 	plun->lun_state &= ~FCP_LUN_INIT;
13345 	mutex_exit(&ptgt->tgt_mutex);
13346 
13347 	if (rval == NDI_SUCCESS) {
13348 		cdip = NULL;
13349 		if (flags & NDI_DEVI_REMOVE) {
13350 			mutex_enter(&plun->lun_mutex);
13351 			/*
13352 			 * If the guid of the LUN changes, lun_cip will not be
13353 			 * equal to cip, and after offlining the LUN with the
13354 			 * old guid, we should keep lun_cip since it's the cip
13355 			 * of the LUN with the new guid.
13356 			 * Otherwise remove our reference to the child node.
13357 			 *
13358 			 * This must be done before the child node is freed,
13359 			 * otherwise other threads could see a stale lun_cip
13360 			 * pointer.
13361 			 */
13362 			if (plun->lun_cip == cip) {
13363 				plun->lun_cip = NULL;
13364 			}
13365 			if (plun->lun_old_guid) {
13366 				kmem_free(plun->lun_old_guid,
13367 				    plun->lun_old_guid_size);
13368 				plun->lun_old_guid = NULL;
13369 				plun->lun_old_guid_size = 0;
13370 			}
13371 			mutex_exit(&plun->lun_mutex);
13372 		}
13373 	}
13374 
13375 	if (lun_mpxio != 0) {
13376 		if (rval == NDI_SUCCESS) {
13377 			/*
13378 			 * Clear MPxIO path permanent disable as the path is
13379 			 * already offlined.
13380 			 */
13381 			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13382 
13383 			if (flags & NDI_DEVI_REMOVE) {
13384 				(void) mdi_pi_free(PIP(cip), 0);
13385 			}
13386 		} else {
13387 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
13388 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13389 			    "fcp_offline_child: mdi_pi_offline failed "
13390 			    "rval=%x cip=%p", rval, cip);
13391 		}
13392 	}
13393 
13394 	mutex_enter(&pptr->port_mutex);
13395 	mutex_enter(&plun->lun_mutex);
13396 
13397 	if (cdip) {
13398 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
13399 		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13400 		    " target=%x lun=%x", "ndi_offline",
13401 		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13402 	}
13403 
13404 	return (rval);
13405 }
13406 
13407 static void
13408 fcp_remove_child(struct fcp_lun *plun)
13409 {
13410 	child_info_t *cip;
13411 	boolean_t enteredv;
13412 
13413 	ASSERT(MUTEX_HELD(&plun->lun_mutex));
13414 
13415 	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13416 		if (plun->lun_mpxio == 0) {
13417 			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
13418 			(void) ndi_devi_free(DIP(plun->lun_cip));
13419 			plun->lun_cip = NULL;
13420 		} else {
13421 			/*
13422 			 * Clear reference to the child node in the lun.
13423 			 * This must be done before freeing it with mdi_pi_free
13424 			 * and with lun_mutex held so that other threads always
13425 			 * see either valid lun_cip or NULL when holding
13426 			 * lun_mutex. We keep a copy in cip.
13427 			 */
13428 			cip = plun->lun_cip;
13429 			plun->lun_cip = NULL;
13430 
13431 			mutex_exit(&plun->lun_mutex);
13432 			mutex_exit(&plun->lun_tgt->tgt_mutex);
13433 			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13434 
13435 			mdi_devi_enter(
13436 			    plun->lun_tgt->tgt_port->port_dip, &enteredv);
13437 
13438 			/*
13439 			 * Exit phci to avoid deadlock with power management
13440 			 * code during mdi_pi_offline
13441 			 */
13442 			mdi_hold_path(PIP(cip));
13443 			mdi_devi_exit_phci(
13444 			    plun->lun_tgt->tgt_port->port_dip);
13445 			(void) mdi_pi_offline(PIP(cip),
13446 			    NDI_DEVI_REMOVE);
13447 			mdi_devi_enter_phci(
13448 			    plun->lun_tgt->tgt_port->port_dip);
13449 			mdi_rele_path(PIP(cip));
13450 
13451 			mdi_devi_exit(
13452 			    plun->lun_tgt->tgt_port->port_dip, enteredv);
13453 
13454 			FCP_TRACE(fcp_logq,
13455 			    plun->lun_tgt->tgt_port->port_instbuf,
13456 			    fcp_trace, FCP_BUF_LEVEL_3, 0,
13457 			    "lun=%p pip freed %p", plun, cip);
13458 
13459 			(void) mdi_prop_remove(PIP(cip), NULL);
13460 			(void) mdi_pi_free(PIP(cip), 0);
13461 
13462 			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13463 			mutex_enter(&plun->lun_tgt->tgt_mutex);
13464 			mutex_enter(&plun->lun_mutex);
13465 		}
13466 	} else {
13467 		plun->lun_cip = NULL;
13468 	}
13469 }
13470 
13471 /*
13472  * called when a timeout occurs
13473  *
13474  * can be scheduled during an attach or resume (if not already running)
13475  *
13476  * one timeout is set up for all ports
13477  *
13478  * acquires and releases the global mutex
13479  */
13480 /*ARGSUSED*/
13481 static void
13482 fcp_watch(void *arg)
13483 {
13484 	struct fcp_port	*pptr;
13485 	struct fcp_ipkt	*icmd;
13486 	struct fcp_ipkt	*nicmd;
13487 	struct fcp_pkt	*cmd;
13488 	struct fcp_pkt	*ncmd;
13489 	struct fcp_pkt	*tail;
13490 	struct fcp_pkt	*pcmd;
13491 	struct fcp_pkt	*save_head;
13492 	struct fcp_port	*save_port;
13493 
13494 	/* increment global watchdog time */
13495 	fcp_watchdog_time += fcp_watchdog_timeout;
13496 
13497 	mutex_enter(&fcp_global_mutex);
13498 
13499 	/* scan each port in our list */
13500 	for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13501 		save_port = fcp_port_head;
13502 		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13503 		mutex_exit(&fcp_global_mutex);
13504 
13505 		mutex_enter(&pptr->port_mutex);
13506 		if (pptr->port_ipkt_list == NULL &&
13507 		    (pptr->port_state & (FCP_STATE_SUSPENDED |
13508 		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13509 			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13510 			mutex_exit(&pptr->port_mutex);
13511 			mutex_enter(&fcp_global_mutex);
13512 			goto end_of_watchdog;
13513 		}
13514 
13515 		/*
13516 		 * We check whether any targets need to be offlined.
13517 		 */
13518 		if (pptr->port_offline_tgts) {
13519 			fcp_scan_offline_tgts(pptr);
13520 		}
13521 
13522 		/*
13523 		 * We check whether any luns need to be offlined.
13524 		 */
13525 		if (pptr->port_offline_luns) {
13526 			fcp_scan_offline_luns(pptr);
13527 		}
13528 
13529 		/*
13530 		 * We check whether any targets or luns need to be reset.
13531 		 */
13532 		if (pptr->port_reset_list) {
13533 			fcp_check_reset_delay(pptr);
13534 		}
13535 
13536 		mutex_exit(&pptr->port_mutex);
13537 
13538 		/*
13539 		 * This is where the pending commands (pkt) are checked for
13540 		 * timeout.
13541 		 */
13542 		mutex_enter(&pptr->port_pkt_mutex);
13543 		tail = pptr->port_pkt_tail;
13544 
13545 		for (pcmd = NULL, cmd = pptr->port_pkt_head;
13546 		    cmd != NULL; cmd = ncmd) {
13547 			ncmd = cmd->cmd_next;
13548 			/*
13549 			 * If a command is in this queue the bit CFLAG_IN_QUEUE
13550 			 * must be set.
13551 			 */
13552 			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13553 			/*
13554 			 * FCP_INVALID_TIMEOUT will be set for those
13555 			 * commands that need to be failed, mostly those
13556 			 * cmds that could not be queued down for the
13557 			 * "timeout" value. cmd->cmd_timeout is used
13558 			 * to try and requeue the command regularly.
13559 			 */
13560 			if (cmd->cmd_timeout >= fcp_watchdog_time) {
13561 				/*
13562 				 * This command hasn't timed out yet.  Let's
13563 				 * go to the next one.
13564 				 */
13565 				pcmd = cmd;
13566 				goto end_of_loop;
13567 			}
13568 
13569 			if (cmd == pptr->port_pkt_head) {
13570 				ASSERT(pcmd == NULL);
13571 				pptr->port_pkt_head = cmd->cmd_next;
13572 			} else {
13573 				ASSERT(pcmd != NULL);
13574 				pcmd->cmd_next = cmd->cmd_next;
13575 			}
13576 
13577 			if (cmd == pptr->port_pkt_tail) {
13578 				ASSERT(cmd->cmd_next == NULL);
13579 				pptr->port_pkt_tail = pcmd;
13580 				if (pcmd) {
13581 					pcmd->cmd_next = NULL;
13582 				}
13583 			}
13584 			cmd->cmd_next = NULL;
13585 
13586 			/*
13587 			 * Save the current head before dropping the
13588 			 * mutex - if the head doesn't remain the
13589 			 * same after reacquiring the mutex, just
13590 			 * bail out and revisit on the next tick.
13591 			 *
13592 			 * PS: The tail pointer can change as the commands
13593 			 * get requeued after failing to retransport.
13594 			 */
13595 			save_head = pptr->port_pkt_head;
13596 			mutex_exit(&pptr->port_pkt_mutex);
13597 
13598 			if (cmd->cmd_fp_pkt->pkt_timeout ==
13599 			    FCP_INVALID_TIMEOUT) {
13600 				struct scsi_pkt		*pkt = cmd->cmd_pkt;
13601 				struct fcp_lun	*plun;
13602 				struct fcp_tgt	*ptgt;
13603 
13604 				plun = ADDR2LUN(&pkt->pkt_address);
13605 				ptgt = plun->lun_tgt;
13606 
13607 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13608 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13609 				    "SCSI cmd 0x%x to D_ID=%x timed out",
13610 				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13611 
13612 				cmd->cmd_state == FCP_PKT_ABORTING ?
13613 				    fcp_fail_cmd(cmd, CMD_RESET,
13614 				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13615 				    CMD_TIMEOUT, STAT_ABORTED);
13616 			} else {
13617 				fcp_retransport_cmd(pptr, cmd);
13618 			}
13619 			mutex_enter(&pptr->port_pkt_mutex);
13620 			if (save_head && save_head != pptr->port_pkt_head) {
13621 				/*
13622 				 * Looks like the linked list got changed (this
13623 				 * mostly happens when the OFFLINE LUN code starts
13624 				 * returning overflow queue commands in
13625 				 * parallel).  So bail out and revisit during
13626 				 * the next tick.
13627 				 */
13628 				break;
13629 			}
13630 		end_of_loop:
13631 			/*
13632 			 * Scan only up to the previously known tail pointer
13633 			 * to avoid excessive processing - lots of new packets
13634 			 * could have been added to the tail or the old ones
13635 			 * re-queued.
13636 			 */
13637 			if (cmd == tail) {
13638 				break;
13639 			}
13640 		}
13641 		mutex_exit(&pptr->port_pkt_mutex);
13642 
13643 		mutex_enter(&pptr->port_mutex);
13644 		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13645 			struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13646 
13647 			nicmd = icmd->ipkt_next;
13648 			if ((icmd->ipkt_restart != 0) &&
13649 			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
13650 				/* packet has not timed out */
13651 				continue;
13652 			}
13653 
13654 			/* time for packet re-transport */
13655 			if (icmd == pptr->port_ipkt_list) {
13656 				pptr->port_ipkt_list = icmd->ipkt_next;
13657 				if (pptr->port_ipkt_list) {
13658 					pptr->port_ipkt_list->ipkt_prev =
13659 					    NULL;
13660 				}
13661 			} else {
13662 				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13663 				if (icmd->ipkt_next) {
13664 					icmd->ipkt_next->ipkt_prev =
13665 					    icmd->ipkt_prev;
13666 				}
13667 			}
13668 			icmd->ipkt_next = NULL;
13669 			icmd->ipkt_prev = NULL;
13670 			mutex_exit(&pptr->port_mutex);
13671 
13672 			if (fcp_is_retryable(icmd)) {
13673 				fc_ulp_rscn_info_t *rscnp =
13674 				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13675 				    pkt_ulp_rscn_infop;
13676 
13677 				FCP_TRACE(fcp_logq, pptr->port_instbuf,
13678 				    fcp_trace, FCP_BUF_LEVEL_2, 0,
13679 				    "%x to D_ID=%x Retrying..",
13680 				    icmd->ipkt_opcode,
13681 				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13682 
13683 				/*
13684 				 * Update the RSCN count in the packet
13685 				 * before resending.
13686 				 */
13687 
13688 				if (rscnp != NULL) {
13689 					rscnp->ulp_rscn_count =
13690 					    fc_ulp_get_rscn_count(pptr->
13691 					    port_fp_handle);
13692 				}
13693 
13694 				mutex_enter(&pptr->port_mutex);
13695 				mutex_enter(&ptgt->tgt_mutex);
13696 				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13697 					mutex_exit(&ptgt->tgt_mutex);
13698 					mutex_exit(&pptr->port_mutex);
13699 					switch (icmd->ipkt_opcode) {
13700 						int rval;
13701 					case LA_ELS_PLOGI:
13702 						if ((rval = fc_ulp_login(
13703 						    pptr->port_fp_handle,
13704 						    &icmd->ipkt_fpkt, 1)) ==
13705 						    FC_SUCCESS) {
13706 							mutex_enter(
13707 							    &pptr->port_mutex);
13708 							continue;
13709 						}
13710 						if (fcp_handle_ipkt_errors(
13711 						    pptr, ptgt, icmd, rval,
13712 						    "PLOGI") == DDI_SUCCESS) {
13713 							mutex_enter(
13714 							    &pptr->port_mutex);
13715 							continue;
13716 						}
13717 						break;
13718 
13719 					case LA_ELS_PRLI:
13720 						if ((rval = fc_ulp_issue_els(
13721 						    pptr->port_fp_handle,
13722 						    icmd->ipkt_fpkt)) ==
13723 						    FC_SUCCESS) {
13724 							mutex_enter(
13725 							    &pptr->port_mutex);
13726 							continue;
13727 						}
13728 						if (fcp_handle_ipkt_errors(
13729 						    pptr, ptgt, icmd, rval,
13730 						    "PRLI") == DDI_SUCCESS) {
13731 							mutex_enter(
13732 							    &pptr->port_mutex);
13733 							continue;
13734 						}
13735 						break;
13736 
13737 					default:
13738 						if ((rval = fcp_transport(
13739 						    pptr->port_fp_handle,
13740 						    icmd->ipkt_fpkt, 1)) ==
13741 						    FC_SUCCESS) {
13742 							mutex_enter(
13743 							    &pptr->port_mutex);
13744 							continue;
13745 						}
13746 						if (fcp_handle_ipkt_errors(
13747 						    pptr, ptgt, icmd, rval,
13748 						    "PRLI") == DDI_SUCCESS) {
13749 							mutex_enter(
13750 							    &pptr->port_mutex);
13751 							continue;
13752 						}
13753 						break;
13754 					}
13755 				} else {
13756 					mutex_exit(&ptgt->tgt_mutex);
13757 					mutex_exit(&pptr->port_mutex);
13758 				}
13759 			} else {
13760 				fcp_print_error(icmd->ipkt_fpkt);
13761 			}
13762 
13763 			(void) fcp_call_finish_init(pptr, ptgt,
13764 			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13765 			    icmd->ipkt_cause);
13766 			fcp_icmd_free(pptr, icmd);
13767 			mutex_enter(&pptr->port_mutex);
13768 		}
13769 
13770 		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13771 		mutex_exit(&pptr->port_mutex);
13772 		mutex_enter(&fcp_global_mutex);
13773 
13774 	end_of_watchdog:
13775 		/*
13776 		 * Bail out early before getting into trouble
13777 		 */
13778 		if (save_port != fcp_port_head) {
13779 			break;
13780 		}
13781 	}
13782 
13783 	if (fcp_watchdog_init > 0) {
13784 		/* reschedule timeout to go again */
13785 		fcp_watchdog_id =
13786 		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
13787 	}
13788 	mutex_exit(&fcp_global_mutex);
13789 }
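
#ifdef	FCP_EXAMPLES
/*
 * Illustrative sketch only -- not part of the original driver and guarded
 * out of any build.  It shows the self-arming timeout pattern used for
 * fcp_watch(): the first caller to bump fcp_watchdog_init converts the
 * timeout from seconds to clock ticks and schedules the callback, and
 * fcp_watch() then re-arms itself at the end of each pass for as long as
 * fcp_watchdog_init stays non-zero (see the end of the function above).
 */
static void
fcp_start_watchdog_sketch(void)
{
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}
#endif	/* FCP_EXAMPLES */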
13790 
13791 
13792 static void
13793 fcp_check_reset_delay(struct fcp_port *pptr)
13794 {
13795 	uint32_t		tgt_cnt;
13796 	int			level;
13797 	struct fcp_tgt	*ptgt;
13798 	struct fcp_lun	*plun;
13799 	struct fcp_reset_elem *cur = NULL;
13800 	struct fcp_reset_elem *next = NULL;
13801 	struct fcp_reset_elem *prev = NULL;
13802 
13803 	ASSERT(mutex_owned(&pptr->port_mutex));
13804 
13805 	next = pptr->port_reset_list;
13806 	while ((cur = next) != NULL) {
13807 		next = cur->next;
13808 
13809 		if (cur->timeout < fcp_watchdog_time) {
13810 			prev = cur;
13811 			continue;
13812 		}
13813 
13814 		ptgt = cur->tgt;
13815 		plun = cur->lun;
13816 		tgt_cnt = cur->tgt_cnt;
13817 
13818 		if (ptgt) {
13819 			level = RESET_TARGET;
13820 		} else {
13821 			ASSERT(plun != NULL);
13822 			level = RESET_LUN;
13823 			ptgt = plun->lun_tgt;
13824 		}
13825 		if (prev) {
13826 			prev->next = next;
13827 		} else {
13828 			/*
13829 			 * Because we drop the port mutex while doing aborts for
13830 			 * packets, we can't rely on reset_list pointing to
13831 			 * our head.
13832 			 */
13833 			if (cur == pptr->port_reset_list) {
13834 				pptr->port_reset_list = next;
13835 			} else {
13836 				struct fcp_reset_elem *which;
13837 
13838 				which = pptr->port_reset_list;
13839 				while (which && which->next != cur) {
13840 					which = which->next;
13841 				}
13842 				ASSERT(which != NULL);
13843 
13844 				which->next = next;
13845 				prev = which;
13846 			}
13847 		}
13848 
13849 		kmem_free(cur, sizeof (*cur));
13850 
13851 		if (tgt_cnt == ptgt->tgt_change_cnt) {
13852 			mutex_enter(&ptgt->tgt_mutex);
13853 			if (level == RESET_TARGET) {
13854 				fcp_update_tgt_state(ptgt,
13855 				    FCP_RESET, FCP_LUN_BUSY);
13856 			} else {
13857 				fcp_update_lun_state(plun,
13858 				    FCP_RESET, FCP_LUN_BUSY);
13859 			}
13860 			mutex_exit(&ptgt->tgt_mutex);
13861 
13862 			mutex_exit(&pptr->port_mutex);
13863 			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13864 			mutex_enter(&pptr->port_mutex);
13865 		}
13866 	}
13867 }
13868 
13869 
13870 static void
13871 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13872     struct fcp_lun *rlun, int tgt_cnt)
13873 {
13874 	int			rval;
13875 	struct fcp_lun	*tlun, *nlun;
13876 	struct fcp_pkt	*pcmd = NULL, *ncmd = NULL,
13877 	    *cmd = NULL, *head = NULL,
13878 	    *tail = NULL;
13879 
13880 	mutex_enter(&pptr->port_pkt_mutex);
13881 	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13882 		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13883 		struct fcp_tgt *ptgt = plun->lun_tgt;
13884 
13885 		ncmd = cmd->cmd_next;
13886 
13887 		if (ptgt != ttgt && plun != rlun) {
13888 			pcmd = cmd;
13889 			continue;
13890 		}
13891 
13892 		if (pcmd != NULL) {
13893 			ASSERT(pptr->port_pkt_head != cmd);
13894 			pcmd->cmd_next = ncmd;
13895 		} else {
13896 			ASSERT(cmd == pptr->port_pkt_head);
13897 			pptr->port_pkt_head = ncmd;
13898 		}
13899 		if (pptr->port_pkt_tail == cmd) {
13900 			ASSERT(cmd->cmd_next == NULL);
13901 			pptr->port_pkt_tail = pcmd;
13902 			if (pcmd != NULL) {
13903 				pcmd->cmd_next = NULL;
13904 			}
13905 		}
13906 
13907 		if (head == NULL) {
13908 			head = tail = cmd;
13909 		} else {
13910 			ASSERT(tail != NULL);
13911 			tail->cmd_next = cmd;
13912 			tail = cmd;
13913 		}
13914 		cmd->cmd_next = NULL;
13915 	}
13916 	mutex_exit(&pptr->port_pkt_mutex);
13917 
13918 	for (cmd = head; cmd != NULL; cmd = ncmd) {
13919 		struct scsi_pkt *pkt = cmd->cmd_pkt;
13920 
13921 		ncmd = cmd->cmd_next;
13922 		ASSERT(pkt != NULL);
13923 
13924 		mutex_enter(&pptr->port_mutex);
13925 		if (ttgt->tgt_change_cnt == tgt_cnt) {
13926 			mutex_exit(&pptr->port_mutex);
13927 			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13928 			pkt->pkt_reason = CMD_RESET;
13929 			pkt->pkt_statistics |= STAT_DEV_RESET;
13930 			cmd->cmd_state = FCP_PKT_IDLE;
13931 			fcp_post_callback(cmd);
13932 		} else {
13933 			mutex_exit(&pptr->port_mutex);
13934 		}
13935 	}
13936 
13937 	/*
13938 	 * If the FCA will return all the commands in its queue then our
13939 	 * work is easy, just return.
13940 	 */
13941 
13942 	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13943 		return;
13944 	}
13945 
13946 	/*
13947 	 * For RESET_LUN get hold of target pointer
13948 	 */
13949 	if (ttgt == NULL) {
13950 		ASSERT(rlun != NULL);
13951 
13952 		ttgt = rlun->lun_tgt;
13953 
13954 		ASSERT(ttgt != NULL);
13955 	}
13956 
13957 	/*
13958 	 * There are some severe race conditions here.
13959 	 * While we are trying to abort the pkt, it might be completing
13960 	 * so mark it aborted and if the abort does not succeed then
13961 	 * handle it in the watch thread.
13962 	 */
13963 	mutex_enter(&ttgt->tgt_mutex);
13964 	nlun = ttgt->tgt_lun;
13965 	mutex_exit(&ttgt->tgt_mutex);
13966 	while ((tlun = nlun) != NULL) {
13967 		int restart = 0;
13968 		if (rlun && rlun != tlun) {
13969 			mutex_enter(&ttgt->tgt_mutex);
13970 			nlun = tlun->lun_next;
13971 			mutex_exit(&ttgt->tgt_mutex);
13972 			continue;
13973 		}
13974 		mutex_enter(&tlun->lun_mutex);
13975 		cmd = tlun->lun_pkt_head;
13976 		while (cmd != NULL) {
13977 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
13978 				struct scsi_pkt *pkt;
13979 
13980 				restart = 1;
13981 				cmd->cmd_state = FCP_PKT_ABORTING;
13982 				mutex_exit(&tlun->lun_mutex);
13983 				rval = fc_ulp_abort(pptr->port_fp_handle,
13984 				    cmd->cmd_fp_pkt, KM_SLEEP);
13985 				if (rval == FC_SUCCESS) {
13986 					pkt = cmd->cmd_pkt;
13987 					pkt->pkt_reason = CMD_RESET;
13988 					pkt->pkt_statistics |= STAT_DEV_RESET;
13989 					cmd->cmd_state = FCP_PKT_IDLE;
13990 					fcp_post_callback(cmd);
13991 				} else {
13992 					caddr_t msg;
13993 
13994 					(void) fc_ulp_error(rval, &msg);
13995 
13996 					/*
13997 					 * This part is tricky. The abort
13998 					 * failed and now the command could
13999 					 * be completing.  The cmd_state ==
14000 					 * FCP_PKT_ABORTING should save
14001 					 * are already aborting, the command
14002 					 * is ignored in fcp_cmd_callback.
14003 					 * command in fcp_cmd_callback.
14004 					 * Here we leave this packet for 20
14005 					 * sec to be aborted in the
14006 					 * fcp_watch thread.
14007 					 */
14008 					fcp_log(CE_WARN, pptr->port_dip,
14009 					    "!Abort failed after reset %s",
14010 					    msg);
14011 
14012 					cmd->cmd_timeout =
14013 					    fcp_watchdog_time +
14014 					    cmd->cmd_pkt->pkt_time +
14015 					    FCP_FAILED_DELAY;
14016 
14017 					cmd->cmd_fp_pkt->pkt_timeout =
14018 					    FCP_INVALID_TIMEOUT;
14019 					/*
14020 					 * This is a hack: cmd is put on the
14021 					 * overflow queue so that it will
14022 					 * eventually be timed out
14023 					 */
14024 					cmd->cmd_flags |= CFLAG_IN_QUEUE;
14025 
14026 					mutex_enter(&pptr->port_pkt_mutex);
14027 					if (pptr->port_pkt_head) {
14028 						ASSERT(pptr->port_pkt_tail
14029 						    != NULL);
14030 						pptr->port_pkt_tail->cmd_next
14031 						    = cmd;
14032 						pptr->port_pkt_tail = cmd;
14033 					} else {
14034 						ASSERT(pptr->port_pkt_tail
14035 						    == NULL);
14036 						pptr->port_pkt_head =
14037 						    pptr->port_pkt_tail
14038 						    = cmd;
14039 					}
14040 					cmd->cmd_next = NULL;
14041 					mutex_exit(&pptr->port_pkt_mutex);
14042 				}
14043 				mutex_enter(&tlun->lun_mutex);
14044 				cmd = tlun->lun_pkt_head;
14045 			} else {
14046 				cmd = cmd->cmd_forw;
14047 			}
14048 		}
14049 		mutex_exit(&tlun->lun_mutex);
14050 
14051 		mutex_enter(&ttgt->tgt_mutex);
14052 		nlun = (restart == 1) ? ttgt->tgt_lun : tlun->lun_next;
14053 		mutex_exit(&ttgt->tgt_mutex);
14054 
14055 		mutex_enter(&pptr->port_mutex);
14056 		if (tgt_cnt != ttgt->tgt_change_cnt) {
14057 			mutex_exit(&pptr->port_mutex);
14058 			return;
14059 		} else {
14060 			mutex_exit(&pptr->port_mutex);
14061 		}
14062 	}
14063 }
14064 
14065 
14066 /*
14067  * unlink the soft state, returning the soft state found (if any)
14068  *
14069  * acquires and releases the global mutex
14070  */
14071 struct fcp_port *
14072 fcp_soft_state_unlink(struct fcp_port *pptr)
14073 {
14074 	struct fcp_port	*hptr;		/* ptr index */
14075 	struct fcp_port	*tptr;		/* prev hptr */
14076 
14077 	mutex_enter(&fcp_global_mutex);
14078 	for (hptr = fcp_port_head, tptr = NULL;
14079 	    hptr != NULL;
14080 	    tptr = hptr, hptr = hptr->port_next) {
14081 		if (hptr == pptr) {
14082 			/* we found a match -- remove this item */
14083 			if (tptr == NULL) {
14084 				/* we're at the head of the list */
14085 				fcp_port_head = hptr->port_next;
14086 			} else {
14087 				tptr->port_next = hptr->port_next;
14088 			}
14089 			break;			/* success */
14090 		}
14091 	}
14092 	if (fcp_port_head == NULL) {
14093 		fcp_cleanup_blacklist(&fcp_lun_blacklist);
14094 	}
14095 	mutex_exit(&fcp_global_mutex);
14096 	return (hptr);
14097 }
14098 
14099 
14100 /*
14101  * called by fcp_scsi_hba_tgt_init to find a LUN given a
14102  * WWN and a LUN number
14103  */
14104 /* ARGSUSED */
14105 static struct fcp_lun *
14106 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14107 {
14108 	int hash;
14109 	struct fcp_tgt *ptgt;
14110 	struct fcp_lun *plun;
14111 
14112 	ASSERT(mutex_owned(&pptr->port_mutex));
14113 
14114 	hash = FCP_HASH(wwn);
14115 	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14116 	    ptgt = ptgt->tgt_next) {
14117 		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14118 		    sizeof (ptgt->tgt_port_wwn)) == 0) {
14119 			mutex_enter(&ptgt->tgt_mutex);
14120 			for (plun = ptgt->tgt_lun;
14121 			    plun != NULL;
14122 			    plun = plun->lun_next) {
14123 				if (plun->lun_num == lun) {
14124 					mutex_exit(&ptgt->tgt_mutex);
14125 					return (plun);
14126 				}
14127 			}
14128 			mutex_exit(&ptgt->tgt_mutex);
14129 			return (NULL);
14130 		}
14131 	}
14132 	return (NULL);
14133 }
14134 
14135 /*
14136  *     Function: fcp_prepare_pkt
14137  *
14138  *  Description: This function prepares the SCSI cmd pkt, passed by the caller,
14139  *		 for fcp_start(). It binds the data or partially maps it.
14140  *		 Builds the FCP header and starts the initialization of the
14141  *		 Fibre Channel header.
14142  *
14143  *     Argument: *pptr		FCP port.
14144  *		 *cmd		FCP packet.
14145  *		 *plun		LUN the command will be sent to.
14146  *
14147  *	Context: User, Kernel and Interrupt context.
14148  */
14149 static void
14150 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14151     struct fcp_lun *plun)
14152 {
14153 	fc_packet_t		*fpkt = cmd->cmd_fp_pkt;
14154 	struct fcp_tgt		*ptgt = plun->lun_tgt;
14155 	struct fcp_cmd		*fcmd = &cmd->cmd_fcp_cmd;
14156 
14157 	ASSERT(cmd->cmd_pkt->pkt_comp ||
14158 	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14159 
14160 	if (cmd->cmd_pkt->pkt_numcookies) {
14161 		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14162 			fcmd->fcp_cntl.cntl_read_data = 1;
14163 			fcmd->fcp_cntl.cntl_write_data = 0;
14164 			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14165 		} else {
14166 			fcmd->fcp_cntl.cntl_read_data = 0;
14167 			fcmd->fcp_cntl.cntl_write_data = 1;
14168 			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14169 		}
14170 
14171 		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14172 
14173 		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14174 		ASSERT(fpkt->pkt_data_cookie_cnt <=
14175 		    pptr->port_data_dma_attr.dma_attr_sgllen);
14176 
14177 		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14178 
14179 		/* FCA needs pkt_datalen to be set */
14180 		fpkt->pkt_datalen = cmd->cmd_dmacount;
14181 		fcmd->fcp_data_len = cmd->cmd_dmacount;
14182 	} else {
14183 		fcmd->fcp_cntl.cntl_read_data = 0;
14184 		fcmd->fcp_cntl.cntl_write_data = 0;
14185 		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14186 		fpkt->pkt_datalen = 0;
14187 		fcmd->fcp_data_len = 0;
14188 	}
14189 
14190 	/* set up the Tagged Queuing type */
14191 	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14192 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14193 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14194 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14195 	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14196 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14197 	} else {
14198 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14199 	}
14200 
14201 	fcmd->fcp_ent_addr = plun->lun_addr;
14202 
14203 	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14204 		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14205 		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14206 	} else {
14207 		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14208 	}
14209 
14210 	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14211 	cmd->cmd_pkt->pkt_state = 0;
14212 	cmd->cmd_pkt->pkt_statistics = 0;
14213 	cmd->cmd_pkt->pkt_resid = 0;
14214 
14215 	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14216 
14217 	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14218 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14219 		fpkt->pkt_comp = NULL;
14220 	} else {
14221 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14222 		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14223 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14224 		}
14225 		fpkt->pkt_comp = fcp_cmd_callback;
14226 	}
14227 
14228 	mutex_enter(&pptr->port_mutex);
14229 	if (pptr->port_state & FCP_STATE_SUSPENDED) {
14230 		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14231 	}
14232 	mutex_exit(&pptr->port_mutex);
14233 
14234 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14235 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14236 
14237 	/*
14238 	 * Save a few kernel cycles here
14239 	 */
14240 #ifndef	__lock_lint
14241 	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14242 #endif /* __lock_lint */
14243 }
14244 
14245 static void
14246 fcp_post_callback(struct fcp_pkt *cmd)
14247 {
14248 	scsi_hba_pkt_comp(cmd->cmd_pkt);
14249 }
14250 
14251 
14252 /*
14253  * called to do polled I/O by fcp_start()
14254  *
14255  * return a transport status value, i.e. TRAN_ACCEPT for success
14256  */
14257 static int
14258 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14259 {
14260 	int	rval;
14261 
14262 #ifdef	DEBUG
14263 	mutex_enter(&pptr->port_pkt_mutex);
14264 	pptr->port_npkts++;
14265 	mutex_exit(&pptr->port_pkt_mutex);
14266 #endif /* DEBUG */
14267 
14268 	if (cmd->cmd_fp_pkt->pkt_timeout) {
14269 		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14270 	} else {
14271 		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14272 	}
14273 
14274 	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14275 
14276 	cmd->cmd_state = FCP_PKT_ISSUED;
14277 
14278 	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14279 
14280 #ifdef	DEBUG
14281 	mutex_enter(&pptr->port_pkt_mutex);
14282 	pptr->port_npkts--;
14283 	mutex_exit(&pptr->port_pkt_mutex);
14284 #endif /* DEBUG */
14285 
14286 	cmd->cmd_state = FCP_PKT_IDLE;
14287 
14288 	switch (rval) {
14289 	case FC_SUCCESS:
14290 		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14291 			fcp_complete_pkt(cmd->cmd_fp_pkt);
14292 			rval = TRAN_ACCEPT;
14293 		} else {
14294 			rval = TRAN_FATAL_ERROR;
14295 		}
14296 		break;
14297 
14298 	case FC_TRAN_BUSY:
14299 		rval = TRAN_BUSY;
14300 		cmd->cmd_pkt->pkt_resid = 0;
14301 		break;
14302 
14303 	case FC_BADPACKET:
14304 		rval = TRAN_BADPKT;
14305 		break;
14306 
14307 	default:
14308 		rval = TRAN_FATAL_ERROR;
14309 		break;
14310 	}
14311 
14312 	return (rval);
14313 }
14314 
14315 
14316 /*
14317  * called by some of the following transport-called routines to convert
14318  * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14319  */
14320 static struct fcp_port *
14321 fcp_dip2port(dev_info_t *dip)
14322 {
14323 	int	instance;
14324 
14325 	instance = ddi_get_instance(dip);
14326 	return (ddi_get_soft_state(fcp_softstate, instance));
14327 }
14328 
14329 
14330 /*
14331  * called internally to return a LUN given a dip
14332  */
14333 struct fcp_lun *
14334 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14335 {
14336 	struct fcp_tgt *ptgt;
14337 	struct fcp_lun *plun;
14338 	int i;
14339 
14340 
14341 	ASSERT(mutex_owned(&pptr->port_mutex));
14342 
14343 	for (i = 0; i < FCP_NUM_HASH; i++) {
14344 		for (ptgt = pptr->port_tgt_hash_table[i];
14345 		    ptgt != NULL;
14346 		    ptgt = ptgt->tgt_next) {
14347 			mutex_enter(&ptgt->tgt_mutex);
14348 			for (plun = ptgt->tgt_lun; plun != NULL;
14349 			    plun = plun->lun_next) {
14350 				mutex_enter(&plun->lun_mutex);
14351 				if (plun->lun_cip == cip) {
14352 					mutex_exit(&plun->lun_mutex);
14353 					mutex_exit(&ptgt->tgt_mutex);
14354 					return (plun); /* match found */
14355 				}
14356 				mutex_exit(&plun->lun_mutex);
14357 			}
14358 			mutex_exit(&ptgt->tgt_mutex);
14359 		}
14360 	}
14361 	return (NULL);				/* no LUN found */
14362 }
14363 
14364 /*
14365  * pass an element to the hotplug list, kick the hotplug thread
14366  * and wait for the element to get processed by the hotplug thread.
14367  * on return the element is freed.
14368  *
14369  * return zero success and non-zero on failure
14370  *
14371  * acquires/releases the target mutex
14372  *
14373  */
14374 static int
14375 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14376     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14377 {
14378 	struct fcp_hp_elem	*elem;
14379 	int			rval;
14380 
14381 	mutex_enter(&plun->lun_tgt->tgt_mutex);
14382 	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14383 	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14384 		mutex_exit(&plun->lun_tgt->tgt_mutex);
14385 		fcp_log(CE_CONT, pptr->port_dip,
14386 		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14387 		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14388 		return (NDI_FAILURE);
14389 	}
14390 	mutex_exit(&plun->lun_tgt->tgt_mutex);
14391 	mutex_enter(&elem->mutex);
14392 	if (elem->wait) {
14393 		while (elem->wait) {
14394 			cv_wait(&elem->cv, &elem->mutex);
14395 		}
14396 	}
14397 	rval = (elem->result);
14398 	mutex_exit(&elem->mutex);
14399 	mutex_destroy(&elem->mutex);
14400 	cv_destroy(&elem->cv);
14401 	kmem_free(elem, sizeof (struct fcp_hp_elem));
14402 	return (rval);
14403 }
14404 
14405 /*
14406  * pass an element to the hotplug list, and then
14407  * kick the hotplug thread
14408  *
14409  * return Boolean success, i.e. non-zero if all goes well, else zero on error
14410  *
14411  * acquires/releases the hotplug mutex
14412  *
14413  * called with the target mutex owned
14414  *
14415  * memory acquired in NOSLEEP mode
14416  * NOTE: if wait is set to 1 then the caller is responsible for waiting
14417  *	 for the hp daemon to process the request and is responsible for
14418  *	 freeing the element
14419  */
14420 static struct fcp_hp_elem *
14421 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14422     child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14423 {
14424 	struct fcp_hp_elem	*elem;
14425 	dev_info_t *pdip;
14426 
14427 	ASSERT(pptr != NULL);
14428 	ASSERT(plun != NULL);
14429 	ASSERT(plun->lun_tgt != NULL);
14430 	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14431 
14432 	/* create space for a hotplug element */
14433 	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14434 	    == NULL) {
14435 		fcp_log(CE_WARN, NULL,
14436 		    "!can't allocate memory for hotplug element");
14437 		return (NULL);
14438 	}
14439 
14440 	/* fill in hotplug element */
14441 	elem->port = pptr;
14442 	elem->lun = plun;
14443 	elem->cip = cip;
14444 	elem->old_lun_mpxio = plun->lun_mpxio;
14445 	elem->what = what;
14446 	elem->flags = flags;
14447 	elem->link_cnt = link_cnt;
14448 	elem->tgt_cnt = tgt_cnt;
14449 	elem->wait = wait;
14450 	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14451 	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14452 
14453 	/* schedule the hotplug task */
14454 	pdip = pptr->port_dip;
14455 	mutex_enter(&plun->lun_mutex);
14456 	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14457 		plun->lun_event_count++;
14458 		elem->event_cnt = plun->lun_event_count;
14459 	}
14460 	mutex_exit(&plun->lun_mutex);
14461 	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14462 	    (void *)elem, KM_NOSLEEP) == TASKQID_INVALID) {
14463 		mutex_enter(&plun->lun_mutex);
14464 		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14465 			plun->lun_event_count--;
14466 		}
14467 		mutex_exit(&plun->lun_mutex);
14468 		kmem_free(elem, sizeof (*elem));
14469 		return (NULL);
14470 	}
14471 
14472 	return (elem);
14473 }
14474 
14475 
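/*
 * retry a command taken off the port's overflow queue: if the LUN is
 * neither busy nor offline (and the port is not onlining) the packet is
 * re-prepared and handed to fcp_transport(); otherwise, or if the
 * transport attempt fails, the packet is put back on the queue via
 * fcp_queue_pkt()
 */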
14476 static void
14477 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14478 {
14479 	int			rval;
14480 	struct scsi_address	*ap;
14481 	struct fcp_lun	*plun;
14482 	struct fcp_tgt	*ptgt;
14483 	fc_packet_t	*fpkt;
14484 
14485 	ap = &cmd->cmd_pkt->pkt_address;
14486 	plun = ADDR2LUN(ap);
14487 	ptgt = plun->lun_tgt;
14488 
14489 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14490 
14491 	cmd->cmd_state = FCP_PKT_IDLE;
14492 
14493 	mutex_enter(&pptr->port_mutex);
14494 	mutex_enter(&ptgt->tgt_mutex);
14495 	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14496 	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
14497 		fc_ulp_rscn_info_t *rscnp;
14498 
14499 		cmd->cmd_state = FCP_PKT_ISSUED;
14500 
14501 		/*
14502 		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14503 		 * originally NULL, hence we try to set it to the pd pointed
14504 		 * to by the SCSI device we're trying to get to.
14505 		 */
14506 
14507 		fpkt = cmd->cmd_fp_pkt;
14508 		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14509 			fpkt->pkt_pd = ptgt->tgt_pd_handle;
14510 			/*
14511 			 * We need to notify the transport that we now have a
14512 			 * reference to the remote port handle.
14513 			 */
14514 			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14515 		}
14516 
14517 		mutex_exit(&ptgt->tgt_mutex);
14518 		mutex_exit(&pptr->port_mutex);
14519 
14520 		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14521 
14522 		/* prepare the packet */
14523 
14524 		fcp_prepare_pkt(pptr, cmd, plun);
14525 
14526 		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14527 		    pkt_ulp_rscn_infop;
14528 
14529 		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14530 		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14531 
14532 		if (rscnp != NULL) {
14533 			rscnp->ulp_rscn_count =
14534 			    fc_ulp_get_rscn_count(pptr->
14535 			    port_fp_handle);
14536 		}
14537 
14538 		rval = fcp_transport(pptr->port_fp_handle,
14539 		    cmd->cmd_fp_pkt, 0);
14540 
14541 		if (rval == FC_SUCCESS) {
14542 			return;
14543 		}
14544 		cmd->cmd_state &= ~FCP_PKT_ISSUED;
14545 	} else {
14546 		mutex_exit(&ptgt->tgt_mutex);
14547 		mutex_exit(&pptr->port_mutex);
14548 	}
14549 
14550 	fcp_queue_pkt(pptr, cmd);
14551 }
14552 
14553 
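/*
 * complete a queued command with the given pkt_reason and statistics and
 * post its completion callback
 */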
14554 static void
14555 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14556 {
14557 	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14558 
14559 	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14560 	cmd->cmd_state = FCP_PKT_IDLE;
14561 
14562 	cmd->cmd_pkt->pkt_reason = reason;
14563 	cmd->cmd_pkt->pkt_state = 0;
14564 	cmd->cmd_pkt->pkt_statistics = statistics;
14565 
14566 	fcp_post_callback(cmd);
14567 }
14568 
14569 /*
14570  *     Function: fcp_queue_pkt
14571  *
14572  *  Description: This function queues the packet passed by the caller into
14573  *		 the list of packets of the FCP port.
14574  *
14575  *     Argument: *pptr		FCP port.
14576  *		 *cmd		FCP packet to queue.
14577  *
14578  * Return Value: None
14579  *
14580  *	Context: User, Kernel and Interrupt context.
14581  */
14582 static void
14583 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14584 {
14585 	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);
14586 
14587 	mutex_enter(&pptr->port_pkt_mutex);
14588 	cmd->cmd_flags |= CFLAG_IN_QUEUE;
14589 	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14590 	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14591 
14592 	/*
14593 	 * zero pkt_time means hang around forever
14594 	 */
14595 	if (cmd->cmd_pkt->pkt_time) {
14596 		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14597 			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14598 		} else {
14599 			/*
14600 			 * Tell the watch thread to fail the command
14601 			 * by setting its timeout to the highest value
14602 			 */
14603 			cmd->cmd_timeout = fcp_watchdog_time;
14604 			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14605 		}
14606 	}
14607 
14608 	if (pptr->port_pkt_head) {
14609 		ASSERT(pptr->port_pkt_tail != NULL);
14610 
14611 		pptr->port_pkt_tail->cmd_next = cmd;
14612 		pptr->port_pkt_tail = cmd;
14613 	} else {
14614 		ASSERT(pptr->port_pkt_tail == NULL);
14615 
14616 		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14617 	}
14618 	cmd->cmd_next = NULL;
14619 	mutex_exit(&pptr->port_pkt_mutex);
14620 }
14621 
14622 /*
14623  *     Function: fcp_update_targets
14624  *
14625  *  Description: This function applies the specified change of state to all
14626  *		 the targets listed.  The operation applied is 'set'.
14627  *
14628  *     Argument: *pptr		FCP port.
14629  *		 *dev_list	Array of fc_portmap_t structures.
14630  *		 count		Length of dev_list.
14631  *		 state		State bits to update.
14632  *		 cause		Reason for the update.
14633  *
14634  * Return Value: None
14635  *
14636  *	Context: User, Kernel and Interrupt context.
14637  *		 The mutex pptr->port_mutex must be held.
14638  */
14639 static void
14640 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14641     uint32_t count, uint32_t state, int cause)
14642 {
14643 	fc_portmap_t		*map_entry;
14644 	struct fcp_tgt	*ptgt;
14645 
14646 	ASSERT(MUTEX_HELD(&pptr->port_mutex));
14647 
14648 	while (count--) {
14649 		map_entry = &(dev_list[count]);
14650 		ptgt = fcp_lookup_target(pptr,
14651 		    (uchar_t *)&(map_entry->map_pwwn));
14652 		if (ptgt == NULL) {
14653 			continue;
14654 		}
14655 
14656 		mutex_enter(&ptgt->tgt_mutex);
14657 		ptgt->tgt_trace = 0;
14658 		ptgt->tgt_change_cnt++;
14659 		ptgt->tgt_statec_cause = cause;
14660 		ptgt->tgt_tmp_cnt = 1;
14661 		fcp_update_tgt_state(ptgt, FCP_SET, state);
14662 		mutex_exit(&ptgt->tgt_mutex);
14663 	}
14664 }
14665 
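/*
 * wrapper around fcp_call_finish_init_held() that acquires and releases
 * the port mutex
 */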
14666 static int
14667 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14668     int lcount, int tcount, int cause)
14669 {
14670 	int rval;
14671 
14672 	mutex_enter(&pptr->port_mutex);
14673 	rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14674 	mutex_exit(&pptr->port_mutex);
14675 
14676 	return (rval);
14677 }
14678 
14679 
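/*
 * called with the port mutex held to account for one completed piece of
 * target discovery: decrements the target and port temporary counters and
 * calls fcp_finish_tgt()/fcp_finish_init() once they drain to zero
 *
 * returns FCP_DEV_CHANGE if the target change count no longer matches,
 * else FCP_NO_CHANGE
 */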
14680 static int
14681 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14682     int lcount, int tcount, int cause)
14683 {
14684 	int	finish_init = 0;
14685 	int	finish_tgt = 0;
14686 	int	do_finish_init = 0;
14687 	int	rval = FCP_NO_CHANGE;
14688 
14689 	if (cause == FCP_CAUSE_LINK_CHANGE ||
14690 	    cause == FCP_CAUSE_LINK_DOWN) {
14691 		do_finish_init = 1;
14692 	}
14693 
14694 	if (ptgt != NULL) {
14695 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14696 		    FCP_BUF_LEVEL_2, 0,
14697 		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14698 		    " cause = %d, d_id = 0x%x, tgt_done = %d",
14699 		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14700 		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14701 		    ptgt->tgt_d_id, ptgt->tgt_done);
14702 
14703 		mutex_enter(&ptgt->tgt_mutex);
14704 
14705 		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14706 			rval = FCP_DEV_CHANGE;
14707 			if (do_finish_init && ptgt->tgt_done == 0) {
14708 				ptgt->tgt_done++;
14709 				finish_init = 1;
14710 			}
14711 		} else {
14712 			if (--ptgt->tgt_tmp_cnt <= 0) {
14713 				ptgt->tgt_tmp_cnt = 0;
14714 				finish_tgt = 1;
14715 
14716 				if (do_finish_init) {
14717 					finish_init = 1;
14718 				}
14719 			}
14720 		}
14721 		mutex_exit(&ptgt->tgt_mutex);
14722 	} else {
14723 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14724 		    FCP_BUF_LEVEL_2, 0,
14725 		    "Call Finish Init for NO target");
14726 
14727 		if (do_finish_init) {
14728 			finish_init = 1;
14729 		}
14730 	}
14731 
14732 	if (finish_tgt) {
14733 		ASSERT(ptgt != NULL);
14734 
14735 		mutex_enter(&ptgt->tgt_mutex);
14736 #ifdef	DEBUG
14737 		bzero(ptgt->tgt_tmp_cnt_stack,
14738 		    sizeof (ptgt->tgt_tmp_cnt_stack));
14739 
14740 		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14741 		    FCP_STACK_DEPTH);
14742 #endif /* DEBUG */
14743 		mutex_exit(&ptgt->tgt_mutex);
14744 
14745 		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14746 	}
14747 
14748 	if (finish_init && lcount == pptr->port_link_cnt) {
14749 		ASSERT(pptr->port_tmp_cnt > 0);
14750 		if (--pptr->port_tmp_cnt == 0) {
14751 			fcp_finish_init(pptr);
14752 		}
14753 	} else if (lcount != pptr->port_link_cnt) {
14754 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
14755 		    fcp_trace, FCP_BUF_LEVEL_2, 0,
14756 		    "fcp_call_finish_init_held,1: state change occurred"
14757 		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14758 	}
14759 
14760 	return (rval);
14761 }
14762 
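/*
 * timeout callback used to rediscover the LUNs of a target after a
 * REPORT_LUNS data change: builds a single-entry portmap for the target
 * and feeds it back through fcp_statec_callback()
 */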
14763 static void
14764 fcp_reconfigure_luns(void *tgt_handle)
14765 {
14766 	uint32_t		dev_cnt;
14767 	fc_portmap_t		*devlist;
14768 	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
14769 	struct fcp_port		*pptr = ptgt->tgt_port;
14770 
14771 	/*
14772 	 * If the timer that fires this off got canceled too late, the
14773 	 * target could have been destroyed.
14774 	 */
14775 
14776 	if (ptgt->tgt_tid == NULL) {
14777 		return;
14778 	}
14779 
14780 	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14781 	if (devlist == NULL) {
14782 		fcp_log(CE_WARN, pptr->port_dip,
14783 		    "!fcp%d: failed to allocate for portmap",
14784 		    pptr->port_instance);
14785 		return;
14786 	}
14787 
14788 	dev_cnt = 1;
14789 	devlist->map_pd = ptgt->tgt_pd_handle;
14790 	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14791 	devlist->map_did.port_id = ptgt->tgt_d_id;
14792 
14793 	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14794 	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14795 
14796 	devlist->map_state = PORT_DEVICE_LOGGED_IN;
14797 	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14798 	devlist->map_flags = 0;
14799 
14800 	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14801 	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14802 
14803 	/*
14804 	 * Clear the tgt_tid after no more references to
14805 	 * the fcp_tgt
14806 	 */
14807 	mutex_enter(&ptgt->tgt_mutex);
14808 	ptgt->tgt_tid = NULL;
14809 	mutex_exit(&ptgt->tgt_mutex);
14810 
14811 	kmem_free(devlist, sizeof (*devlist));
14812 }
14813 
14814 
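/*
 * free every target (and its LUNs) hanging off the port's target hash
 * table; acquires and releases the port mutex
 */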
14815 static void
14816 fcp_free_targets(struct fcp_port *pptr)
14817 {
14818 	int			i;
14819 	struct fcp_tgt	*ptgt;
14820 
14821 	mutex_enter(&pptr->port_mutex);
14822 	for (i = 0; i < FCP_NUM_HASH; i++) {
14823 		ptgt = pptr->port_tgt_hash_table[i];
14824 		while (ptgt != NULL) {
14825 			struct fcp_tgt *next_tgt = ptgt->tgt_next;
14826 
14827 			fcp_free_target(ptgt);
14828 			ptgt = next_tgt;
14829 		}
14830 	}
14831 	mutex_exit(&pptr->port_mutex);
14832 }
14833 
14834 
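/*
 * free a single target: cancel any pending reconfiguration timeout,
 * deallocate all of its LUNs and then the target structure itself
 */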
14835 static void
14836 fcp_free_target(struct fcp_tgt *ptgt)
14837 {
14838 	struct fcp_lun	*plun;
14839 	timeout_id_t		tid;
14840 
14841 	mutex_enter(&ptgt->tgt_mutex);
14842 	tid = ptgt->tgt_tid;
14843 
14844 	/*
14845 	 * Cancel any pending timeouts for this target.
14846 	 */
14847 
14848 	if (tid != NULL) {
14849 		/*
14850 		 * Set tgt_tid to NULL first to avoid a race in the callback.
14851 		 * If tgt_tid is NULL, the callback will simply return.
14852 		 */
14853 		ptgt->tgt_tid = NULL;
14854 		mutex_exit(&ptgt->tgt_mutex);
14855 		(void) untimeout(tid);
14856 		mutex_enter(&ptgt->tgt_mutex);
14857 	}
14858 
14859 	plun = ptgt->tgt_lun;
14860 	while (plun != NULL) {
14861 		struct fcp_lun *next_lun = plun->lun_next;
14862 
14863 		fcp_dealloc_lun(plun);
14864 		plun = next_lun;
14865 	}
14866 
14867 	mutex_exit(&ptgt->tgt_mutex);
14868 	fcp_dealloc_tgt(ptgt);
14869 }
14870 
14871 /*
14872  *     Function: fcp_is_retryable
14873  *
14874  *  Description: Indicates if the internal packet is retryable.
14875  *
14876  *     Argument: *icmd		FCP internal packet.
14877  *
14878  * Return Value: 0	Not retryable
14879  *		 1	Retryable
14880  *
14881  *	Context: User, Kernel and Interrupt context
14882  */
14883 static int
14884 fcp_is_retryable(struct fcp_ipkt *icmd)
14885 {
14886 	if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14887 	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14888 		return (0);
14889 	}
14890 
14891 	return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14892 	    icmd->ipkt_port->port_deadline) ? 1 : 0);
14893 }
14894 
14895 /*
14896  *     Function: fcp_create_on_demand
14897  *
14898  *     Argument: *pptr		FCP port.
14899  *		 *pwwn		Port WWN.
14900  *
14901  * Return Value: 0	Success
14902  *		 EIO
14903  *		 ENOMEM
14904  *		 EBUSY
14905  *		 EINVAL
14906  *
14907  *	Context: User and Kernel context
14908  */
14909 static int
14910 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14911 {
14912 	int			wait_ms;
14913 	int			tcount;
14914 	int			lcount;
14915 	int			ret;
14916 	int			error;
14917 	int			rval = EIO;
14918 	int			ntries;
14919 	fc_portmap_t		*devlist;
14920 	opaque_t		pd;
14921 	struct fcp_lun		*plun;
14922 	struct fcp_tgt		*ptgt;
14923 	int			old_manual = 0;
14924 
14925 	/* Allocates the fc_portmap_t structure. */
14926 	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14927 
14928 	/*
14929 	 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14930 	 * in the commented statement below:
14931 	 *
14932 	 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14933 	 *
14934 	 * Below, the deadline for the discovery process is set.
14935 	 */
14936 	mutex_enter(&pptr->port_mutex);
14937 	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14938 	mutex_exit(&pptr->port_mutex);
14939 
14940 	/*
14941 	 * We try to find the remote port based on the WWN provided by the
14942 	 * caller.  We actually ask fp/fctl if it has it.
14943 	 */
14944 	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14945 	    (la_wwn_t *)pwwn, &error, 1);
14946 
14947 	if (pd == NULL) {
14948 		kmem_free(devlist, sizeof (*devlist));
14949 		return (rval);
14950 	}
14951 
14952 	/*
14953 	 * The remote port was found.  We ask fp/fctl to update our
14954 	 * fc_portmap_t structure.
14955 	 */
14956 	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14957 	    (la_wwn_t *)pwwn, devlist);
14958 	if (ret != FC_SUCCESS) {
14959 		kmem_free(devlist, sizeof (*devlist));
14960 		return (rval);
14961 	}
14962 
14963 	/*
14964 	 * The map type field is set to indicate that the creation is being
14965 	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14966 	 */
14967 	devlist->map_type = PORT_DEVICE_USER_CREATE;
14968 
14969 	mutex_enter(&pptr->port_mutex);
14970 
14971 	/*
14972 	 * We check to see if fcp already has a target that describes the
14973 	 * device being created.  If not it is created.
14974 	 */
14975 	ptgt = fcp_lookup_target(pptr, pwwn);
14976 	if (ptgt == NULL) {
14977 		lcount = pptr->port_link_cnt;
14978 		mutex_exit(&pptr->port_mutex);
14979 
14980 		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14981 		if (ptgt == NULL) {
14982 			fcp_log(CE_WARN, pptr->port_dip,
14983 			    "!FC target allocation failed");
14984 			return (ENOMEM);
14985 		}
14986 
14987 		mutex_enter(&pptr->port_mutex);
14988 	}
14989 
14990 	mutex_enter(&ptgt->tgt_mutex);
14991 	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14992 	ptgt->tgt_tmp_cnt = 1;
14993 	ptgt->tgt_device_created = 0;
14994 	/*
14995 	 * If fabric and auto config is set but the target was
14996 	 * manually unconfigured, then reset manual_config_only to
14997 	 * 0 so the device will get configured.
14998 	 */
14999 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15000 	    fcp_enable_auto_configuration &&
15001 	    ptgt->tgt_manual_config_only == 1) {
15002 		old_manual = 1;
15003 		ptgt->tgt_manual_config_only = 0;
15004 	}
15005 	mutex_exit(&ptgt->tgt_mutex);
15006 
15007 	fcp_update_targets(pptr, devlist, 1,
15008 	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15009 
15010 	lcount = pptr->port_link_cnt;
15011 	tcount = ptgt->tgt_change_cnt;
15012 
15013 	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15014 	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15015 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15016 		    fcp_enable_auto_configuration && old_manual) {
15017 			mutex_enter(&ptgt->tgt_mutex);
15018 			ptgt->tgt_manual_config_only = 1;
15019 			mutex_exit(&ptgt->tgt_mutex);
15020 		}
15021 
15022 		if (pptr->port_link_cnt != lcount ||
15023 		    ptgt->tgt_change_cnt != tcount) {
15024 			rval = EBUSY;
15025 		}
15026 		mutex_exit(&pptr->port_mutex);
15027 
15028 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15029 		    FCP_BUF_LEVEL_3, 0,
15030 		    "fcp_create_on_demand: mapflags ptgt=%x, "
15031 		    "lcount=%x::port_link_cnt=%x, "
15032 		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
15033 		    ptgt, lcount, pptr->port_link_cnt,
15034 		    tcount, ptgt->tgt_change_cnt, rval);
15035 		return (rval);
15036 	}
15037 
15038 	/*
15039 	 * Due to lack of synchronization mechanisms, we perform
15040 	 * periodic monitoring of our request.  Because requests
15041 	 * get dropped when another one supersedes them (either because
15042 	 * of a link change or a target change), it is difficult to
15043 	 * provide a clean synchronization mechanism (such as a
15044 	 * semaphore or a condition variable) without exhaustively
15045 	 * rewriting the mainline discovery code of this driver.
15046 	 */
15047 	wait_ms = 500;
15048 
15049 	ntries = fcp_max_target_retries;
15050 
15051 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15052 	    FCP_BUF_LEVEL_3, 0,
15053 	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15054 	    "lcount=%x::port_link_cnt=%x, "
15055 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15056 	    "tgt_tmp_cnt =%x",
15057 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15058 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15059 	    ptgt->tgt_tmp_cnt);
15060 
15061 	mutex_enter(&ptgt->tgt_mutex);
15062 	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15063 	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15064 		mutex_exit(&ptgt->tgt_mutex);
15065 		mutex_exit(&pptr->port_mutex);
15066 
15067 		delay(drv_usectohz(wait_ms * 1000));
15068 
15069 		mutex_enter(&pptr->port_mutex);
15070 		mutex_enter(&ptgt->tgt_mutex);
15071 	}
15072 
15073 
15074 	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15075 		rval = EBUSY;
15076 	} else {
15077 		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15078 		    FCP_TGT_NODE_PRESENT) {
15079 			rval = 0;
15080 		}
15081 	}
15082 
15083 	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15084 	    FCP_BUF_LEVEL_3, 0,
15085 	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15086 	    "lcount=%x::port_link_cnt=%x, "
15087 	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15088 	    "tgt_tmp_cnt =%x",
15089 	    ntries, ptgt, lcount, pptr->port_link_cnt,
15090 	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15091 	    ptgt->tgt_tmp_cnt);
15092 
15093 	if (rval) {
15094 		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15095 		    fcp_enable_auto_configuration && old_manual) {
15096 			ptgt->tgt_manual_config_only = 1;
15097 		}
15098 		mutex_exit(&ptgt->tgt_mutex);
15099 		mutex_exit(&pptr->port_mutex);
15100 		kmem_free(devlist, sizeof (*devlist));
15101 
15102 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15103 		    FCP_BUF_LEVEL_3, 0,
15104 		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15105 		    "lcount=%x::port_link_cnt=%x, "
15106 		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15107 		    "tgt_device_created=%x, tgt D_ID=%x",
15108 		    ntries, ptgt, lcount, pptr->port_link_cnt,
15109 		    tcount, ptgt->tgt_change_cnt, rval,
15110 		    ptgt->tgt_device_created, ptgt->tgt_d_id);
15111 		return (rval);
15112 	}
15113 
15114 	if ((plun = ptgt->tgt_lun) != NULL) {
15115 		tcount = plun->lun_tgt->tgt_change_cnt;
15116 	} else {
15117 		rval = EINVAL;
15118 	}
15119 	lcount = pptr->port_link_cnt;
15120 
15121 	/*
15122 	 * Configuring the target with no LUNs will fail. We
15123 	 * should reset the node state so that it is not
15124 	 * automatically configured when the LUNs are added
15125 	 * to this target.
15126 	 */
15127 	if (ptgt->tgt_lun_cnt == 0) {
15128 		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15129 	}
15130 	mutex_exit(&ptgt->tgt_mutex);
15131 	mutex_exit(&pptr->port_mutex);
15132 
15133 	while (plun) {
15134 		child_info_t	*cip;
15135 
15136 		mutex_enter(&plun->lun_mutex);
15137 		cip = plun->lun_cip;
15138 		mutex_exit(&plun->lun_mutex);
15139 
15140 		mutex_enter(&ptgt->tgt_mutex);
15141 		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15142 			mutex_exit(&ptgt->tgt_mutex);
15143 
15144 			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15145 			    FCP_ONLINE, lcount, tcount,
15146 			    NDI_ONLINE_ATTACH);
15147 			if (rval != NDI_SUCCESS) {
15148 				FCP_TRACE(fcp_logq,
15149 				    pptr->port_instbuf, fcp_trace,
15150 				    FCP_BUF_LEVEL_3, 0,
15151 				    "fcp_create_on_demand: "
15152 				    "pass_to_hp_and_wait failed "
15153 				    "rval=%x", rval);
15154 				rval = EIO;
15155 			} else {
15156 				mutex_enter(&LUN_TGT->tgt_mutex);
15157 				plun->lun_state &= ~(FCP_LUN_OFFLINE |
15158 				    FCP_LUN_BUSY);
15159 				mutex_exit(&LUN_TGT->tgt_mutex);
15160 			}
15161 			mutex_enter(&ptgt->tgt_mutex);
15162 		}
15163 
15164 		plun = plun->lun_next;
15165 		mutex_exit(&ptgt->tgt_mutex);
15166 	}
15167 
15168 	kmem_free(devlist, sizeof (*devlist));
15169 
15170 	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15171 	    fcp_enable_auto_configuration && old_manual) {
15172 		mutex_enter(&ptgt->tgt_mutex);
15173 		/* if successful then set manual to 0 */
15174 		if (rval == 0) {
15175 			ptgt->tgt_manual_config_only = 0;
15176 		} else {
15177 			/* reset to 1 so the user has to do the config */
15178 			ptgt->tgt_manual_config_only = 1;
15179 		}
15180 		mutex_exit(&ptgt->tgt_mutex);
15181 	}
15182 
15183 	return (rval);
15184 }
15185 
15186 
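/*
 * convert an ASCII hex string into a byte array of at most byte_len
 * bytes; the inverse of fcp_wwn_to_ascii() below
 */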
15187 static void
15188 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15189 {
15190 	int		count;
15191 	uchar_t		byte;
15192 
15193 	count = 0;
15194 	while (*string) {
15195 		byte = FCP_ATOB(*string); string++;
15196 		byte = byte << 4 | FCP_ATOB(*string); string++;
15197 		bytes[count++] = byte;
15198 
15199 		if (count >= byte_len) {
15200 			break;
15201 		}
15202 	}
15203 }
15204 
15205 static void
15206 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15207 {
15208 	int		i;
15209 
15210 	for (i = 0; i < FC_WWN_SIZE; i++) {
15211 		(void) sprintf(string + (i * 2),
15212 		    "%02x", wwn[i]);
15213 	}
15214 
15215 }
15216 
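/*
 * log a descriptive warning for a failed internal packet (REPORT LUN,
 * INQUIRY, INQUIRY-83, PLOGI or PRLI), decoding the FCP response and any
 * SCSI sense data when they are available
 */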
15217 static void
15218 fcp_print_error(fc_packet_t *fpkt)
15219 {
15220 	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
15221 	    fpkt->pkt_ulp_private;
15222 	struct fcp_port	*pptr;
15223 	struct fcp_tgt	*ptgt;
15224 	struct fcp_lun	*plun;
15225 	caddr_t			buf;
15226 	int			scsi_cmd = 0;
15227 
15228 	ptgt = icmd->ipkt_tgt;
15229 	plun = icmd->ipkt_lun;
15230 	pptr = ptgt->tgt_port;
15231 
15232 	buf = kmem_zalloc(256, KM_NOSLEEP);
15233 	if (buf == NULL) {
15234 		return;
15235 	}
15236 
15237 	switch (icmd->ipkt_opcode) {
15238 	case SCMD_REPORT_LUN:
15239 		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15240 		    " lun=0x%%x failed");
15241 		scsi_cmd++;
15242 		break;
15243 
15244 	case SCMD_INQUIRY_PAGE83:
15245 		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15246 		    " lun=0x%%x failed");
15247 		scsi_cmd++;
15248 		break;
15249 
15250 	case SCMD_INQUIRY:
15251 		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15252 		    " lun=0x%%x failed");
15253 		scsi_cmd++;
15254 		break;
15255 
15256 	case LA_ELS_PLOGI:
15257 		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15258 		break;
15259 
15260 	case LA_ELS_PRLI:
15261 		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15262 		break;
15263 	}
15264 
15265 	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15266 		struct fcp_rsp		response, *rsp;
15267 		uchar_t			asc, ascq;
15268 		caddr_t			sense_key = NULL;
15269 		struct fcp_rsp_info	fcp_rsp_err, *bep;
15270 
15271 		if (icmd->ipkt_nodma) {
15272 			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15273 			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15274 			    sizeof (struct fcp_rsp));
15275 		} else {
15276 			rsp = &response;
15277 			bep = &fcp_rsp_err;
15278 
15279 			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15280 			    sizeof (struct fcp_rsp));
15281 
15282 			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15283 			    bep, fpkt->pkt_resp_acc,
15284 			    sizeof (struct fcp_rsp_info));
15285 		}
15286 
15287 
15288 		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15289 			(void) sprintf(buf + strlen(buf),
15290 			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15291 			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15292 			    " senselen=%%x. Giving up");
15293 
15294 			fcp_log(CE_WARN, pptr->port_dip, buf,
15295 			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15296 			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15297 			    rsp->fcp_u.fcp_status.reserved_1,
15298 			    rsp->fcp_response_len, rsp->fcp_sense_len);
15299 
15300 			kmem_free(buf, 256);
15301 			return;
15302 		}
15303 
15304 		if (rsp->fcp_u.fcp_status.rsp_len_set &&
15305 		    bep->rsp_code != FCP_NO_FAILURE) {
15306 			(void) sprintf(buf + strlen(buf),
15307 			    " FCP Response code = 0x%x", bep->rsp_code);
15308 		}
15309 
15310 		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15311 			struct scsi_extended_sense sense_info, *sense_ptr;
15312 
15313 			if (icmd->ipkt_nodma) {
15314 				sense_ptr = (struct scsi_extended_sense *)
15315 				    ((caddr_t)fpkt->pkt_resp +
15316 				    sizeof (struct fcp_rsp) +
15317 				    rsp->fcp_response_len);
15318 			} else {
15319 				sense_ptr = &sense_info;
15320 
15321 				FCP_CP_IN(fpkt->pkt_resp +
15322 				    sizeof (struct fcp_rsp) +
15323 				    rsp->fcp_response_len, &sense_info,
15324 				    fpkt->pkt_resp_acc,
15325 				    sizeof (struct scsi_extended_sense));
15326 			}
15327 
15328 			if (sense_ptr->es_key < NUM_SENSE_KEYS +
15329 			    NUM_IMPL_SENSE_KEYS) {
15330 				sense_key = sense_keys[sense_ptr->es_key];
15331 			} else {
15332 				sense_key = "Undefined";
15333 			}
15334 
15335 			asc = sense_ptr->es_add_code;
15336 			ascq = sense_ptr->es_qual_code;
15337 
15338 			(void) sprintf(buf + strlen(buf),
15339 			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15340 			    " Giving up");
15341 
15342 			fcp_log(CE_WARN, pptr->port_dip, buf,
15343 			    ptgt->tgt_d_id, plun->lun_num, sense_key,
15344 			    asc, ascq);
15345 		} else {
15346 			(void) sprintf(buf + strlen(buf),
15347 			    " : SCSI status=%%x. Giving up");
15348 
15349 			fcp_log(CE_WARN, pptr->port_dip, buf,
15350 			    ptgt->tgt_d_id, plun->lun_num,
15351 			    rsp->fcp_u.fcp_status.scsi_status);
15352 		}
15353 	} else {
15354 		caddr_t state, reason, action, expln;
15355 
15356 		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
15357 		    &action, &expln);
15358 
15359 		(void) sprintf(buf + strlen(buf), ": State:%%s,"
15360 		    " Reason:%%s. Giving up");
15361 
15362 		if (scsi_cmd) {
15363 			fcp_log(CE_WARN, pptr->port_dip, buf,
15364 			    ptgt->tgt_d_id, plun->lun_num, state, reason);
15365 		} else {
15366 			fcp_log(CE_WARN, pptr->port_dip, buf,
15367 			    ptgt->tgt_d_id, state, reason);
15368 		}
15369 	}
15370 
15371 	kmem_free(buf, 256);
15372 }
15373 
15374 
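/*
 * handle a transport error for an internal packet: busy/offline style
 * errors are requeued for retry while they remain retryable, a PRLI that
 * fails with FC_LOGINREQ is reissued as a PLOGI, and anything else is
 * logged
 *
 * returns DDI_SUCCESS if the packet was requeued or resent, else
 * DDI_FAILURE
 */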
15375 static int
15376 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15377     struct fcp_ipkt *icmd, int rval, caddr_t op)
15378 {
15379 	int	ret = DDI_FAILURE;
15380 	char	*error;
15381 
15382 	switch (rval) {
15383 	case FC_DEVICE_BUSY_NEW_RSCN:
15384 		/*
15385 		 * This means that there was a new RSCN that the transport
15386 		 * knows about (which the ULP *may* know about too) but the
15387 		 * pkt that was sent down was related to an older RSCN. So, we
15388 		 * are just going to reset the retry count and deadline and
15389 		 * continue to retry. The idea is that transport is currently
15390 		 * working on the new RSCN and will soon let the ULPs know
15391 		 * about it and when it does the existing logic will kick in
15392 		 * where it will change the tcount to indicate that something
15393 		 * changed on the target. So, rediscovery will start and there
15394 		 * will not be an infinite retry.
15395 		 *
15396 		 * For a full flow of how the RSCN info is transferred back and
15397 		 * forth, see fp.c
15398 		 */
15399 		icmd->ipkt_retries = 0;
15400 		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15401 		    FCP_ICMD_DEADLINE;
15402 
15403 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15404 		    FCP_BUF_LEVEL_3, 0,
15405 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15406 		    rval, ptgt->tgt_d_id);
15407 		/* FALLTHROUGH */
15408 
15409 	case FC_STATEC_BUSY:
15410 	case FC_DEVICE_BUSY:
15411 	case FC_PBUSY:
15412 	case FC_FBUSY:
15413 	case FC_TRAN_BUSY:
15414 	case FC_OFFLINE:
15415 		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15416 		    FCP_BUF_LEVEL_3, 0,
15417 		    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15418 		    rval, ptgt->tgt_d_id);
15419 		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15420 		    fcp_is_retryable(icmd)) {
15421 			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15422 			ret = DDI_SUCCESS;
15423 		}
15424 		break;
15425 
15426 	case FC_LOGINREQ:
15427 		/*
15428 		 * FC_LOGINREQ used to be handled just like all the cases
15429 		 * above. It has been changed to handle a PRLI that fails
15430 		 * with FC_LOGINREQ differently from other ipkts that fail
15431 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15432 		 * a simple matter to turn it into a PLOGI instead, so that's
15433 		 * exactly what we do here.
15434 		 */
15435 		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15436 			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15437 			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15438 			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
15439 		} else {
15440 			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15441 			    FCP_BUF_LEVEL_3, 0,
15442 			    "fcp_handle_ipkt_errors: rval=%x  for D_ID=%x",
15443 			    rval, ptgt->tgt_d_id);
15444 			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15445 			    fcp_is_retryable(icmd)) {
15446 				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15447 				ret = DDI_SUCCESS;
15448 			}
15449 		}
15450 		break;
15451 
15452 	default:
15453 		mutex_enter(&pptr->port_mutex);
15454 		mutex_enter(&ptgt->tgt_mutex);
15455 		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15456 			mutex_exit(&ptgt->tgt_mutex);
15457 			mutex_exit(&pptr->port_mutex);
15458 
15459 			(void) fc_ulp_error(rval, &error);
15460 			fcp_log(CE_WARN, pptr->port_dip,
15461 			    "!Failed to send %s to D_ID=%x error=%s",
15462 			    op, ptgt->tgt_d_id, error);
15463 		} else {
15464 			FCP_TRACE(fcp_logq, pptr->port_instbuf,
15465 			    fcp_trace, FCP_BUF_LEVEL_2, 0,
15466 			    "fcp_handle_ipkt_errors,1: state change occurred"
15467 			    " for D_ID=0x%x", ptgt->tgt_d_id);
15468 			mutex_exit(&ptgt->tgt_mutex);
15469 			mutex_exit(&pptr->port_mutex);
15470 		}
15471 		break;
15472 	}
15473 
15474 	return (ret);
15475 }
15476 
15477 
15478 /*
15479  * Check for outstanding commands on any LUN for this target
15480  */
15481 static int
15482 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15483 {
15484 	struct	fcp_lun	*plun;
15485 	struct	fcp_pkt	*cmd;
15486 
15487 	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15488 		mutex_enter(&plun->lun_mutex);
15489 		for (cmd = plun->lun_pkt_head; cmd != NULL;
15490 		    cmd = cmd->cmd_forw) {
15491 			if (cmd->cmd_state == FCP_PKT_ISSUED) {
15492 				mutex_exit(&plun->lun_mutex);
15493 				return (FC_SUCCESS);
15494 			}
15495 		}
15496 		mutex_exit(&plun->lun_mutex);
15497 	}
15498 
15499 	return (FC_FAILURE);
15500 }
15501 
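/*
 * build an fc_portmap_t array describing every non-orphan target known to
 * the port; *dev_cnt is set to the number of entries.  Returns NULL if
 * the allocation fails.  The caller frees the array.
 */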
15502 static fc_portmap_t *
15503 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15504 {
15505 	int			i;
15506 	fc_portmap_t		*devlist;
15507 	fc_portmap_t		*devptr = NULL;
15508 	struct fcp_tgt	*ptgt;
15509 
15510 	mutex_enter(&pptr->port_mutex);
15511 	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15512 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15513 		    ptgt = ptgt->tgt_next) {
15514 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15515 				++*dev_cnt;
15516 			}
15517 		}
15518 	}
15519 
15520 	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15521 	    KM_NOSLEEP);
15522 	if (devlist == NULL) {
15523 		mutex_exit(&pptr->port_mutex);
15524 		fcp_log(CE_WARN, pptr->port_dip,
15525 		    "!fcp%d: failed to allocate for portmap for construct map",
15526 		    pptr->port_instance);
15527 		return (devptr);
15528 	}
15529 
15530 	for (i = 0; i < FCP_NUM_HASH; i++) {
15531 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15532 		    ptgt = ptgt->tgt_next) {
15533 			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15534 				int ret;
15535 
15536 				ret = fc_ulp_pwwn_to_portmap(
15537 				    pptr->port_fp_handle,
15538 				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15539 				    devlist);
15540 
15541 				if (ret == FC_SUCCESS) {
15542 					devlist++;
15543 					continue;
15544 				}
15545 
15546 				devlist->map_pd = NULL;
15547 				devlist->map_did.port_id = ptgt->tgt_d_id;
15548 				devlist->map_hard_addr.hard_addr =
15549 				    ptgt->tgt_hard_addr;
15550 
15551 				devlist->map_state = PORT_DEVICE_INVALID;
15552 				devlist->map_type = PORT_DEVICE_OLD;
15553 
15554 				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15555 				    &devlist->map_nwwn, FC_WWN_SIZE);
15556 
15557 				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15558 				    &devlist->map_pwwn, FC_WWN_SIZE);
15559 
15560 				devlist++;
15561 			}
15562 		}
15563 	}
15564 
15565 	mutex_exit(&pptr->port_mutex);
15566 
15567 	return (devptr);
15568 }
15569 /*
15570  * Inform MPxIO that the LUN is busy and cannot accept regular IO
15571  */
15572 static void
15573 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15574 {
15575 	int i;
15576 	struct fcp_tgt	*ptgt;
15577 	struct fcp_lun	*plun;
15578 
15579 	for (i = 0; i < FCP_NUM_HASH; i++) {
15580 		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15581 		    ptgt = ptgt->tgt_next) {
15582 			mutex_enter(&ptgt->tgt_mutex);
15583 			for (plun = ptgt->tgt_lun; plun != NULL;
15584 			    plun = plun->lun_next) {
15585 				if (plun->lun_mpxio &&
15586 				    plun->lun_state & FCP_LUN_BUSY) {
15587 					if (!fcp_pass_to_hp(pptr, plun,
15588 					    plun->lun_cip,
15589 					    FCP_MPXIO_PATH_SET_BUSY,
15590 					    pptr->port_link_cnt,
15591 					    ptgt->tgt_change_cnt, 0, 0)) {
15592 						FCP_TRACE(fcp_logq,
15593 						    pptr->port_instbuf,
15594 						    fcp_trace,
15595 						    FCP_BUF_LEVEL_2, 0,
15596 						    "path_verifybusy: "
15597 						    "disable lun %p failed!",
15598 						    plun);
15599 					}
15600 				}
15601 			}
15602 			mutex_exit(&ptgt->tgt_mutex);
15603 		}
15604 	}
15605 }
15606 
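/*
 * enable or disable the MPxIO path of a LUN depending on 'what'
 * (FCP_MPXIO_PATH_CLEAR_BUSY or FCP_MPXIO_PATH_SET_BUSY)
 *
 * returns NDI_FAILURE if the child is no longer present, else NDI_SUCCESS
 */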
15607 static int
15608 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15609 {
15610 	dev_info_t		*cdip = NULL;
15611 	dev_info_t		*pdip = NULL;
15612 
15613 	ASSERT(plun);
15614 
15615 	mutex_enter(&plun->lun_mutex);
15616 	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15617 		mutex_exit(&plun->lun_mutex);
15618 		return (NDI_FAILURE);
15619 	}
15620 	mutex_exit(&plun->lun_mutex);
15621 	cdip = mdi_pi_get_client(PIP(cip));
15622 	pdip = mdi_pi_get_phci(PIP(cip));
15623 
15624 	ASSERT(cdip != NULL);
15625 	ASSERT(pdip != NULL);
15626 
15627 	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15628 		/* LUN ready for IO */
15629 		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15630 	} else {
15631 		/* LUN is busy and cannot accept IO */
15632 		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15633 	}
15634 	return (NDI_SUCCESS);
15635 }
15636 
15637 /*
15638  * Caller must free the returned string, which is MAXPATHLEN bytes long.
15639  * If the device is offline (-1 instance number), NULL
15640  * will be returned.
15641  */
15642 static char *
15643 fcp_get_lun_path(struct fcp_lun *plun)
15644 {
15645 	dev_info_t	*dip = NULL;
15646 	char		*path = NULL;
15647 	mdi_pathinfo_t	*pip = NULL;
15648 
15649 	if (plun == NULL) {
15650 		return (NULL);
15651 	}
15652 
15653 	mutex_enter(&plun->lun_mutex);
15654 	if (plun->lun_mpxio == 0) {
15655 		dip = DIP(plun->lun_cip);
15656 		mutex_exit(&plun->lun_mutex);
15657 	} else {
15658 		/*
15659 		 * lun_cip must be accessed with lun_mutex held. Here
15660 		 * plun->lun_cip either points to a valid node or it is NULL.
15661 		 * Make a copy so that we can release lun_mutex.
15662 		 */
15663 		pip = PIP(plun->lun_cip);
15664 
15665 		/*
15666 		 * Increase ref count on the path so that we can release
15667 		 * lun_mutex and still be sure that the pathinfo node (and thus
15668 		 * also the client) is not deallocated. If pip is NULL, this
15669 		 * has no effect.
15670 		 */
15671 		mdi_hold_path(pip);
15672 
15673 		mutex_exit(&plun->lun_mutex);
15674 
15675 		/* Get the client. If pip is NULL, we get NULL. */
15676 		dip = mdi_pi_get_client(pip);
15677 	}
15678 
15679 	if (dip == NULL)
15680 		goto out;
15681 	if (ddi_get_instance(dip) < 0)
15682 		goto out;
15683 
15684 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15685 	if (path == NULL)
15686 		goto out;
15687 
15688 	(void) ddi_pathname(dip, path);
15689 
15690 	/* Clean up. */
15691 out:
15692 	if (pip != NULL)
15693 		mdi_rele_path(pip);
15694 
15695 	/*
15696 	 * In reality, the user wants a fully valid path (one they can open)
15697 	 * but this string is lacking the mount point, and the minor node.
15698 	 * It would be nice if we could "figure these out" somehow
15699 	 * and fill them in.  Otherwise, the userland code has to understand
15700 	 * driver specific details of which minor node is the "best" or
15701 	 * "right" one to expose.  (Ex: which slice is the whole disk, or
15702 	 * which tape doesn't rewind)
15703 	 */
15704 	return (path);
15705 }
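
/*
 * Editor's sketch (not part of the driver): the caller contract for
 * fcp_get_lun_path() above.  The returned buffer is always MAXPATHLEN
 * bytes and must be freed by the caller; NULL means the LUN is offline
 * or has no devinfo node.  The helper name below is hypothetical and is
 * shown for illustration only.
 */
static void
fcp_log_lun_path_example(struct fcp_lun *plun)
{
	char	*path = fcp_get_lun_path(plun);

	if (path == NULL) {
		return;		/* offline or no node; nothing to log */
	}

	cmn_err(CE_CONT, "?lun path: %s\n", path);
	kmem_free(path, MAXPATHLEN);	/* always MAXPATHLEN, per contract */
}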
15706 
15707 static int
15708 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15709     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15710 {
15711 	int64_t reset_delay;
15712 	int rval, retry = 0;
15713 	struct fcp_port *pptr = fcp_dip2port(parent);
15714 
15715 	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15716 	    (ddi_get_lbolt64() - pptr->port_attach_time);
15717 	if (reset_delay < 0) {
15718 		reset_delay = 0;
15719 	}
15720 
15721 	if (fcp_bus_config_debug) {
15722 		flag |= NDI_DEVI_DEBUG;
15723 	}
15724 
15725 	switch (op) {
15726 	case BUS_CONFIG_ONE:
15727 		/*
15728 		 * Retry the command since we need to ensure
15729 		 * the fabric devices are available for root
15730 		 */
15731 		while (retry++ < fcp_max_bus_config_retries) {
15732 			rval =	(ndi_busop_bus_config(parent,
15733 			    flag | NDI_MDI_FALLBACK, op,
15734 			    arg, childp, (clock_t)reset_delay));
15735 			if (rval == 0) {
15736 				return (rval);
15737 			}
15738 		}
15739 
15740 		/*
15741 		 * drain taskq to make sure nodes are created and then
15742 		 * try again.
15743 		 */
15744 		taskq_wait(DEVI(parent)->devi_taskq);
15745 		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15746 		    op, arg, childp, 0));
15747 
15748 	case BUS_CONFIG_DRIVER:
15749 	case BUS_CONFIG_ALL: {
15750 		/*
15751 		 * Delay until all devices report in (port_tmp_cnt == 0)
15752 		 * or FCP_INIT_WAIT_TIMEOUT expires.
15753 		 */
15754 		mutex_enter(&pptr->port_mutex);
15755 		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15756 			(void) cv_timedwait(&pptr->port_config_cv,
15757 			    &pptr->port_mutex,
15758 			    ddi_get_lbolt() + (clock_t)reset_delay);
15759 			reset_delay =
15760 			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15761 			    (ddi_get_lbolt64() - pptr->port_attach_time);
15762 		}
15763 		mutex_exit(&pptr->port_mutex);
15764 		/* drain taskq to make sure nodes are created */
15765 		taskq_wait(DEVI(parent)->devi_taskq);
15766 		return (ndi_busop_bus_config(parent, flag, op,
15767 		    arg, childp, 0));
15768 	}
15769 
15770 	default:
15771 		return (NDI_FAILURE);
15772 	}
15773 	/*NOTREACHED*/
15774 }
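
/*
 * Editor's sketch (not part of the driver): the "remaining settle time"
 * calculation that fcp_scsi_bus_config() above performs inline, pulled
 * out as a hypothetical helper for clarity.  It assumes the same fields
 * as the driver: port_attach_time is the lbolt64 timestamp taken at
 * attach time and FCP_INIT_WAIT_TIMEOUT is expressed in microseconds.
 */
static clock_t
fcp_remaining_settle_ticks(struct fcp_port *pptr)
{
	int64_t	remaining;

	/* total allowed wait minus how long we have already been attached */
	remaining = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);

	return ((remaining > 0) ? (clock_t)remaining : 0);
}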
15775 
15776 static int
15777 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15778     ddi_bus_config_op_t op, void *arg)
15779 {
15780 	if (fcp_bus_config_debug) {
15781 		flag |= NDI_DEVI_DEBUG;
15782 	}
15783 
15784 	return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15785 }
15786 
15787 
15788 /*
15789  * Routine to copy GUID into the lun structure.
15790  * returns 0 if copy was successful and 1 if encountered a
15791  * failure and did not copy the guid.
15792  */
15793 static int
15794 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15795 {
15796 
15797 	int retval = 0;
15798 
15799 	if ((guidp == NULL) || (plun == NULL)) {
15800 		return (1);
15801 	}
15802 
15803 	/* add one for the null terminator */
15804 	const unsigned int len = strlen(guidp) + 1;
15805 
15806 	/*
15807 	 * If plun->lun_guid has already been allocated, check its
15808 	 * size.  If the size matches exactly, reuse the buffer;
15809 	 * otherwise free it and allocate one of the required size.
15810 	 * The reallocation should NOT typically happen unless the
15811 	 * GUID reported changes between passes.  We free and
15812 	 * allocate again even if the old buffer was larger than
15813 	 * required, because the field lun_guid_size serves the
15814 	 * dual role of recording both the size of the GUID string
15815 	 * (including its terminator) and the size of the
15816 	 * allocation that holds it.
15817 	 */
15818 	if (plun->lun_guid) {
15819 		if (plun->lun_guid_size != len) {
15820 			/*
15821 			 * free the allocated memory and
15822 			 * initialize the field
15823 			 * lun_guid_size to 0.
15824 			 */
15825 			kmem_free(plun->lun_guid, plun->lun_guid_size);
15826 			plun->lun_guid = NULL;
15827 			plun->lun_guid_size = 0;
15828 		}
15829 	}
15830 	/*
15831 	 * alloc only if not already done.
15832 	 */
15833 	if (plun->lun_guid == NULL) {
15834 		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15835 		if (plun->lun_guid == NULL) {
15836 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15837 			    "unable to allocate "
15838 			    "memory for GUID, size %u", len);
15839 			retval = 1;
15840 		} else {
15841 			plun->lun_guid_size = len;
15842 		}
15843 	}
15844 	if (plun->lun_guid) {
15845 		/*
15846 		 * now copy the GUID
15847 		 */
15848 		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15849 	}
15850 	return (retval);
15851 }
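
/*
 * Editor's sketch (not part of the driver): intended use of
 * fcp_copy_guid_2_lun_block() above.  The GUID is a NUL-terminated
 * ASCII string; a non-zero return means the KM_NOSLEEP allocation
 * failed and no GUID was recorded.  The helper name and guid_str are
 * hypothetical.
 */
static void
fcp_example_set_guid(struct fcp_lun *plun, char *guid_str)
{
	if (fcp_copy_guid_2_lun_block(plun, guid_str) != 0) {
		/* No GUID recorded this pass; retried on next discovery. */
		return;
	}

	/* lun_guid_size covers the string plus its terminator. */
	ASSERT(plun->lun_guid_size == strlen(guid_str) + 1);
}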
15852 
15853 /*
15854  * fcp_reconfig_wait
15855  *
15856  * Wait for a rediscovery/reconfiguration to complete before continuing.
15857  */
15858 
15859 static void
15860 fcp_reconfig_wait(struct fcp_port *pptr)
15861 {
15862 	clock_t		reconfig_start, wait_timeout;
15863 
15864 	/*
15865 	 * Quick check.	 If pptr->port_tmp_cnt is 0, there is no
15866 	 * reconfiguration in progress.
15867 	 */
15868 
15869 	mutex_enter(&pptr->port_mutex);
15870 	if (pptr->port_tmp_cnt == 0) {
15871 		mutex_exit(&pptr->port_mutex);
15872 		return;
15873 	}
15874 	mutex_exit(&pptr->port_mutex);
15875 
15876 	/*
15877 	 * If we cause a reconfig by raising power, delay until all devices
15878 	 * report in (port_tmp_cnt returns to 0)
15879 	 */
15880 
15881 	reconfig_start = ddi_get_lbolt();
15882 	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15883 
15884 	mutex_enter(&pptr->port_mutex);
15885 
15886 	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15887 	    pptr->port_tmp_cnt) {
15888 
15889 		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15890 		    reconfig_start + wait_timeout);
15891 	}
15892 
15893 	mutex_exit(&pptr->port_mutex);
15894 
15895 	/*
15896 	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
15897 	 * we want may still be ok.  If not, it will error out later.
15898 	 */
15899 }
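
/*
 * Editor's sketch (not part of the driver): the bounded wait pattern
 * used by fcp_reconfig_wait() above.  The condition variable is
 * signalled by the discovery code whenever port_tmp_cnt changes, so the
 * loop wakes up either on a signal or at the absolute deadline.  The
 * helper name below is hypothetical.
 */
static void
fcp_wait_until_counted_in(struct fcp_port *pptr, clock_t max_wait)
{
	clock_t	start = ddi_get_lbolt();

	mutex_enter(&pptr->port_mutex);
	while ((pptr->port_tmp_cnt != 0) &&
	    ((ddi_get_lbolt() - start) < max_wait)) {
		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
		    start + max_wait);
	}
	mutex_exit(&pptr->port_mutex);
}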
15900 
15901 /*
15902  * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15903  * We rely on the fcp_global_mutex to provide protection against changes to
15904  * the fcp_lun_blacklist.
15905  *
15906  * You can describe a list of target port WWNs and LUN numbers that will
15907  * not be configured.  LUN numbers are interpreted as decimal.  Whitespace
15908  * and ',' can be used as separators in the list of LUN numbers.
15909  *
15910  * To prevent LUNs 1 and 2 from being configured for target
15911  * port 510000f010fd92a1 and target port 510000e012079df1, set:
15912  *
15913  * pwwn-lun-blacklist=
15914  * "510000f010fd92a1,1,2",
15915  * "510000e012079df1,1,2";
15916  */
15917 static void
15918 fcp_read_blacklist(dev_info_t *dip,
15919     struct fcp_black_list_entry **pplun_blacklist)
15920 {
15921 	char **prop_array	= NULL;
15922 	char *curr_pwwn		= NULL;
15923 	char *curr_lun		= NULL;
15924 	uint32_t prop_item	= 0;
15925 	int idx			= 0;
15926 	int len			= 0;
15927 
15928 	ASSERT(mutex_owned(&fcp_global_mutex));
15929 	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15930 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15931 	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15932 		return;
15933 	}
15934 
15935 	for (idx = 0; idx < prop_item; idx++) {
15936 
15937 		curr_pwwn = prop_array[idx];
15938 		while (*curr_pwwn == ' ') {
15939 			curr_pwwn++;
15940 		}
15941 		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15942 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15943 			    ", please check.", curr_pwwn);
15944 			continue;
15945 		}
15946 		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15947 		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15948 			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15949 			    ", please check.", curr_pwwn);
15950 			continue;
15951 		}
15952 		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15953 			if (isxdigit(curr_pwwn[len]) != TRUE) {
15954 				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15955 				    "blacklist, please check.", curr_pwwn);
15956 				break;
15957 			}
15958 		}
15959 		if (len != sizeof (la_wwn_t) * 2) {
15960 			continue;
15961 		}
15962 
15963 		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15964 		*(curr_lun - 1) = '\0';
15965 		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15966 	}
15967 
15968 	ddi_prop_free(prop_array);
15969 }
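
/*
 * Editor's sketch (not part of the driver): the shape of the blacklist
 * built by fcp_read_blacklist()/fcp_mask_pwwn_lun()/fcp_add_one_mask().
 * Each (pwwn, lun) pair becomes one fcp_black_list_entry prepended to
 * the list, so the fp.conf example in the comment above yields four
 * entries.  The helper below is hypothetical and only shows how a
 * consumer walks the list.
 */
static int
fcp_blacklist_count(struct fcp_black_list_entry *head)
{
	int	cnt = 0;

	for (; head != NULL; head = head->next) {
		cnt++;
	}

	return (cnt);
}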
15970 
15971 /*
15972  * Get the masking info about one remote target port designated by wwn.
15973  * Lun ids could be separated by ',' or white spaces.
15974  */
15975 static void
15976 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15977     struct fcp_black_list_entry **pplun_blacklist)
15978 {
15979 	int		idx			= 0;
15980 	uint32_t	offset			= 0;
15981 	unsigned long	lun_id			= 0;
15982 	char		lunid_buf[16];
15983 	char		*pend			= NULL;
15984 	int		illegal_digit		= 0;
15985 
15986 	while (offset < strlen(curr_lun)) {
15987 		while ((curr_lun[offset + idx] != ',') &&
15988 		    (curr_lun[offset + idx] != '\0') &&
15989 		    (curr_lun[offset + idx] != ' ')) {
15990 			if (isdigit(curr_lun[offset + idx]) == 0) {
15991 				illegal_digit++;
15992 			}
15993 			idx++;
15994 		}
15995 		if (illegal_digit > 0) {
15996 			offset += (idx+1);	/* To the start of next lun */
15997 			idx = 0;
15998 			illegal_digit = 0;
15999 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16000 			    "the blacklist, please check digits.",
16001 			    curr_lun, curr_pwwn);
16002 			continue;
16003 		}
16004 		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16005 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16006 			    "the blacklist, please check the length of LUN#.",
16007 			    curr_lun, curr_pwwn);
16008 			break;
16009 		}
16010 		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
16011 			offset++;
16012 			continue;
16013 		}
16014 
16015 		bcopy(curr_lun + offset, lunid_buf, idx);
16016 		lunid_buf[idx] = '\0';
16017 		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16018 			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16019 		} else {
16020 			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16021 			    "the blacklist, please check %s.",
16022 			    curr_lun, curr_pwwn, lunid_buf);
16023 		}
16024 		offset += (idx+1);	/* To the start of next lun */
16025 		idx = 0;
16026 	}
16027 }
16028 
16029 /*
16030  * Add one masking record
16031  */
16032 static void
16033 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16034     struct fcp_black_list_entry **pplun_blacklist)
16035 {
16036 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16037 	struct fcp_black_list_entry	*new_entry	= NULL;
16038 	la_wwn_t			wwn;
16039 
16040 	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16041 	while (tmp_entry) {
16042 		if ((bcmp(&tmp_entry->wwn, &wwn,
16043 		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16044 			return;
16045 		}
16046 
16047 		tmp_entry = tmp_entry->next;
16048 	}
16049 
16050 	/* add to black list */
16051 	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16052 	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16053 	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16054 	new_entry->lun = lun_id;
16055 	new_entry->masked = 0;
16056 	new_entry->next = *pplun_blacklist;
16057 	*pplun_blacklist = new_entry;
16058 }
16059 
16060 /*
16061  * Check if we should mask the specified lun of this fcp_tgt
16062  */
16063 static int
16064 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16065 {
16066 	struct fcp_black_list_entry *remote_port;
16067 
16068 	remote_port = fcp_lun_blacklist;
16069 	while (remote_port != NULL) {
16070 		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16071 			if (remote_port->lun == lun_id) {
16072 				remote_port->masked++;
16073 				if (remote_port->masked == 1) {
16074 					fcp_log(CE_NOTE, NULL, "LUN %d of port "
16075 					    "%02x%02x%02x%02x%02x%02x%02x%02x "
16076 					    "is masked due to black listing.\n",
16077 					    lun_id, wwn->raw_wwn[0],
16078 					    wwn->raw_wwn[1], wwn->raw_wwn[2],
16079 					    wwn->raw_wwn[3], wwn->raw_wwn[4],
16080 					    wwn->raw_wwn[5], wwn->raw_wwn[6],
16081 					    wwn->raw_wwn[7]);
16082 				}
16083 				return (TRUE);
16084 			}
16085 		}
16086 		remote_port = remote_port->next;
16087 	}
16088 	return (FALSE);
16089 }
16090 
16091 /*
16092  * Release all allocated resources
16093  */
16094 static void
16095 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16096 {
16097 	struct fcp_black_list_entry	*tmp_entry	= *pplun_blacklist;
16098 	struct fcp_black_list_entry	*current_entry	= NULL;
16099 
16100 	ASSERT(mutex_owned(&fcp_global_mutex));
16101 	/*
16102 	 * Traverse all luns
16103 	 */
16104 	while (tmp_entry) {
16105 		current_entry = tmp_entry;
16106 		tmp_entry = tmp_entry->next;
16107 		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16108 	}
16109 	*pplun_blacklist = NULL;
16110 }
16111 
16112 /*
16113  * Variable naming convention in the fcp module: pkt is a scsi_pkt,
16114  *   cmd a fcp_pkt, icmd a fcp_ipkt, fpkt a fc_packet, pptr a fcp_port.
16115  */
16116 static struct scsi_pkt *
16117 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16118     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16119     int flags, int (*callback)(), caddr_t arg)
16120 {
16121 	fcp_port_t	*pptr = ADDR2FCP(ap);
16122 	fcp_pkt_t	*cmd  = NULL;
16123 	fc_frame_hdr_t	*hp;
16124 
16125 	/*
16126 	 * First step: get the packet
16127 	 */
16128 	if (pkt == NULL) {
16129 		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16130 		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16131 		    callback, arg);
16132 		if (pkt == NULL) {
16133 			return (NULL);
16134 		}
16135 
16136 		/*
16137 		 * All fields in the scsi_pkt will be initialized properly or
16138 		 * set to zero, so nothing more needs to be done for it here.
16139 		 */
16140 		/*
16141 		 * But it is our responsibility to link the other related
16142 		 * data structures.  Their initialization is done just before
16143 		 * the scsi_pkt is sent to the FCA.
16144 		 */
16145 		cmd		= PKT2CMD(pkt);
16146 		cmd->cmd_pkt	= pkt;
16147 		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16148 		/*
16149 		 * fc_packet_t
16150 		 */
16151 		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16152 		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16153 		    sizeof (struct fcp_pkt));
16154 		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16155 		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16156 		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16157 		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16158 		/*
16159 		 * Fill in the Fabric Channel Header
16160 		 * Fill in the Fibre Channel frame header
16161 		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16162 		hp->r_ctl = R_CTL_COMMAND;
16163 		hp->rsvd = 0;
16164 		hp->type = FC_TYPE_SCSI_FCP;
16165 		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16166 		hp->seq_id = 0;
16167 		hp->df_ctl  = 0;
16168 		hp->seq_cnt = 0;
16169 		hp->ox_id = 0xffff;
16170 		hp->rx_id = 0xffff;
16171 		hp->ro = 0;
16172 	} else {
16173 		/*
16174 		 * When reusing a packet, we should consider whether any
16175 		 * elements in the related data structures need to be reset.
16176 		 */
16177 		FCP_TRACE(fcp_logq, pptr->port_instbuf,
16178 		    fcp_trace, FCP_BUF_LEVEL_6, 0,
16179 		    "reusing pkt, flags %d", flags);
16180 		cmd = PKT2CMD(pkt);
16181 		if (cmd->cmd_fp_pkt->pkt_pd) {
16182 			cmd->cmd_fp_pkt->pkt_pd = NULL;
16183 		}
16184 	}
16185 
16186 	/*
16187 	 * Second step:	 dma allocation/move
16188 	 */
16189 	if (bp && bp->b_bcount != 0) {
16190 		/*
16191 		 * Mark if it's read or write
16192 		 */
16193 		if (bp->b_flags & B_READ) {
16194 			cmd->cmd_flags |= CFLAG_IS_READ;
16195 		} else {
16196 			cmd->cmd_flags &= ~CFLAG_IS_READ;
16197 		}
16198 
16199 		bp_mapin(bp);
16200 		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16201 		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16202 		cmd->cmd_fp_pkt->pkt_data_resid = 0;
16203 	} else {
16204 		/*
16205 		 * This seldom happens, except when CLUSTER or SCSI_VHCI wants
16206 		 * to send a zero-length read/write.
16207 		 */
16208 		cmd->cmd_fp_pkt->pkt_data = NULL;
16209 		cmd->cmd_fp_pkt->pkt_datalen = 0;
16210 	}
16211 
16212 	return (pkt);
16213 }
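
/*
 * Editor's sketch (not part of the driver): the per-command memory
 * layout set up by fcp_pseudo_init_pkt() above.  scsi_hba_pkt_alloc()
 * is asked for sizeof (fcp_pkt_t) + port_priv_pkt_len bytes of
 * HBA-private space; the fcp_pkt_t sits first and the FCA-private area
 * follows immediately after it.  The helper name is hypothetical.
 */
static void
fcp_pseudo_pkt_layout_check(struct scsi_pkt *pkt)
{
	fcp_pkt_t	*cmd = PKT2CMD(pkt);	/* HBA-private area */
	caddr_t		fca_priv = (caddr_t)cmd + sizeof (struct fcp_pkt);

	ASSERT(cmd->cmd_pkt == pkt);
	ASSERT(cmd->cmd_fp_pkt->pkt_fca_private == (opaque_t)fca_priv);
}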
16214 
16215 static void
16216 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16217 {
16218 	fcp_port_t	*pptr = ADDR2FCP(ap);
16219 
16220 	/*
16221 	 * First, we let the FCA uninitialize its private part.
16222 	 */
16223 	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16224 	    PKT2CMD(pkt)->cmd_fp_pkt);
16225 
16226 	/*
16227 	 * Then we uninitialize fc_packet.
16228 	 */
16229 
16230 	/*
16231 	 * Third, we uninitialize the fcp_pkt.
16232 	 */
16233 
16234 	/*
16235 	 * In the end, we free scsi_pkt.
16236 	 */
16237 	scsi_hba_pkt_free(ap, pkt);
16238 }
16239 
16240 static int
16241 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16242 {
16243 	fcp_port_t	*pptr = ADDR2FCP(ap);
16244 	fcp_lun_t	*plun = ADDR2LUN(ap);
16245 	fcp_tgt_t	*ptgt = plun->lun_tgt;
16246 	fcp_pkt_t	*cmd  = PKT2CMD(pkt);
16247 	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
16248 	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
16249 	int		 rval;
16250 
16251 	fpkt->pkt_pd = ptgt->tgt_pd_handle;
16252 	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16253 
16254 	/*
16255 	 * First, we need to initialize the fcp_pkt_t.
16256 	 * Second, we need to initialize the fcp_cmd_t.
16257 	 */
16258 	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16259 	fcmd->fcp_data_len = fpkt->pkt_datalen;
16260 	fcmd->fcp_ent_addr = plun->lun_addr;
16261 	if (pkt->pkt_flags & FLAG_HTAG) {
16262 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16263 	} else if (pkt->pkt_flags & FLAG_OTAG) {
16264 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16265 	} else if (pkt->pkt_flags & FLAG_STAG) {
16266 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16267 	} else {
16268 		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16269 	}
16270 
16271 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16272 		fcmd->fcp_cntl.cntl_read_data = 1;
16273 		fcmd->fcp_cntl.cntl_write_data = 0;
16274 	} else {
16275 		fcmd->fcp_cntl.cntl_read_data = 0;
16276 		fcmd->fcp_cntl.cntl_write_data = 1;
16277 	}
16278 
16279 	/*
16280 	 * Then we need to initialize the fc_packet_t too.
16281 	 */
16282 	fpkt->pkt_timeout = pkt->pkt_time + 2;
16283 	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16284 	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16285 	if (cmd->cmd_flags & CFLAG_IS_READ) {
16286 		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16287 	} else {
16288 		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16289 	}
16290 
16291 	if (pkt->pkt_flags & FLAG_NOINTR) {
16292 		fpkt->pkt_comp = NULL;
16293 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16294 	} else {
16295 		fpkt->pkt_comp = fcp_cmd_callback;
16296 		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16297 		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16298 			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16299 		}
16300 	}
16301 
16302 	/*
16303 	 * Lastly, we need to initialize the scsi_pkt.
16304 	 */
16305 	pkt->pkt_reason = CMD_CMPLT;
16306 	pkt->pkt_state = 0;
16307 	pkt->pkt_statistics = 0;
16308 	pkt->pkt_resid = 0;
16309 
16310 	/*
16311 	 * if interrupts aren't allowed (e.g. at dump time) then we'll
16312 	 * have to do polled I/O
16313 	 */
16314 	if (pkt->pkt_flags & FLAG_NOINTR) {
16315 		return (fcp_dopoll(pptr, cmd));
16316 	}
16317 
16318 	cmd->cmd_state = FCP_PKT_ISSUED;
16319 	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16320 	if (rval == FC_SUCCESS) {
16321 		return (TRAN_ACCEPT);
16322 	}
16323 
16324 	/*
16325 	 * This needs more consideration:
16326 	 *
16327 	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other packets.
16328 	 */
16329 	cmd->cmd_state = FCP_PKT_IDLE;
16330 	if (rval == FC_TRAN_BUSY) {
16331 		return (TRAN_BUSY);
16332 	} else {
16333 		return (TRAN_FATAL_ERROR);
16334 	}
16335 }
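
/*
 * Editor's sketch (not part of the driver): the scsi_pkt tag flag to
 * FCP task attribute mapping performed inline by fcp_pseudo_start()
 * above, factored into a hypothetical helper for readability.  The
 * returned value is what gets assigned to fcp_cntl.cntl_qtype.
 */
static int
fcp_qtype_from_pkt_flags(int pkt_flags)
{
	if (pkt_flags & FLAG_HTAG) {
		return (FCP_QTYPE_HEAD_OF_Q);
	} else if (pkt_flags & FLAG_OTAG) {
		return (FCP_QTYPE_ORDERED);
	} else if (pkt_flags & FLAG_STAG) {
		return (FCP_QTYPE_SIMPLE);
	}
	return (FCP_QTYPE_UNTAGGED);
}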
16336 
16337 /*
16338  * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16339  * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16340  */
16341 static void
16342 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16343 {
16344 	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16345 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16346 }
16347 
16348 /*
16349  * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16350  */
16351 static void
16352 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16353 {
16354 	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16355 	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16356 }
16357