xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/ulp/fcip.c (revision 66582b606a8194f7f3ba5b3a3a6dca5b0d346361)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  */
26 
27 /*
28  * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module
29  * This is a pseudo driver module to handle encapsulation of IP and ARP
30  * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS
31  * driver module which interfaces with IP/ARP using DLPI. This module
32  * is a Style-2 DLS provider.
33  *
34  * The implementation of this module is based on RFC 2625 which gives
35  * details on the encapsulation of IP/ARP data over FibreChannel.
36  * The fcip module needs to resolve an IP address to a port address before
37  * sending data to a destination port. An FC device port has two addresses
38  * associated with it: an 8-byte World Wide unique Port Name and a 3-byte
39  * volatile Port number or Port_ID.
40  *
41  * The mapping between an IP address and the World Wide Port Name is handled
42  * by the ARP layer since the IP over FC draft requires the MAC address to
43  * be the least significant six bytes of the World Wide Port Name. The
44  * fcip module however needs to identify the destination port uniquely when
45  * the destination FC device has multiple FC ports.
46  *
47  * The FC layer mapping between the World Wide Port Name and the Port_ID
48  * will be handled through the use of a fabric name server or through the
49  * use of the FARP ELS command as described in the draft. Since the Port_IDs
50  * are volatile, the mapping between the World Wide Port Name and Port_IDs
51  * must be maintained and validated before use each time a datagram
52  * needs to be sent to the destination ports. The FC transport module
53  * informs the fcip module of all changes to states of ports on the
54  * fabric through registered callbacks. This enables the fcip module
55  * to keep the WW_PN to Port_ID mappings current.
56  *
57  * For details on how this module interfaces with the FibreChannel Transport
58  * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport
59  * Programming guide details the APIs between ULPs and the Transport.
60  *
61  * Now for some Caveats:
62  *
63  * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have
64  * the NAA bits set to '0001', indicating an IEEE 48-bit address which
65  * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel
66  * adapters containing two or more ports, IEEE naming cannot identify the
67  * ports on an adapter uniquely, so the first implementation operates
68  * only on Port 0 of each adapter.
69  */
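
/*
 * Illustrative example of the address relationships described above
 * (the values here are made up, not taken from RFC 2625): a port whose
 * Port WWN is 10:00:00:e0:8b:01:02:03 presents the MAC address
 * 00:e0:8b:01:02:03 (the low six bytes of the WWN) to IP/ARP, while its
 * volatile 3-byte Port_ID (say 0x010200) is resolved through the name
 * server or FARP and must be revalidated whenever the fabric state
 * changes.
 */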
70 
71 #include	<sys/types.h>
72 #include	<sys/errno.h>
73 #include	<sys/debug.h>
74 #include	<sys/time.h>
75 #include	<sys/sysmacros.h>
76 #include	<sys/systm.h>
77 #include	<sys/user.h>
78 #include	<sys/stropts.h>
79 #include	<sys/stream.h>
80 #include	<sys/strlog.h>
81 #include	<sys/strsubr.h>
82 #include	<sys/cmn_err.h>
83 #include	<sys/cpu.h>
84 #include	<sys/kmem.h>
85 #include	<sys/conf.h>
86 #include	<sys/ddi.h>
87 #include	<sys/sunddi.h>
88 #include	<sys/ksynch.h>
89 #include	<sys/stat.h>
90 #include	<sys/kstat.h>
91 #include	<sys/vtrace.h>
92 #include	<sys/strsun.h>
93 #include	<sys/varargs.h>
94 #include	<sys/modctl.h>
95 #include 	<sys/thread.h>
96 #include 	<sys/var.h>
97 #include 	<sys/proc.h>
98 #include	<inet/common.h>
99 #include	<netinet/ip6.h>
100 #include	<inet/ip.h>
101 #include	<inet/arp.h>
102 #include	<inet/mi.h>
103 #include	<inet/nd.h>
104 #include	<sys/dlpi.h>
105 #include	<sys/ethernet.h>
106 #include	<sys/file.h>
107 #include	<sys/syslog.h>
108 #include	<sys/disp.h>
109 #include	<sys/taskq.h>
110 
111 /*
112  * Leadville includes
113  */
114 
115 #include	<sys/fibre-channel/fc.h>
116 #include	<sys/fibre-channel/impl/fc_ulpif.h>
117 #include	<sys/fibre-channel/ulp/fcip.h>
118 
119 /*
120  * TNF Probe/trace facility include
121  */
122 #if defined(lint) || defined(FCIP_TNF_ENABLED)
123 #include <sys/tnf_probe.h>
124 #endif
125 
126 #define	FCIP_ESBALLOC
127 
128 /*
129  * Function prototypes
130  */
131 
132 /* standard loadable modules entry points */
133 static int	fcip_attach(dev_info_t *, ddi_attach_cmd_t);
134 static int 	fcip_detach(dev_info_t *, ddi_detach_cmd_t);
135 static void 	fcip_dodetach(struct fcipstr *slp);
136 static int fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
137     void *arg, void **result);
138 
139 
140 /* streams specific */
141 static void fcip_setipq(struct fcip *fptr);
142 static int fcip_wput(queue_t *, mblk_t *);
143 static int fcip_wsrv(queue_t *);
144 static void fcip_proto(queue_t *, mblk_t *);
145 static void fcip_ioctl(queue_t *, mblk_t *);
146 static int fcip_open(queue_t *wq, dev_t *devp, int flag,
147 		int sflag, cred_t *credp);
148 static int fcip_close(queue_t *rq, int flag, cred_t *credp);
149 static int fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
150     struct fcip_dest *fdestp, int flags);
151 static void fcip_sendup(struct fcip *fptr, mblk_t *mp,
152     struct fcipstr *(*acceptfunc)());
153 static struct fcipstr *fcip_accept(struct fcipstr *slp, struct fcip *fptr,
154     int type, la_wwn_t *dhostp);
155 static mblk_t *fcip_addudind(struct fcip *fptr, mblk_t *mp,
156     fcph_network_hdr_t *nhdr, int type);
157 static int fcip_setup_mac_addr(struct fcip *fptr);
158 static void fcip_kstat_init(struct fcip *fptr);
159 static int fcip_stat_update(kstat_t *, int);
160 
161 
162 /* dlpi specific */
163 static void fcip_spareq(queue_t *wq, mblk_t *mp);
164 static void fcip_pareq(queue_t *wq, mblk_t *mp);
165 static void fcip_ubreq(queue_t *wq, mblk_t *mp);
166 static void fcip_breq(queue_t *wq, mblk_t *mp);
167 static void fcip_dreq(queue_t *wq, mblk_t *mp);
168 static void fcip_areq(queue_t *wq, mblk_t *mp);
169 static void fcip_udreq(queue_t *wq, mblk_t *mp);
170 static void fcip_ireq(queue_t *wq, mblk_t *mp);
171 static void fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp);
172 
173 
174 /* solaris sundry, DR/CPR etc */
175 static int fcip_cache_constructor(void *buf, void *arg, int size);
176 static void fcip_cache_destructor(void *buf, void *size);
177 static int fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd);
178 static int fcip_handle_resume(fcip_port_info_t *fport,
179     fc_ulp_port_info_t *port_info, fc_attach_cmd_t cmd);
180 static fcip_port_info_t *fcip_softstate_free(fcip_port_info_t *fport);
181 static int fcip_port_attach_handler(struct fcip *fptr);
182 
183 
184 /*
185  * ulp - transport interface function prototypes
186  */
187 static int fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *,
188     fc_attach_cmd_t cmd, uint32_t sid);
189 static int fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *,
190     fc_detach_cmd_t cmd);
191 static int fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle,
192     dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
193     uint32_t claimed);
194 static void fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
195     uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
196     uint32_t listlen, uint32_t sid);
197 static int fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
198     fc_unsol_buf_t *buf, uint32_t claimed);
199 static int fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
200     fc_unsol_buf_t *payload, uint32_t claimed);
201 
202 
203 /* Routing table specific */
204 static void fcip_handle_topology(struct fcip *fptr);
205 static int fcip_init_port(struct fcip *fptr);
206 struct fcip_routing_table *fcip_lookup_rtable(struct fcip *fptr,
207     la_wwn_t *pwwn, int matchflag);
208 static void fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist,
209     uint32_t listlen);
210 static void fcip_rt_flush(struct fcip *fptr);
211 static void fcip_rte_remove_deferred(void *arg);
212 static int fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp);
213 
214 
215 /* dest table specific */
216 static struct fcip_dest *fcip_get_dest(struct fcip *fptr,
217     la_wwn_t *dlphys);
218 static struct fcip_dest *fcip_add_dest(struct fcip *fptr,
219     struct fcip_routing_table *frp);
220 static int fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag);
221 static uint32_t fcip_get_broadcast_did(struct fcip *fptr);
222 static void fcip_cleanup_dest(struct fcip *fptr);
223 
224 
225 /* helper functions */
226 static fcip_port_info_t *fcip_get_port(opaque_t phandle);
227 static int fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag);
228 static void fcip_ether_to_str(struct ether_addr *e, caddr_t s);
229 static int fcip_port_get_num_pkts(struct fcip *fptr);
230 static int fcip_check_port_busy(struct fcip *fptr);
231 static void fcip_check_remove_minor_node(void);
232 static int fcip_set_wwn(la_wwn_t *pwwn);
233 static int fcip_plogi_in_progress(struct fcip *fptr);
234 static int fcip_check_port_exists(struct fcip *fptr);
235 static int fcip_is_supported_fc_topology(int fc_topology);
236 
237 
238 /* pkt specific */
239 static fcip_pkt_t *fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp,
240     int flags, int datalen);
241 static void fcip_pkt_free(struct fcip_pkt *fcip_pkt, int flags);
242 static fcip_pkt_t *fcip_ipkt_alloc(struct fcip *fptr, int cmdlen,
243     int resplen, opaque_t pd, int flags);
244 static void fcip_ipkt_free(fcip_pkt_t *fcip_pkt);
245 static void fcip_ipkt_callback(fc_packet_t *fc_pkt);
246 static void fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt);
247 static void fcip_pkt_callback(fc_packet_t *fc_pkt);
248 static void fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid,
249     fc_portid_t did, void (*comp) ());
250 static int fcip_transport(fcip_pkt_t *fcip_pkt);
251 static void fcip_pkt_timeout(void *arg);
252 static void fcip_timeout(void *arg);
253 static void fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp,
254     fcip_pkt_t *fcip_pkt);
255 static int fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp,
256     fcip_pkt_t *fcip_pkt);
257 static int fcip_sendup_constructor(void *buf, void *arg, int flags);
258 static void fcip_sendup_thr(void *arg);
259 static int fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp,
260     struct fcipstr *(*f)());
261 
262 /*
263  * zero copy inbound data handling
264  */
265 #ifdef FCIP_ESBALLOC
266 static void fcip_ubfree(char *arg);
267 #endif /* FCIP_ESBALLOC */
268 
269 #if !defined(FCIP_ESBALLOC)
270 static void *fcip_allocb(size_t size, uint_t pri);
271 #endif
272 
273 
274 /* FCIP FARP support functions */
275 static struct fcip_dest *fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn,
276     char *ip_addr, size_t ip_addr_len, int flags);
277 static void fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (),
278     int is_els);
279 static int fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd);
280 static int fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd);
281 static void fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf);
282 static void fcip_port_ns(void *arg);
283 
284 #ifdef DEBUG
285 
286 #include <sys/debug.h>
287 
288 #define	FCIP_DEBUG_DEFAULT	0x1
289 #define	FCIP_DEBUG_ATTACH	0x2
290 #define	FCIP_DEBUG_INIT		0x4
291 #define	FCIP_DEBUG_DETACH	0x8
292 #define	FCIP_DEBUG_DLPI		0x10
293 #define	FCIP_DEBUG_ELS		0x20
294 #define	FCIP_DEBUG_DOWNSTREAM	0x40
295 #define	FCIP_DEBUG_UPSTREAM	0x80
296 #define	FCIP_DEBUG_MISC		0x100
297 
298 #define	FCIP_DEBUG_STARTUP	(FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT)
299 #define	FCIP_DEBUG_DATAOUT	(FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM)
300 #define	FCIP_DEBUG_DATAIN	(FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM)
301 
302 static int fcip_debug = FCIP_DEBUG_DEFAULT;
303 
304 #define	FCIP_DEBUG(level, args)	\
305 	if (fcip_debug & (level))	cmn_err args;
306 
307 #else	/* DEBUG */
308 
309 #define	FCIP_DEBUG(level, args)		/* do nothing */
310 
311 #endif	/* DEBUG */
312 
313 #define	KIOIP	KSTAT_INTR_PTR(fcip->fcip_intrstats)
314 
315 /*
316  * Endian independent ethernet to WWN copy
317  */
318 #define	ether_to_wwn(E, W)	\
319 	bzero((void *)(W), sizeof (la_wwn_t)); \
320 	bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \
321 	(W)->raw_wwn[0] |= 0x10
322 
323 /*
324  * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr.
325  * The args to the macro are pointers to WWN and ether_addr structures
326  */
327 #define	wwn_to_ether(W, E)	\
328 	bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL)
329 
330 /*
331  * The module_info structure contains identification and limit values.
332  * All queues associated with a certain driver share the same module_info
333  * structures. This structure defines the characteristics of that driver/
334  * module's queues. The module name must be unique. The max and min packet
335  * sizes limit the no. of characters in M_DATA messages. The Hi and Lo
336  * water marks are for flow control when a module has a service procedure.
337  */
338 static struct module_info	fcipminfo = {
339 	FCIPIDNUM,	/* mi_idnum : Module ID num */
340 	FCIPNAME, 	/* mi_idname: Module Name */
341 	FCIPMINPSZ,	/* mi_minpsz: Min packet size */
342 	FCIPMAXPSZ,	/* mi_maxpsz: Max packet size */
343 	FCIPHIWAT,	/* mi_hiwat : High water mark */
344 	FCIPLOWAT	/* mi_lowat : Low water mark */
345 };
346 
347 /*
348  * The qinit structures contain the module put, service, open and close
349  * procedure pointers. All modules and drivers with the same streamtab
350  * (i.e., the same fmodsw or cdevsw entry points) point to the same
351  * upstream (read) and downstream (write) qinit structs.
352  */
353 static struct qinit	fcip_rinit = {
354 	NULL,		/* qi_putp */
355 	NULL,		/* qi_srvp */
356 	fcip_open,	/* qi_qopen */
357 	fcip_close,	/* qi_qclose */
358 	NULL,		/* qi_qadmin */
359 	&fcipminfo,	/* qi_minfo */
360 	NULL		/* qi_mstat */
361 };
362 
363 static struct qinit	fcip_winit = {
364 	fcip_wput,	/* qi_putp */
365 	fcip_wsrv,	/* qi_srvp */
366 	NULL,		/* qi_qopen */
367 	NULL,		/* qi_qclose */
368 	NULL,		/* qi_qadmin */
369 	&fcipminfo,	/* qi_minfo */
370 	NULL		/* qi_mstat */
371 };
372 
373 /*
374  * streamtab contains pointers to the read and write qinit structures
375  */
376 
377 static struct streamtab fcip_info = {
378 	&fcip_rinit,	/* st_rdinit */
379 	&fcip_winit,	/* st_wrinit */
380 	NULL,		/* st_muxrinit */
381 	NULL,		/* st_muxwrinit */
382 };
383 
384 static struct cb_ops  fcip_cb_ops = {
385 	nodev,				/* open */
386 	nodev,				/* close */
387 	nodev,				/* strategy */
388 	nodev,				/* print */
389 	nodev,				/* dump */
390 	nodev,				/* read */
391 	nodev,				/* write */
392 	nodev,				/* ioctl */
393 	nodev,				/* devmap */
394 	nodev,				/* mmap */
395 	nodev,				/* segmap */
396 	nochpoll,			/* poll */
397 	ddi_prop_op,			/* cb_prop_op */
398 	&fcip_info,			/* streamtab  */
399 	D_MP | D_HOTPLUG,		/* Driver compatibility flag */
400 	CB_REV,				/* rev */
401 	nodev,				/* int (*cb_aread)() */
402 	nodev				/* int (*cb_awrite)() */
403 };
404 
405 /*
406  * autoconfiguration routines.
407  */
408 static struct dev_ops fcip_ops = {
409 	DEVO_REV,		/* devo_rev, */
410 	0,			/* refcnt  */
411 	fcip_getinfo,		/* info */
412 	nulldev,		/* identify */
413 	nulldev,		/* probe */
414 	fcip_attach,		/* attach */
415 	fcip_detach,		/* detach */
416 	nodev,			/* RESET */
417 	&fcip_cb_ops,		/* driver operations */
418 	NULL,			/* bus operations */
419 	ddi_power		/* power management */
420 };
421 
422 #define	FCIP_VERSION	"1.61"
423 #define	FCIP_NAME	"SunFC FCIP v" FCIP_VERSION
424 
425 #define	PORT_DRIVER	"fp"
426 
427 #define	GETSTRUCT(struct, number)	\
428 	((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
429 		KM_SLEEP))
430 
431 static struct modldrv modldrv = {
432 	&mod_driverops,			/* Type of module - driver */
433 	FCIP_NAME,			/* Name of module */
434 	&fcip_ops,			/* driver ops */
435 };
436 
437 static struct modlinkage modlinkage = {
438 	MODREV_1, (void *)&modldrv, NULL
439 };
440 
441 
442 /*
443  * Now for some global statics
444  */
445 static uint32_t	fcip_ub_nbufs = FCIP_UB_NBUFS;
446 static uint32_t fcip_ub_size = FCIP_UB_SIZE;
447 static int fcip_pkt_ttl_ticks = FCIP_PKT_TTL;
448 static int fcip_tick_incr = 1;
449 static int fcip_wait_cmds = FCIP_WAIT_CMDS;
450 static int fcip_num_attaching = 0;
451 static int fcip_port_attach_pending = 0;
452 static int fcip_create_nodes_on_demand = 1;	/* keep it similar to fcp */
453 static int fcip_cache_on_arp_broadcast = 0;
454 static int fcip_farp_supported = 0;
455 static int fcip_minor_node_created = 0;
456 
457 /*
458  * Supported FCAs
459  */
460 #define	QLC_PORT_1_ID_BITS		0x100
461 #define	QLC_PORT_2_ID_BITS		0x101
462 #define	QLC_PORT_NAA			0x2
463 #define	QLC_MODULE_NAME			"qlc"
464 #define	IS_QLC_PORT(port_dip)		\
465 			(strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
466 			QLC_MODULE_NAME) == 0)
467 
468 
469 /*
470  * fcip softstate structures head.
471  */
472 
473 static void *fcip_softp = NULL;
474 
475 /*
476  * linked list of active (inuse) driver streams
477  */
478 
479 static int fcip_num_instances = 0;
480 static dev_info_t *fcip_module_dip = (dev_info_t *)0;
481 
482 
483 /*
484  * Ethernet broadcast address: Broadcast addressing in IP over fibre
485  * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
486  *
487  * The broadcast addressing varies for differing topologies a node may be in:
488  *	- On a private loop the ARP broadcast is a class 3 sequence sent
489  *	  using OPNfr (Open Broadcast Replicate primitive) followed by
490  *	  the ARP frame to D_ID 0xFFFFFF
491  *
492  *	- On a public Loop the broadcast sequence is sent to AL_PA 0x00
493  *	  (no OPNfr primitive).
494  *
495  *	- For direct attach and point to point topologies we just send
496  *	  the frame to D_ID 0xFFFFFF
497  *
498  * For public loop the handling would probably be different - for now
499  * we just declare the broadcast structs below; delete them if unneeded.
500  *
501  */
502 
503 
504 /*
505  * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
506  * returned as part of a DL_INFO_ACK message, which is an M_PCPROTO message
507  * returned in response to a DL_INFO_REQ message sent to us from a DLS user.
508  * Let us fake an ether header as much as possible.
509  *
510  * dl_addr_length is the length of the Provider's DLSAP addr (SAP addr +
511  *                Physical addr of the provider). We set this to
512  *                ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
513  * dl_mac_type    Let's just use DL_ETHER - we can try using DL_IPFC, a new
514  *		  dlpi.h define, later.
515  * dl_sap_length  -2, indicating the SAP address follows the Physical addr
516  *		  component in the DLSAP addr.
517  * dl_service_mode: DL_CLDLS - connectionless data link service.
518  *
519  */
520 
521 static dl_info_ack_t fcip_infoack = {
522 	DL_INFO_ACK,				/* dl_primitive */
523 	FCIPMTU,				/* dl_max_sdu */
524 	0,					/* dl_min_sdu */
525 	FCIPADDRL,				/* dl_addr_length */
526 	DL_ETHER,				/* dl_mac_type */
527 	0,					/* dl_reserved */
528 	0,					/* dl_current_state */
529 	-2,					/* dl_sap_length */
530 	DL_CLDLS,				/* dl_service_mode */
531 	0,					/* dl_qos_length */
532 	0,					/* dl_qos_offset */
533 	0,					/* dl_range_length */
534 	0,					/* dl_range_offset */
535 	DL_STYLE2,				/* dl_provider_style */
536 	sizeof (dl_info_ack_t),			/* dl_addr_offset */
537 	DL_VERSION_2,				/* dl_version */
538 	ETHERADDRL,				/* dl_brdcst_addr_length */
539 	sizeof (dl_info_ack_t) + FCIPADDRL,	/* dl_brdcst_addr_offset */
540 	0					/* dl_growth */
541 };
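
/*
 * Sketch of the DLSAP address layout implied by the template above
 * (illustrative, not a normative definition): because dl_sap_length is
 * negative (-2), the physical address comes first in the DLSAP address
 * and the 2-byte SAP follows it:
 *
 *	+-------------------------------+-----------------+
 *	| physical addr (la_wwn_t WWN)  | SAP (ushort_t)  |
 *	+-------------------------------+-----------------+
 *	<----- dl_addr_length == FCIPADDRL bytes total ---->
 */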
542 
543 /*
544  * FCIP broadcast address definition.
545  */
546 static	struct ether_addr	fcipnhbroadcastaddr = {
547 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
548 };
549 
550 /*
551  * RFC2625 requires the broadcast ARP address in the ARP data payload to
552  * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
553  */
554 static	struct ether_addr	fcip_arpbroadcast_addr = {
555 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
556 };
557 
558 
559 #define	ether_bcopy(src, dest)	bcopy((src), (dest), ETHERADDRL);
560 
561 /*
562  * global kernel locks
563  */
564 static kcondvar_t	fcip_global_cv;
565 static kmutex_t		fcip_global_mutex;
566 
567 /*
568  * fctl external defines
569  */
570 extern int fc_ulp_add(fc_ulp_modinfo_t *);
571 
572 /*
573  * fctl data structures
574  */
575 
576 #define	FCIP_REV	0x07
577 
578 /* linked list of port info structures */
579 static fcip_port_info_t *fcip_port_head = NULL;
580 
581 /* linked list of fcip structures */
582 static struct fcipstr	*fcipstrup = NULL;
583 static krwlock_t	fcipstruplock;
584 
585 
586 /*
587  * Module information structure. This structure gives the FC Transport modules
588  * information about an ULP that registers with it.
589  */
590 static fc_ulp_modinfo_t	fcip_modinfo = {
591 	0,			/* for xref checks? */
592 	FCTL_ULP_MODREV_4,	/* FCIP revision */
593 	FC_TYPE_IS8802_SNAP,	/* type 5 for SNAP encapsulated datagrams */
594 	FCIP_NAME,		/* module name as in the modldrv struct */
595 	0x0,			/* get all statec callbacks for now */
596 	fcip_port_attach,	/* port attach callback */
597 	fcip_port_detach,	/* port detach callback */
598 	fcip_port_ioctl,	/* port ioctl callback */
599 	fcip_els_cb,		/* els callback */
600 	fcip_data_cb,		/* data callback */
601 	fcip_statec_cb		/* state change callback */
602 };
603 
604 
605 /*
606  * On Solaris 9 and up, /kernel/drv/fp.conf will have the following entry:
607  *
608  * ddi-forceattach=1;
609  *
610  * This ensures that fp is loaded at boot. No additional checks are needed.
611  */
612 int
613 _init(void)
614 {
615 	int	rval;
616 
617 	FCIP_TNF_LOAD();
618 
619 	/*
620 	 * Initialize the mutexes used by port attach and other callbacks.
621 	 * The transport can call back into our port_attach_callback
622 	 * routine even before _init() completes and bad things can happen.
623 	 */
624 	mutex_init(&fcip_global_mutex, NULL, MUTEX_DRIVER, NULL);
625 	cv_init(&fcip_global_cv, NULL, CV_DRIVER, NULL);
626 	rw_init(&fcipstruplock, NULL, RW_DRIVER, NULL);
627 
628 	mutex_enter(&fcip_global_mutex);
629 	fcip_port_attach_pending = 1;
630 	mutex_exit(&fcip_global_mutex);
631 
632 	/*
633 	 * Now attempt to register fcip with the transport.
634 	 * If fc_ulp_add fails, fcip module will not be loaded.
635 	 */
636 	rval = fc_ulp_add(&fcip_modinfo);
637 	if (rval != FC_SUCCESS) {
638 		mutex_destroy(&fcip_global_mutex);
639 		cv_destroy(&fcip_global_cv);
640 		rw_destroy(&fcipstruplock);
641 		switch (rval) {
642 		case FC_ULP_SAMEMODULE:
643 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
644 			    "!fcip: module is already registered with"
645 			    " transport"));
646 			rval = EEXIST;
647 			break;
648 		case FC_ULP_SAMETYPE:
649 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
650 			    "!fcip: Another module of the same ULP type 0x%x"
651 			    " is already registered with the transport",
652 			    fcip_modinfo.ulp_type));
653 			rval = EEXIST;
654 			break;
655 		case FC_BADULP:
656 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
657 			    "!fcip: Current fcip version 0x%x does not match"
658 			    " fctl version",
659 			    fcip_modinfo.ulp_rev));
660 			rval = ENODEV;
661 			break;
662 		default:
663 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
664 			    "!fcip: fc_ulp_add failed with status 0x%x", rval));
665 			rval = ENODEV;
666 			break;
667 		}
668 		FCIP_TNF_UNLOAD(&modlinkage);
669 		return (rval);
670 	}
671 
672 	if ((rval = ddi_soft_state_init(&fcip_softp, sizeof (struct fcip),
673 			FCIP_NUM_INSTANCES)) != 0) {
674 		mutex_destroy(&fcip_global_mutex);
675 		cv_destroy(&fcip_global_cv);
676 		rw_destroy(&fcipstruplock);
677 		(void) fc_ulp_remove(&fcip_modinfo);
678 		FCIP_TNF_UNLOAD(&modlinkage);
679 		return (rval);
680 	}
681 
682 	if ((rval = mod_install(&modlinkage)) != 0) {
683 		FCIP_TNF_UNLOAD(&modlinkage);
684 		(void) fc_ulp_remove(&fcip_modinfo);
685 		mutex_destroy(&fcip_global_mutex);
686 		cv_destroy(&fcip_global_cv);
687 		rw_destroy(&fcipstruplock);
688 		ddi_soft_state_fini(&fcip_softp);
689 	}
690 	return (rval);
691 }
692 
693 /*
694  * Unload the port driver if this was the only ULP loaded and then
695  * deregister with the transport.
696  */
697 int
698 _fini(void)
699 {
700 	int	rval;
701 	int	rval1;
702 
703 	/*
703 	 * Do not permit the module to be unloaded while a port
704 	 * attach callback is pending or in progress.
706 	 */
707 	mutex_enter(&fcip_global_mutex);
708 	if (fcip_num_attaching || fcip_port_attach_pending) {
709 		mutex_exit(&fcip_global_mutex);
710 		return (EBUSY);
711 	}
712 	mutex_exit(&fcip_global_mutex);
713 
714 	if ((rval = mod_remove(&modlinkage)) != 0) {
715 		return (rval);
716 	}
717 
718 	/*
719 	 * unregister with the transport layer
720 	 */
721 	rval1 = fc_ulp_remove(&fcip_modinfo);
722 
723 	/*
724 	 * If the ULP was not registered with the transport, init should
725 	 * have failed. If the transport has no knowledge of our existence,
726 	 * we should simply bail out and succeed.
727 	 */
728 #ifdef DEBUG
729 	if (rval1 == FC_BADULP) {
730 		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
731 		"fcip: ULP was never registered with the transport"));
732 		rval = ENODEV;
733 	} else if (rval1 == FC_BADTYPE) {
734 		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
735 			"fcip: No ULP of this type 0x%x was registered with "
736 			"transport", fcip_modinfo.ulp_type));
737 		rval = ENODEV;
738 	}
739 #endif /* DEBUG */
740 
741 	mutex_destroy(&fcip_global_mutex);
742 	rw_destroy(&fcipstruplock);
743 	cv_destroy(&fcip_global_cv);
744 	ddi_soft_state_fini(&fcip_softp);
745 
746 	FCIP_TNF_UNLOAD(&modlinkage);
747 
748 	return (rval);
749 }
750 
751 /*
752  * Info about this loadable module
753  */
754 int
755 _info(struct modinfo *modinfop)
756 {
757 	return (mod_info(&modlinkage, modinfop));
758 }
759 
760 /*
761  * The port attach callback is invoked by the port driver when an FCA
762  * port comes online and binds with the transport layer. The transport
763  * then calls back into all ULP modules registered with it. The port attach
764  * callback will also provide the ULP module with the Port's WWN and S_ID.
765  */
766 /* ARGSUSED */
767 static int
768 fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
769     fc_attach_cmd_t cmd, uint32_t sid)
770 {
771 	int 			rval = FC_FAILURE;
772 	int 			instance;
773 	struct fcip		*fptr;
774 	fcip_port_info_t	*fport = NULL;
775 	fcip_port_info_t	*cur_fport;
776 	fc_portid_t		src_id;
777 
778 	switch (cmd) {
779 	case FC_CMD_ATTACH: {
780 		la_wwn_t	*ww_pn = NULL;
781 		/*
782 		 * It was determined that, as per spec, the lower 48 bits of
783 		 * the port-WWN will always be unique. This will make the MAC
784 		 * address (i.e., the lower 48 bits of the WWN), that IP/ARP
785 		 * depend on, unique too. Hence we should be able to remove the
786 		 * restriction of attaching to only one of the ports of
787 		 * multi-port FCAs.
788 		 *
789 		 * Earlier, fcip used to attach only to the qlc module and fail
790 		 * silently for attach failures resulting from unknown FCAs or
791 		 * unsupported FCA ports. Now, we'll do no such checks.
792 		 */
793 		ww_pn = &port_info->port_pwwn;
794 
795 		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
796 			tnf_string, msg, "port id bits",
797 			tnf_opaque, nport_id, ww_pn->w.nport_id));
798 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
799 		    "port id bits: 0x%x", ww_pn->w.nport_id));
800 		/*
801 		 * A port has come online
802 		 */
803 		mutex_enter(&fcip_global_mutex);
804 		fcip_num_instances++;
805 		fcip_num_attaching++;
806 
807 		if (fcip_port_head == NULL) {
808 			/* OK to sleep here ? */
809 			fport = kmem_zalloc(sizeof (fcip_port_info_t),
810 						KM_NOSLEEP);
811 			if (fport == NULL) {
812 				fcip_num_instances--;
813 				fcip_num_attaching--;
814 				ASSERT(fcip_num_attaching >= 0);
815 				mutex_exit(&fcip_global_mutex);
816 				rval = FC_FAILURE;
817 				cmn_err(CE_WARN, "!fcip(%d): port attach "
818 				    "failed: alloc failed",
819 				    ddi_get_instance(port_info->port_dip));
820 				goto done;
821 			}
822 			fcip_port_head = fport;
823 		} else {
824 			/*
825 			 * traverse the port list and also check for
826 			 * duplicate port attaches - nothing wrong in being
827 			 * paranoid.
828 			 */
829 			cur_fport = fcip_port_head;
830 			while (cur_fport != NULL) {
831 				if (cur_fport->fcipp_handle ==
832 				    port_info->port_handle) {
833 					fcip_num_instances--;
834 					fcip_num_attaching--;
835 					ASSERT(fcip_num_attaching >= 0);
836 					mutex_exit(&fcip_global_mutex);
837 					FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
838 					    "!fcip(%d): port already "
839 					    "attached!!", ddi_get_instance(
840 					    port_info->port_dip)));
841 					rval = FC_FAILURE;
842 					goto done;
843 				}
844 				cur_fport = cur_fport->fcipp_next;
845 			}
846 			fport = kmem_zalloc(sizeof (fcip_port_info_t),
847 						KM_NOSLEEP);
848 			if (fport == NULL) {
849 				rval = FC_FAILURE;
850 				fcip_num_instances--;
851 				fcip_num_attaching--;
852 				ASSERT(fcip_num_attaching >= 0);
853 				mutex_exit(&fcip_global_mutex);
854 				cmn_err(CE_WARN, "!fcip(%d): port attach "
855 				    "failed: alloc failed",
856 				    ddi_get_instance(port_info->port_dip));
857 				goto done;
858 			}
859 			fport->fcipp_next = fcip_port_head;
860 			fcip_port_head = fport;
861 		}
862 
863 		mutex_exit(&fcip_global_mutex);
864 
865 		/*
866 		 * now fill in the details about the port itself
867 		 */
868 		fport->fcipp_linkage = *port_info->port_linkage;
869 		fport->fcipp_handle = port_info->port_handle;
870 		fport->fcipp_dip = port_info->port_dip;
871 		fport->fcipp_topology = port_info->port_flags;
872 		fport->fcipp_pstate = port_info->port_state;
873 		fport->fcipp_naa = port_info->port_pwwn.w.naa_id;
874 		bcopy(&port_info->port_pwwn, &fport->fcipp_pwwn,
875 		    sizeof (la_wwn_t));
876 		bcopy(&port_info->port_nwwn, &fport->fcipp_nwwn,
877 		    sizeof (la_wwn_t));
878 		fport->fcipp_fca_pkt_size = port_info->port_fca_pkt_size;
879 		fport->fcipp_cmd_dma_attr = *port_info->port_cmd_dma_attr;
880 		fport->fcipp_resp_dma_attr = *port_info->port_resp_dma_attr;
881 		fport->fcipp_fca_acc_attr = *port_info->port_acc_attr;
882 		src_id.port_id = sid;
883 		src_id.priv_lilp_posit = 0;
884 		fport->fcipp_sid = src_id;
885 
886 		/*
887 		 * allocate soft state for this instance
888 		 */
889 		instance = ddi_get_instance(fport->fcipp_dip);
890 		if (ddi_soft_state_zalloc(fcip_softp,
891 		    instance) != DDI_SUCCESS) {
892 			rval = FC_FAILURE;
893 			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
894 			    "soft state alloc failed", instance);
895 			goto failure;
896 		}
897 
898 		fptr = ddi_get_soft_state(fcip_softp, instance);
899 
900 		if (fptr == NULL) {
901 			rval = FC_FAILURE;
902 			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
903 			    "failure to get soft state", instance);
904 			goto failure;
905 		}
906 
907 		/*
908 		 * initialize all mutexes and locks required for this module
909 		 */
910 		mutex_init(&fptr->fcip_mutex, NULL, MUTEX_DRIVER, NULL);
911 		mutex_init(&fptr->fcip_ub_mutex, NULL, MUTEX_DRIVER, NULL);
912 		mutex_init(&fptr->fcip_rt_mutex, NULL, MUTEX_DRIVER, NULL);
913 		mutex_init(&fptr->fcip_dest_mutex, NULL, MUTEX_DRIVER, NULL);
914 		mutex_init(&fptr->fcip_sendup_mutex, NULL, MUTEX_DRIVER, NULL);
915 		cv_init(&fptr->fcip_farp_cv, NULL, CV_DRIVER, NULL);
916 		cv_init(&fptr->fcip_sendup_cv, NULL, CV_DRIVER, NULL);
917 		cv_init(&fptr->fcip_ub_cv, NULL, CV_DRIVER, NULL);
918 
919 		mutex_enter(&fptr->fcip_mutex);
920 
921 		fptr->fcip_dip = fport->fcipp_dip;	/* parent's dip */
922 		fptr->fcip_instance = instance;
923 		fptr->fcip_ub_upstream = 0;
924 
925 		if (FC_PORT_STATE_MASK(port_info->port_state) ==
926 		    FC_STATE_ONLINE) {
927 			fptr->fcip_port_state = FCIP_PORT_ONLINE;
928 			if (fptr->fcip_flags & FCIP_LINK_DOWN) {
929 				fptr->fcip_flags &= ~FCIP_LINK_DOWN;
930 			}
931 		} else {
932 			fptr->fcip_port_state = FCIP_PORT_OFFLINE;
933 		}
934 
935 		fptr->fcip_flags |= FCIP_ATTACHING;
936 		fptr->fcip_port_info = fport;
937 
938 		/*
939 		 * Extract our MAC addr from our port's WWN. The lower 48
940 		 * bits will be our MAC address
941 		 */
942 		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
943 
944 		fport->fcipp_fcip = fptr;
945 
946 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
947 		    (CE_NOTE, "fcipdest : 0x%lx, rtable : 0x%lx",
948 		    (long)(sizeof (fptr->fcip_dest)),
949 		    (long)(sizeof (fptr->fcip_rtable))));
950 
951 		bzero(fptr->fcip_dest, sizeof (fptr->fcip_dest));
952 		bzero(fptr->fcip_rtable, sizeof (fptr->fcip_rtable));
953 
954 		/*
955 		 * create a taskq to handle sundry jobs for the driver
956 		 * This way we can have jobs run in parallel
957 		 */
958 		fptr->fcip_tq = taskq_create("fcip_tasks",
959 		    FCIP_NUM_THREADS, MINCLSYSPRI, FCIP_MIN_TASKS,
960 		    FCIP_MAX_TASKS, TASKQ_PREPOPULATE);
961 
962 		mutex_exit(&fptr->fcip_mutex);
963 
964 		/*
965 		 * create a separate thread to handle all unsolicited
966 		 * callbacks. This is because unsolicited_callback
967 		 * can happen from an interrupt context and the upstream
968 		 * modules can put new messages right back in the same
969 		 * thread context. This usually works fine, but sometimes
970 		 * we may have to block to obtain the dest struct entries
971 		 * for some remote ports.
972 		 */
973 		mutex_enter(&fptr->fcip_sendup_mutex);
974 		if (thread_create(NULL, DEFAULTSTKSZ,
975 		    (void (*)())fcip_sendup_thr, (caddr_t)fptr, 0, &p0,
976 		    TS_RUN, minclsyspri) == NULL) {
977 			mutex_exit(&fptr->fcip_sendup_mutex);
978 			cmn_err(CE_WARN,
979 			    "!unable to create fcip sendup thread for "
980 			    " instance: 0x%x", instance);
981 			rval = FC_FAILURE;
982 			goto done;
983 		}
984 		fptr->fcip_sendup_thr_initted = 1;
985 		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
986 		mutex_exit(&fptr->fcip_sendup_mutex);
987 
988 
989 		/* Let the attach handler do the rest */
990 		if (fcip_port_attach_handler(fptr) != FC_SUCCESS) {
991 			/*
992 			 * We have already cleaned up so return
993 			 */
994 			rval = FC_FAILURE;
995 			cmn_err(CE_WARN, "!fcip(%d): port attach failed",
996 			    instance);
997 			goto done;
998 		}
999 
1000 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_CONT,
1001 		    "!fcip attach for port instance (0x%x) successful",
1002 		    instance));
1003 
1004 		rval = FC_SUCCESS;
1005 		goto done;
1006 	}
1007 	case FC_CMD_POWER_UP:
1008 	/* FALLTHROUGH */
1009 	case FC_CMD_RESUME:
1010 		mutex_enter(&fcip_global_mutex);
1011 		fport = fcip_port_head;
1012 		while (fport != NULL) {
1013 			if (fport->fcipp_handle == port_info->port_handle) {
1014 				break;
1015 			}
1016 			fport = fport->fcipp_next;
1017 		}
1018 		if (fport == NULL) {
1019 			rval = FC_SUCCESS;
1020 			mutex_exit(&fcip_global_mutex);
1021 			goto done;
1022 		}
1023 		rval = fcip_handle_resume(fport, port_info, cmd);
1024 		mutex_exit(&fcip_global_mutex);
1025 		goto done;
1026 
1027 	default:
1028 		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
1029 			tnf_string, msg, "unknown command type",
1030 			tnf_uint, cmd, cmd));
1031 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1032 		    "unknown cmd type 0x%x in port_attach", cmd));
1033 		rval = FC_FAILURE;
1034 		goto done;
1035 	}
1036 
1037 failure:
1038 	if (fport) {
1039 		mutex_enter(&fcip_global_mutex);
1040 		fcip_num_attaching--;
1041 		ASSERT(fcip_num_attaching >= 0);
1042 		(void) fcip_softstate_free(fport);
1043 		fcip_port_attach_pending = 0;
1044 		mutex_exit(&fcip_global_mutex);
1045 	}
1046 	return (rval);
1047 
1048 done:
1049 	mutex_enter(&fcip_global_mutex);
1050 	fcip_port_attach_pending = 0;
1051 	mutex_exit(&fcip_global_mutex);
1052 	return (rval);
1053 }
1054 
1055 /*
1056  * fcip_port_attach_handler: Completes the port attach operation after
1057  * the ulp_port_attach routine has completed its groundwork. The job
1058  * of this function, among other things, is to obtain and handle topology
1059  * specifics, initialize a port, set up broadcast address entries in
1060  * the fcip tables, etc. This routine cleans up behind itself on failures.
1061  * Returns FC_SUCCESS or FC_FAILURE.
1062  */
1063 static int
1064 fcip_port_attach_handler(struct fcip *fptr)
1065 {
1066 	fcip_port_info_t		*fport = fptr->fcip_port_info;
1067 	int				rval = FC_FAILURE;
1068 
1069 	ASSERT(fport != NULL);
1070 
1071 	mutex_enter(&fcip_global_mutex);
1072 
1073 	FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
1074 	    "fcip module dip: %p instance: %d",
1075 	    (void *)fcip_module_dip, ddi_get_instance(fptr->fcip_dip)));
1076 
1077 	if (fcip_module_dip == NULL) {
1078 		clock_t		fcip_lbolt;
1079 
1080 		fcip_lbolt = ddi_get_lbolt();
1081 		/*
1082 		 * we need to use the fcip devinfo for creating
1083 		 * the clone device node, but the fcip attach
1084 		 * (from its conf file entry claiming to be a
1085 		 * child of pseudo) may not have happened yet.
1086 		 * wait here for 10 seconds and fail port attach
1087 		 * if the fcip devinfo is not attached yet
1088 		 */
1089 		fcip_lbolt += drv_usectohz(FCIP_INIT_DELAY);
1090 
1091 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1092 		    (CE_WARN, "cv_timedwait lbolt %lx", fcip_lbolt));
1093 
1094 		(void) cv_timedwait(&fcip_global_cv, &fcip_global_mutex,
1095 		    fcip_lbolt);
1096 
1097 		if (fcip_module_dip == NULL) {
1098 			mutex_exit(&fcip_global_mutex);
1099 
1100 			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1101 				"fcip attach did not happen"));
1102 			goto port_attach_cleanup;
1103 		}
1104 	}
1105 
1106 	if ((!fcip_minor_node_created) &&
1107 	    fcip_is_supported_fc_topology(fport->fcipp_topology)) {
1108 		/*
1109 		 * Check for the same topologies that are considered valid
1110 		 * by fcip_handle_topology(). Don't create a minor node if
1111 		 * nothing is hanging off the FC port.
1112 		 */
1113 		if (ddi_create_minor_node(fcip_module_dip, "fcip", S_IFCHR,
1114 		    ddi_get_instance(fptr->fcip_dip), DDI_PSEUDO,
1115 		    CLONE_DEV) == DDI_FAILURE) {
1116 			mutex_exit(&fcip_global_mutex);
1117 			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1118 			    "failed to create minor node for fcip(%d)",
1119 			    ddi_get_instance(fptr->fcip_dip)));
1120 			goto port_attach_cleanup;
1121 		}
1122 		fcip_minor_node_created++;
1123 	}
1124 	mutex_exit(&fcip_global_mutex);
1125 
1126 	/*
1127 	 * initialize port for traffic
1128 	 */
1129 	if (fcip_init_port(fptr) != FC_SUCCESS) {
1130 		/* fcip_init_port has already cleaned up its stuff */
1131 
1132 		mutex_enter(&fcip_global_mutex);
1133 
1134 		if ((fcip_num_instances == 1) &&
1135 		    (fcip_minor_node_created == 1)) {
1136 			/* Remove minor node iff this is the last instance */
1137 			ddi_remove_minor_node(fcip_module_dip, NULL);
1138 		}
1139 
1140 		mutex_exit(&fcip_global_mutex);
1141 
1142 		goto port_attach_cleanup;
1143 	}
1144 
1145 	mutex_enter(&fptr->fcip_mutex);
1146 	fptr->fcip_flags &= ~FCIP_ATTACHING;
1147 	fptr->fcip_flags |= FCIP_INITED;
1148 	fptr->fcip_timeout_ticks = 0;
1149 
1150 	/*
1151 	 * start the timeout threads
1152 	 */
1153 	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
1154 	    drv_usectohz(1000000));
1155 
1156 	mutex_exit(&fptr->fcip_mutex);
1157 	mutex_enter(&fcip_global_mutex);
1158 	fcip_num_attaching--;
1159 	ASSERT(fcip_num_attaching >= 0);
1160 	mutex_exit(&fcip_global_mutex);
1161 	rval = FC_SUCCESS;
1162 	return (rval);
1163 
1164 port_attach_cleanup:
1165 	mutex_enter(&fcip_global_mutex);
1166 	(void) fcip_softstate_free(fport);
1167 	fcip_num_attaching--;
1168 	ASSERT(fcip_num_attaching >= 0);
1169 	mutex_exit(&fcip_global_mutex);
1170 	rval = FC_FAILURE;
1171 	return (rval);
1172 }
1173 
1174 
1175 /*
1176  * Handler for DDI_RESUME operations. Port must be ready to restart IP
1177  * traffic on resume
1178  */
1179 static int
1180 fcip_handle_resume(fcip_port_info_t *fport, fc_ulp_port_info_t *port_info,
1181     fc_attach_cmd_t cmd)
1182 {
1183 	int 		rval = FC_SUCCESS;
1184 	struct fcip	*fptr = fport->fcipp_fcip;
1185 	struct fcipstr	*tslp;
1186 	int		index;
1187 
1188 
1189 	ASSERT(fptr != NULL);
1190 
1191 	mutex_enter(&fptr->fcip_mutex);
1192 
1193 	if (cmd == FC_CMD_POWER_UP) {
1194 		fptr->fcip_flags &= ~(FCIP_POWER_DOWN);
1195 		if (fptr->fcip_flags & FCIP_SUSPENDED) {
1196 			mutex_exit(&fptr->fcip_mutex);
1197 			return (FC_SUCCESS);
1198 		}
1199 	} else if (cmd == FC_CMD_RESUME) {
1200 		fptr->fcip_flags &= ~(FCIP_SUSPENDED);
1201 	} else {
1202 		mutex_exit(&fptr->fcip_mutex);
1203 		return (FC_FAILURE);
1204 	}
1205 
1206 	/*
1207 	 * set the current port state and topology
1208 	 */
1209 	fport->fcipp_topology = port_info->port_flags;
1210 	fport->fcipp_pstate = port_info->port_state;
1211 
1212 	rw_enter(&fcipstruplock, RW_READER);
1213 	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
1214 		if (tslp->sl_fcip == fptr) {
1215 			break;
1216 		}
1217 	}
1218 	rw_exit(&fcipstruplock);
1219 
1220 	/*
1221 	 * No active streams on this port
1222 	 */
1223 	if (tslp == NULL) {
1224 		rval = FC_SUCCESS;
1225 		goto done;
1226 	}
1227 
1228 	mutex_enter(&fptr->fcip_rt_mutex);
1229 	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
1230 		struct fcip_routing_table 	*frp;
1231 
1232 		frp = fptr->fcip_rtable[index];
1233 		while (frp) {
1234 			uint32_t		did;
1235 			/*
1236 			 * Mark the broadcast RTE available again. It
1237 			 * was marked SUSPENDED during SUSPEND.
1238 			 */
1239 			did = fcip_get_broadcast_did(fptr);
1240 			if (frp->fcipr_d_id.port_id == did) {
1241 				frp->fcipr_state = 0;
1242 				index = FCIP_RT_HASH_ELEMS;
1243 				break;
1244 			}
1245 			frp = frp->fcipr_next;
1246 		}
1247 	}
1248 	mutex_exit(&fptr->fcip_rt_mutex);
1249 
1250 	/*
1251 	 * fcip_handle_topology will update the port entries in the
1252 	 * routing table.
1253 	 * fcip_handle_topology also takes care of resetting the
1254 	 * fcipr_state field in the routing table structure. The entries
1255 	 * were set to RT_INVALID during suspend.
1256 	 */
1257 	fcip_handle_topology(fptr);
1258 
1259 done:
1260 	/*
1261 	 * Restart the timeout thread
1262 	 */
1263 	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
1264 	    drv_usectohz(1000000));
1265 	mutex_exit(&fptr->fcip_mutex);
1266 	return (rval);
1267 }
1268 
1269 
1270 /*
1271  * Insert a destination port entry into the routing table for
1272  * this port
1273  */
1274 static void
1275 fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, uint32_t listlen)
1276 {
1277 	struct fcip_routing_table	*frp;
1278 	fcip_port_info_t		*fport = fptr->fcip_port_info;
1279 	int				hash_bucket, i;
1280 	fc_portmap_t			*pmap;
1281 	char				wwn_buf[20];
1282 
1283 	FCIP_TNF_PROBE_2((fcip_rt_update, "fcip io", /* CSTYLED */,
1284 		tnf_string, msg, "enter",
1285 		tnf_int, listlen, listlen));
1286 
1287 	ASSERT(!mutex_owned(&fptr->fcip_mutex));
1288 	mutex_enter(&fptr->fcip_rt_mutex);
1289 
1290 	for (i = 0; i < listlen; i++) {
1291 		pmap = &(devlist[i]);
1292 
1293 		frp = fcip_lookup_rtable(fptr, &(pmap->map_pwwn),
1294 		    FCIP_COMPARE_PWWN);
1295 		/*
1296 		 * If an entry for a port in the devlist exists in the
1297 		 * per-port routing table, make sure the data
1298 		 * is current. We need to do this irrespective of the
1299 		 * underlying port topology.
1300 		 */
1301 		switch (pmap->map_type) {
1302 		/* FALLTHROUGH */
1303 		case PORT_DEVICE_NOCHANGE:
1304 		/* FALLTHROUGH */
1305 		case PORT_DEVICE_USER_LOGIN:
1306 		/* FALLTHROUGH */
1307 		case PORT_DEVICE_CHANGED:
1308 		/* FALLTHROUGH */
1309 		case PORT_DEVICE_NEW:
1310 			if (frp == NULL) {
1311 				goto add_new_entry;
1312 			} else if (frp) {
1313 				goto update_entry;
1314 			} else {
1315 				continue;
1316 			}
1317 
1318 		case PORT_DEVICE_OLD:
1319 		/* FALLTHROUGH */
1320 		case PORT_DEVICE_USER_LOGOUT:
1321 			/*
1322 			 * Mark entry for removal from Routing Table if
1323 			 * one exists. Let the timeout thread actually
1324 			 * remove the entry after we've given up hopes
1325 			 * of the port ever showing up.
1326 			 */
1327 			if (frp) {
1328 				uint32_t		did;
1329 
1330 				/*
1331 				 * Mark the routing table entry as invalid so
1332 				 * that packets still in transit bail out early
1333 				 */
1334 				did = fptr->fcip_broadcast_did;
1335 				if (frp->fcipr_d_id.port_id != did) {
1336 					frp->fcipr_pd = NULL;
1337 					frp->fcipr_state = FCIP_RT_INVALID;
1338 					frp->fcipr_invalid_timeout =
1339 					    fptr->fcip_timeout_ticks +
1340 					    FCIP_RTE_TIMEOUT;
1341 				}
1342 			}
1343 			continue;
1344 
1345 		default:
1346 			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
1347 			    "unknown map flags in rt_update"));
1348 			continue;
1349 		}
1350 add_new_entry:
1351 		ASSERT(frp == NULL);
1352 		hash_bucket = FCIP_RT_HASH(pmap->map_pwwn.raw_wwn);
1353 
1354 		ASSERT(hash_bucket < FCIP_RT_HASH_ELEMS);
1355 
1356 		FCIP_TNF_PROBE_2((fcip_rt_update, "fcip io", /* CSTYLED */,
1357 			tnf_string, msg,
1358 			"add new entry",
1359 			tnf_int, hashbucket, hash_bucket));
1360 
1361 		frp = (struct fcip_routing_table *)
1362 		    kmem_zalloc(sizeof (struct fcip_routing_table), KM_SLEEP);
1363 		/* insert at beginning of hash bucket */
1364 		frp->fcipr_next = fptr->fcip_rtable[hash_bucket];
1365 		fptr->fcip_rtable[hash_bucket] = frp;
1366 		fc_wwn_to_str(&pmap->map_pwwn, wwn_buf);
1367 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
1368 		    "added entry for pwwn %s and d_id 0x%x",
1369 		    wwn_buf, pmap->map_did.port_id));
1370 update_entry:
1371 		bcopy((void *)&pmap->map_pwwn,
1372 		    (void *)&frp->fcipr_pwwn, sizeof (la_wwn_t));
1373 		bcopy((void *)&pmap->map_nwwn, (void *)&frp->fcipr_nwwn,
1374 		    sizeof (la_wwn_t));
1375 		frp->fcipr_d_id = pmap->map_did;
1376 		frp->fcipr_state = pmap->map_state;
1377 		frp->fcipr_pd = pmap->map_pd;
1378 
1379 		/*
1380 		 * If there is no pd for a destination port that is not
1381 		 * a broadcast entry, the port is pretty much unusable - so
1382 		 * mark the port for removal so we can try adding back the
1383 		 * entry again.
1384 		 */
1385 		if ((frp->fcipr_pd == NULL) &&
1386 		    (frp->fcipr_d_id.port_id != fptr->fcip_broadcast_did)) {
1387 			frp->fcipr_state = PORT_DEVICE_INVALID;
1388 			frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
1389 			    (FCIP_RTE_TIMEOUT / 2);
1390 		}
1391 		frp->fcipr_fca_dev =
1392 		    fc_ulp_get_fca_device(fport->fcipp_handle, pmap->map_did);
1393 
1394 		/*
1395 		 * login to the remote port. Don't worry about
1396 		 * plogi failures for now
1397 		 */
1398 		if (pmap->map_pd != NULL) {
1399 			(void) fcip_do_plogi(fptr, frp);
1400 		} else if (FC_TOP_EXTERNAL(fport->fcipp_topology)) {
1401 			fc_wwn_to_str(&frp->fcipr_pwwn, wwn_buf);
1402 			FCIP_DEBUG(FCIP_DEBUG_MISC, (CE_NOTE,
1403 			    "logging into pwwn %s, d_id 0x%x",
1404 			    wwn_buf, frp->fcipr_d_id.port_id));
1405 			(void) fcip_do_plogi(fptr, frp);
1406 		}
1407 
1408 		FCIP_TNF_BYTE_ARRAY(fcip_rt_update, "fcip io", "detail",
1409 			"new wwn in rt", pwwn,
1410 			&frp->fcipr_pwwn, sizeof (la_wwn_t));
1411 	}
1412 	mutex_exit(&fptr->fcip_rt_mutex);
1413 }
1414 
1415 
1416 /*
1417  * return a matching routing table entry for a given fcip instance
1418  */
1419 struct fcip_routing_table *
1420 fcip_lookup_rtable(struct fcip *fptr, la_wwn_t *wwn, int matchflag)
1421 {
1422 	struct fcip_routing_table	*frp = NULL;
1423 	int				hash_bucket;
1424 
1425 
1426 	FCIP_TNF_PROBE_1((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1427 		tnf_string, msg, "enter"));
1428 	FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1429 		"rtable lookup for", wwn,
1430 		&wwn->raw_wwn, sizeof (la_wwn_t));
1431 	FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1432 		tnf_string, msg, "match by",
1433 		tnf_int, matchflag, matchflag));
1434 
1435 	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
1436 
1437 	hash_bucket = FCIP_RT_HASH(wwn->raw_wwn);
1438 	frp = fptr->fcip_rtable[hash_bucket];
1439 	while (frp != NULL) {
1440 
1441 		FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1442 			"rtable entry", nwwn,
1443 			&(frp->fcipr_nwwn.raw_wwn), sizeof (la_wwn_t));
1444 
1445 		if (fcip_wwn_compare(&frp->fcipr_pwwn, wwn, matchflag) == 0) {
1446 			break;
1447 		}
1448 
1449 		frp = frp->fcipr_next;
1450 	}
1451 	FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1452 		tnf_string, msg, "lookup result",
1453 		tnf_opaque, frp, frp));
1454 	return (frp);
1455 }
1456 
1457 /*
1458  * Attach of fcip under pseudo. The actual setup of the interface
1459  * happens in fcip_port_attach on a callback from the
1460  * transport. The port_attach callback, however, can proceed only
1461  * after the devinfo for fcip has been created under pseudo.
1462  */
1463 static int
1464 fcip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1465 {
1466 	switch ((int)cmd) {
1467 
1468 	case DDI_ATTACH: {
1469 		ASSERT(fcip_module_dip == NULL);
1470 		fcip_module_dip = dip;
1471 
1472 		/*
1473 		 * this call originates as a result of fcip's conf
1474 		 * file entry and will result in an fcip instance being
1475 		 * a child of pseudo. We should ensure here that the port
1476 		 * driver (fp) has been loaded and initialized since we would
1477 		 * never get a port attach callback without fp being loaded.
1478 		 * If we are unable to successfully load and initialize fp,
1479 		 * just fail this attach.
1480 		 */
1481 		mutex_enter(&fcip_global_mutex);
1482 
1483 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1484 		    (CE_WARN, "global cv - signaling"));
1485 
1486 		cv_signal(&fcip_global_cv);
1487 
1488 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1489 		    (CE_WARN, "global cv - signaled"));
1490 		mutex_exit(&fcip_global_mutex);
1491 		return (DDI_SUCCESS);
1492 	}
1493 	case DDI_RESUME:
1494 		/*
1495 		 * Resume appears trickier
1496 		 */
1497 		return (DDI_SUCCESS);
1498 	default:
1499 		return (DDI_FAILURE);
1500 	}
1501 }
1502 
1503 
1504 /*
1505  * The detach entry point to permit unloading fcip. We make sure
1506  * there are no active streams before we proceed with the detach
1507  */
1508 /* ARGSUSED */
1509 static int
1510 fcip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1511 {
1512 	struct fcip		*fptr;
1513 	fcip_port_info_t	*fport;
1514 	int			detached;
1515 
1516 	switch (cmd) {
1517 	case DDI_DETACH: {
1518 		/*
1519 		 * If we got here, any active streams should have been
1520 		 * unplumbed but check anyway
1521 		 */
1522 		mutex_enter(&fcip_global_mutex);
1523 		if (fcipstrup != NULL) {
1524 			mutex_exit(&fcip_global_mutex);
1525 			return (DDI_FAILURE);
1526 		}
1527 
1528 		if (fcip_port_head != NULL) {
1529 			/*
1530 			 * Check to see if we have unattached/unbound
1531 			 * ports. If all the ports are unattached/unbound go
1532 			 * ahead and unregister with the transport
1533 			 */
1534 			fport = fcip_port_head;
1535 			while (fport != NULL) {
1536 				fptr = fport->fcipp_fcip;
1537 				if (fptr == NULL) {
1538 					/* advance or we would spin forever */
1538 					fport = fport->fcipp_next;
1538 					continue;
1539 				}
1540 				mutex_enter(&fptr->fcip_mutex);
1541 				fptr->fcip_flags |= FCIP_DETACHING;
1542 				if (fptr->fcip_ipq ||
1543 				    fptr->fcip_flags & (FCIP_IN_TIMEOUT |
1544 				    FCIP_IN_CALLBACK | FCIP_ATTACHING |
1545 				    FCIP_SUSPENDED | FCIP_POWER_DOWN |
1546 				    FCIP_REG_INPROGRESS)) {
1547 					FCIP_TNF_PROBE_1((fcip_detach,
1548 					    "fcip io", /* CSTYLED */,
1549 					    tnf_string, msg,
1550 					    "fcip instance busy"));
1551 
1552 					mutex_exit(&fptr->fcip_mutex);
1553 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1554 					    "fcip instance busy"));
1555 					break;
1556 				}
1557 				/*
1558 				 * Check for any outstanding pkts. If yes
1559 				 * fail the detach
1560 				 */
1561 				mutex_enter(&fptr->fcip_dest_mutex);
1562 				if (fcip_port_get_num_pkts(fptr) > 0) {
1563 					mutex_exit(&fptr->fcip_dest_mutex);
1564 					mutex_exit(&fptr->fcip_mutex);
1565 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1566 					    "fcip instance busy - pkts "
1567 					    "pending"));
1568 					break;
1569 				}
1570 				mutex_exit(&fptr->fcip_dest_mutex);
1571 
1572 				mutex_enter(&fptr->fcip_rt_mutex);
1573 				if (fcip_plogi_in_progress(fptr)) {
1574 					mutex_exit(&fptr->fcip_rt_mutex);
1575 					mutex_exit(&fptr->fcip_mutex);
1576 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1577 					    "fcip instance busy - plogi in "
1578 					    "progress"));
1579 					break;
1580 				}
1581 				mutex_exit(&fptr->fcip_rt_mutex);
1582 
1583 				mutex_exit(&fptr->fcip_mutex);
1584 				fport = fport->fcipp_next;
1585 			}
1586 			/*
1587 			 * if fport is non NULL - we have active ports
1588 			 */
1589 			if (fport != NULL) {
1590 				/*
1591 				 * Remove the DETACHING flags on the ports
1592 				 */
1593 				fport = fcip_port_head;
1594 				while (fport != NULL) {
1595 					fptr = fport->fcipp_fcip;
1596 					mutex_enter(&fptr->fcip_mutex);
1597 					fptr->fcip_flags &= ~(FCIP_DETACHING);
1598 					mutex_exit(&fptr->fcip_mutex);
1599 					fport = fport->fcipp_next;
1600 				}
1601 				mutex_exit(&fcip_global_mutex);
1602 				return (DDI_FAILURE);
1603 			}
1604 		}
1605 
1606 		/*
1607 		 * free up all softstate structures
1608 		 */
1609 		fport = fcip_port_head;
1610 		while (fport != NULL) {
1611 			detached = 1;
1612 
1613 			fptr = fport->fcipp_fcip;
1614 			if (fptr) {
1615 				mutex_enter(&fptr->fcip_mutex);
1616 				/*
1617 				 * Check to see if somebody beat us to the
1618 				 * punch
1619 				 */
1620 				detached = fptr->fcip_flags & FCIP_DETACHED;
1621 				fptr->fcip_flags &= ~(FCIP_DETACHING);
1622 				fptr->fcip_flags |= FCIP_DETACHED;
1623 				mutex_exit(&fptr->fcip_mutex);
1624 			}
1625 
1626 			if (!detached) {
1627 				fport = fcip_softstate_free(fport);
1628 			} else {
1629 				/*
1630 				 * If the port was marked as detached
1631 				 * but it was still in the list, that
1632 				 * means another thread has marked it
1633 				 * but we got in while it released the
1634 				 * fcip_global_mutex in softstate_free.
1635 				 * Given that, we're still safe to use
1636 				 * fport->fcipp_next to find out what
1637 				 * the next port on the list is.
1638 				 */
1639 				fport = fport->fcipp_next;
1640 			}
1641 
1642 			FCIP_DEBUG(FCIP_DEBUG_DETACH,
1643 			    (CE_NOTE, "detaching port"));
1644 
1645 			FCIP_TNF_PROBE_1((fcip_detach,
1646 				"fcip io", /* CSTYLED */, tnf_string,
1647 				msg, "detaching port"));
1648 		}
1649 
1650 		/*
1651 		 * If we haven't removed all the port structures, we
1652 		 * aren't yet ready to be detached.
1653 		 */
1654 		if (fcip_port_head != NULL) {
1655 			mutex_exit(&fcip_global_mutex);
1656 			return (DDI_FAILURE);
1657 		}
1658 
1659 		fcip_num_instances = 0;
1660 		mutex_exit(&fcip_global_mutex);
1661 		fcip_module_dip = NULL;
1662 		return (DDI_SUCCESS);
1663 	}
1664 	case DDI_SUSPEND:
1665 		return (DDI_SUCCESS);
1666 	default:
1667 		return (DDI_FAILURE);
1668 	}
1669 }
1670 
1671 /*
1672  * The port_detach callback is called from the transport when an
1673  * FC port is being removed from the transport's control. This routine
1674  * provides fcip with an opportunity to clean up all activities and
1675  * structures on the port marked for removal.
1676  */
1677 /* ARGSUSED */
1678 static int
1679 fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
1680     fc_detach_cmd_t cmd)
1681 {
1682 	int 			rval = FC_FAILURE;
1683 	fcip_port_info_t	*fport;
1684 	struct fcip		*fptr;
1685 	struct fcipstr		*strp;
1686 
1687 	switch (cmd) {
1688 	case FC_CMD_DETACH: {
1689 		mutex_enter(&fcip_global_mutex);
1690 
1691 		if (fcip_port_head == NULL) {
1692 			/*
1693 			 * we are all done but our _fini has not been
1694 			 * called yet. Let's hope we have no active
1695 			 * fcip instances here - a strange scenario, but
1696 			 * no harm in having this return success.
1697 			 */
1698 			fcip_check_remove_minor_node();
1699 
1700 			mutex_exit(&fcip_global_mutex);
1701 			return (FC_SUCCESS);
1702 		} else {
1703 			/*
1704 			 * traverse the port list
1705 			 */
1706 			fport = fcip_port_head;
1707 			while (fport != NULL) {
1708 				if (fport->fcipp_handle ==
1709 				    port_info->port_handle) {
1710 					fptr = fport->fcipp_fcip;
1711 
1712 					/*
1713 					 * Fail the port detach if there is
1714 					 * still an attached, bound stream on
1715 					 * this interface.
1716 					 */
1717 
1718 					rw_enter(&fcipstruplock, RW_READER);
1719 
1720 					for (strp = fcipstrup; strp != NULL;
1721 					    strp = strp->sl_nextp) {
1722 						if (strp->sl_fcip == fptr) {
1723 							rw_exit(&fcipstruplock);
1724 							mutex_exit(
1725 							    &fcip_global_mutex);
1726 							return (FC_FAILURE);
1727 						}
1728 					}
1729 
1730 					rw_exit(&fcipstruplock);
1731 
1732 					/*
1733 					 * fail port detach if we are in
1734 					 * the middle of a deferred port attach
1735 					 * or if the port has outstanding pkts
1736 					 */
1737 					if (fptr != NULL) {
1738 						mutex_enter(&fptr->fcip_mutex);
1739 						if (fcip_check_port_busy
1740 						    (fptr) ||
1741 						    (fptr->fcip_flags &
1742 						    FCIP_DETACHED)) {
1743 							mutex_exit(
1744 							    &fptr->fcip_mutex);
1745 							mutex_exit(
1746 							    &fcip_global_mutex);
1747 							return (FC_FAILURE);
1748 						}
1749 
1750 						fptr->fcip_flags |=
1751 						    FCIP_DETACHED;
1752 						mutex_exit(&fptr->fcip_mutex);
1753 					}
1754 					(void) fcip_softstate_free(fport);
1755 
1756 					fcip_check_remove_minor_node();
1757 					mutex_exit(&fcip_global_mutex);
1758 					return (FC_SUCCESS);
1759 				}
1760 				fport = fport->fcipp_next;
1761 			}
1762 			ASSERT(fport == NULL);
1763 		}
1764 		mutex_exit(&fcip_global_mutex);
1765 		break;
1766 	}
1767 	case FC_CMD_POWER_DOWN:
1768 	/* FALLTHROUGH */
1769 	case FC_CMD_SUSPEND:
1770 		mutex_enter(&fcip_global_mutex);
1771 		fport = fcip_port_head;
1772 		while (fport != NULL) {
1773 			if (fport->fcipp_handle == port_info->port_handle) {
1774 				break;
1775 			}
1776 			fport = fport->fcipp_next;
1777 		}
1778 		if (fport == NULL) {
1779 			mutex_exit(&fcip_global_mutex);
1780 			break;
1781 		}
1782 		rval = fcip_handle_suspend(fport, cmd);
1783 		mutex_exit(&fcip_global_mutex);
1784 		break;
1785 	default:
1786 		FCIP_DEBUG(FCIP_DEBUG_DETACH,
1787 		    (CE_WARN, "unknown port detach command!!"));
1788 		break;
1789 	}
1790 	return (rval);
1791 }
1792 
1793 
1794 /*
1795  * Returns 0 if the port is not busy, otherwise returns non-zero.
1796  */
1797 static int
1798 fcip_check_port_busy(struct fcip *fptr)
1799 {
1800 	int rval = 0, num_pkts = 0;
1801 
1802 	ASSERT(fptr != NULL);
1803 	ASSERT(MUTEX_HELD(&fptr->fcip_mutex));
1804 
1805 	mutex_enter(&fptr->fcip_dest_mutex);
1806 
1807 	if (fptr->fcip_flags & FCIP_PORT_BUSY ||
1808 	    ((num_pkts = fcip_port_get_num_pkts(fptr)) > 0) ||
1809 	    fptr->fcip_num_ipkts_pending) {
1810 		rval = 1;
1811 		FCIP_DEBUG(FCIP_DEBUG_DETACH,
1812 		    (CE_NOTE, "!fcip_check_port_busy: port is busy "
1813 		    "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!",
1814 		    fptr->fcip_flags, num_pkts, fptr->fcip_num_ipkts_pending));
1815 	}
1816 
1817 	mutex_exit(&fptr->fcip_dest_mutex);
1818 	return (rval);
1819 }
1820 
1821 /*
1822  * Helper routine to remove fcip's minor node.
1823  * There is one minor node per system and it should be removed if there are
1824  * no other fcip instances (which have a 1:1 mapping with fp instances) present
1825  */
1826 static void
1827 fcip_check_remove_minor_node(void)
1828 {
1829 	ASSERT(MUTEX_HELD(&fcip_global_mutex));
1830 
1831 	/*
1832 	 * If there are no more fcip (fp) instances, remove the
1833 	 * minor node for fcip.
1834 	 * Reset fcip_minor_node_created to invalidate it.
1835 	 */
1836 	if (fcip_num_instances == 0 && (fcip_module_dip != NULL)) {
1837 		ddi_remove_minor_node(fcip_module_dip, NULL);
1838 		fcip_minor_node_created = 0;
1839 	}
1840 }
1841 
1842 /*
1843  * This routine permits the suspend operation during a CPR/System
1844  * power management operation. The routine quiesces I/Os
1845  * on all active interfaces.
1846  */
1847 static int
1848 fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd)
1849 {
1850 	struct fcip	*fptr = fport->fcipp_fcip;
1851 	timeout_id_t	tid;
1852 	int 		index;
1853 	int		tryagain = 0;
1854 	int		count;
1855 	struct fcipstr	*tslp;
1856 
1857 
1858 	ASSERT(fptr != NULL);
1859 	mutex_enter(&fptr->fcip_mutex);
1860 
1861 	/*
1862 	 * Fail if we are in the middle of a callback. Don't use delay() during
1863 	 * suspend since clock interrupts are not available, so busy-wait instead.
1864 	 */
1865 	count = 0;
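	/* busy-wait up to ~15 seconds (15 x 1 second) for the callbacks to drain */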
1866 	while (count++ < 15 &&
1867 	    ((fptr->fcip_flags & FCIP_IN_CALLBACK) ||
1868 	    (fptr->fcip_flags & FCIP_IN_TIMEOUT))) {
1869 		mutex_exit(&fptr->fcip_mutex);
1870 		drv_usecwait(1000000);
1871 		mutex_enter(&fptr->fcip_mutex);
1872 	}
1873 
1874 	if (fptr->fcip_flags & FCIP_IN_CALLBACK ||
1875 	    fptr->fcip_flags & FCIP_IN_TIMEOUT) {
1876 		mutex_exit(&fptr->fcip_mutex);
1877 		return (FC_FAILURE);
1878 	}
1879 
1880 	if (cmd == FC_CMD_POWER_DOWN) {
1881 		if (fptr->fcip_flags & FCIP_SUSPENDED) {
1882 			fptr->fcip_flags |= FCIP_POWER_DOWN;
1883 			mutex_exit(&fptr->fcip_mutex);
1884 			goto success;
1885 		} else {
1886 			fptr->fcip_flags |= FCIP_POWER_DOWN;
1887 		}
1888 	} else if (cmd == FC_CMD_SUSPEND) {
1889 		fptr->fcip_flags |= FCIP_SUSPENDED;
1890 	} else {
1891 		mutex_exit(&fptr->fcip_mutex);
1892 		return (FC_FAILURE);
1893 	}
1894 
1895 	mutex_exit(&fptr->fcip_mutex);
1896 	/*
1897 	 * If no streams are plumbed, it's the easiest case - just
1898 	 * bail out without having to do much.
1899 	 */
1900 
1901 	rw_enter(&fcipstruplock, RW_READER);
1902 	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
1903 		if (tslp->sl_fcip == fptr) {
1904 			break;
1905 		}
1906 	}
1907 	rw_exit(&fcipstruplock);
1908 
1909 	/*
1910 	 * No active streams on this port
1911 	 */
1912 	if (tslp == NULL) {
1913 		goto success;
1914 	}
1915 
1916 	/*
1917 	 * Walk through each routing table structure and check if
1918 	 * the destination table has any outstanding commands. If yes,
1919 	 * wait for the commands to drain. Since we go through each
1920 	 * routing table entry in succession, it may be wise to wait
1921 	 * only a few seconds for each entry.
1922 	 */
1923 	mutex_enter(&fptr->fcip_rt_mutex);
1924 	while (!tryagain) {
1925 
1926 		tryagain = 0;
1927 		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
1928 			struct fcip_routing_table 	*frp;
1929 			struct fcip_dest 		*fdestp;
1930 			la_wwn_t			*pwwn;
1931 			int				hash_bucket;
1932 
1933 			frp = fptr->fcip_rtable[index];
1934 			while (frp) {
1935 				/*
1936 				 * Mark the routing table as SUSPENDED. Even
1937 				 * mark the broadcast entry SUSPENDED to
1938 				 * prevent any ARP or other broadcasts. We
1939 				 * can reset the state of the broadcast
1940 				 * RTE when we resume.
1941 				 */
1942 				frp->fcipr_state = FCIP_RT_SUSPENDED;
1943 				pwwn = &frp->fcipr_pwwn;
1944 
1945 				/*
1946 				 * Get hold of destination pointer
1947 				 */
1948 				mutex_enter(&fptr->fcip_dest_mutex);
1949 
1950 				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
1951 				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
1952 
1953 				fdestp = fptr->fcip_dest[hash_bucket];
1954 				while (fdestp != NULL) {
1955 					mutex_enter(&fdestp->fcipd_mutex);
1956 					if (fdestp->fcipd_rtable) {
1957 						if (fcip_wwn_compare(pwwn,
1958 						    &fdestp->fcipd_pwwn,
1959 						    FCIP_COMPARE_PWWN) == 0) {
1960 							mutex_exit(
1961 							&fdestp->fcipd_mutex);
1962 							break;
1963 						}
1964 					}
1965 					mutex_exit(&fdestp->fcipd_mutex);
1966 					fdestp = fdestp->fcipd_next;
1967 				}
1968 
1969 				mutex_exit(&fptr->fcip_dest_mutex);
1970 				if (fdestp == NULL) {
1971 					frp = frp->fcipr_next;
1972 					continue;
1973 				}
1974 
1975 				/*
1976 				 * Wait for fcip_wait_cmds seconds for
1977 				 * the commands to drain.
1978 				 */
1979 				count = 0;
1980 				mutex_enter(&fdestp->fcipd_mutex);
1981 				while (fdestp->fcipd_ncmds &&
1982 				    count < fcip_wait_cmds) {
1983 					mutex_exit(&fdestp->fcipd_mutex);
1984 					mutex_exit(&fptr->fcip_rt_mutex);
1985 					drv_usecwait(1000000);
1986 					mutex_enter(&fptr->fcip_rt_mutex);
1987 					mutex_enter(&fdestp->fcipd_mutex);
1988 					count++;
1989 				}
1990 				/*
1991 				 * Check if we were able to drain all cmds
1992 				 * successfully. Else continue with other
1993 				 * ports and try during the second pass
1994 				 */
1995 				if (fdestp->fcipd_ncmds) {
1996 					tryagain++;
1997 				}
1998 				mutex_exit(&fdestp->fcipd_mutex);
1999 
2000 				frp = frp->fcipr_next;
2001 			}
2002 		}
2003 		if (tryagain == 0) {
2004 			break;
2005 		}
2006 	}
2007 	mutex_exit(&fptr->fcip_rt_mutex);
2008 
2009 	if (tryagain) {
2010 		mutex_enter(&fptr->fcip_mutex);
2011 		fptr->fcip_flags &= ~(FCIP_SUSPENDED | FCIP_POWER_DOWN);
2012 		mutex_exit(&fptr->fcip_mutex);
2013 		return (FC_FAILURE);
2014 	}
2015 
2016 success:
2017 	mutex_enter(&fptr->fcip_mutex);
2018 	tid = fptr->fcip_timeout_id;
2019 	fptr->fcip_timeout_id = NULL;
2020 	mutex_exit(&fptr->fcip_mutex);
2021 
2022 	(void) untimeout(tid);
2023 
2024 	return (FC_SUCCESS);
2025 }
2026 
2027 /*
2028  * the getinfo(9E) entry point
2029  */
2030 /* ARGSUSED */
2031 static int
2032 fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
2033 {
2034 	int rval = DDI_FAILURE;
2035 
2036 	switch (cmd) {
2037 	case DDI_INFO_DEVT2DEVINFO:
2038 		*result = fcip_module_dip;
2039 		if (*result)
2040 			rval = DDI_SUCCESS;
2041 		break;
2042 
2043 	case DDI_INFO_DEVT2INSTANCE:
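		/*
		 * There is a single fcip minor node per system, so the
		 * instance reported here is always 0.
		 */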
2044 		*result = (void *)0;
2045 		rval = DDI_SUCCESS;
2046 		break;
2047 	default:
2048 		break;
2049 	}
2050 
2051 	return (rval);
2052 }
2053 
2054 /*
2055  * called from fcip_attach to initialize kstats for the link
2056  */
2057 /* ARGSUSED */
2058 static void
2059 fcip_kstat_init(struct fcip *fptr)
2060 {
2061 	int instance;
2062 	char buf[16];
2063 	struct fcipstat	*fcipstatp;
2064 
2065 	ASSERT(mutex_owned(&fptr->fcip_mutex));
2066 
2067 	instance = ddi_get_instance(fptr->fcip_dip);
2068 	(void) sprintf(buf, "fcip%d", instance);
2069 
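	/*
	 * Create the named kstat for this instance; the KSTAT_FLAG_PERSISTENT
	 * variant below is compiled in only when "kstat" is defined.
	 */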
2070 #ifdef	kstat
2071 	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2072 	    KSTAT_TYPE_NAMED,
2073 	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)),
2074 	    KSTAT_FLAG_PERSISTENT);
2075 #else
2076 	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2077 	    KSTAT_TYPE_NAMED,
2078 	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)), 0);
2079 #endif
2080 	if (fptr->fcip_kstatp == NULL) {
2081 		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "kstat create failed"));
2082 		return;
2083 	}
2084 
2085 	fcipstatp = (struct  fcipstat *)fptr->fcip_kstatp->ks_data;
2086 	kstat_named_init(&fcipstatp->fcips_ipackets,	"ipackets",
2087 		KSTAT_DATA_ULONG);
2088 	kstat_named_init(&fcipstatp->fcips_ierrors,	"ierrors",
2089 		KSTAT_DATA_ULONG);
2090 	kstat_named_init(&fcipstatp->fcips_opackets,	"opackets",
2091 		KSTAT_DATA_ULONG);
2092 	kstat_named_init(&fcipstatp->fcips_oerrors,	"oerrors",
2093 		KSTAT_DATA_ULONG);
2094 	kstat_named_init(&fcipstatp->fcips_collisions,	"collisions",
2095 		KSTAT_DATA_ULONG);
2096 	kstat_named_init(&fcipstatp->fcips_nocanput,	"nocanput",
2097 		KSTAT_DATA_ULONG);
2098 	kstat_named_init(&fcipstatp->fcips_allocbfail,	"allocbfail",
2099 		KSTAT_DATA_ULONG);
2100 
2101 	kstat_named_init(&fcipstatp->fcips_defer, "defer",
2102 		KSTAT_DATA_ULONG);
2103 	kstat_named_init(&fcipstatp->fcips_fram, "fram",
2104 		KSTAT_DATA_ULONG);
2105 	kstat_named_init(&fcipstatp->fcips_crc, "crc",
2106 		KSTAT_DATA_ULONG);
2107 	kstat_named_init(&fcipstatp->fcips_oflo, "oflo",
2108 		KSTAT_DATA_ULONG);
2109 	kstat_named_init(&fcipstatp->fcips_uflo, "uflo",
2110 		KSTAT_DATA_ULONG);
2111 	kstat_named_init(&fcipstatp->fcips_missed, "missed",
2112 		KSTAT_DATA_ULONG);
2113 	kstat_named_init(&fcipstatp->fcips_tlcol, "tlcol",
2114 		KSTAT_DATA_ULONG);
2115 	kstat_named_init(&fcipstatp->fcips_trtry, "trtry",
2116 		KSTAT_DATA_ULONG);
2117 	kstat_named_init(&fcipstatp->fcips_tnocar, "tnocar",
2118 		KSTAT_DATA_ULONG);
2119 	kstat_named_init(&fcipstatp->fcips_inits, "inits",
2120 		KSTAT_DATA_ULONG);
2121 	kstat_named_init(&fcipstatp->fcips_notbufs, "notbufs",
2122 		KSTAT_DATA_ULONG);
2123 	kstat_named_init(&fcipstatp->fcips_norbufs, "norbufs",
2124 		KSTAT_DATA_ULONG);
2125 	kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail",
2126 		KSTAT_DATA_ULONG);
2127 
2128 	/*
2129 	 * required by kstat for MIB II objects (RFC 1213)
2130 	 */
2131 	kstat_named_init(&fcipstatp->fcips_rcvbytes, "fcips_rcvbytes",
2132 		KSTAT_DATA_ULONG);	/* # octets received */
2133 					/* MIB - ifInOctets */
2134 	kstat_named_init(&fcipstatp->fcips_xmtbytes, "fcips_xmtbytes",
2135 		KSTAT_DATA_ULONG);	/* # octets xmitted */
2136 					/* MIB - ifOutOctets */
2137 	kstat_named_init(&fcipstatp->fcips_multircv,	"fcips_multircv",
2138 		KSTAT_DATA_ULONG);	/* # multicast packets */
2139 					/* delivered to upper layer */
2140 					/* MIB - ifInNUcastPkts */
2141 	kstat_named_init(&fcipstatp->fcips_multixmt,	"fcips_multixmt",
2142 		KSTAT_DATA_ULONG);	/* # multicast packets */
2143 					/* requested to be sent */
2144 					/* MIB - ifOutNUcastPkts */
2145 	kstat_named_init(&fcipstatp->fcips_brdcstrcv, "fcips_brdcstrcv",
2146 		KSTAT_DATA_ULONG); /* # broadcast packets */
2147 					/* delivered to upper layer */
2148 					/* MIB - ifInNUcastPkts */
2149 	kstat_named_init(&fcipstatp->fcips_brdcstxmt, "fcips_brdcstxmt",
2150 		KSTAT_DATA_ULONG);	/* # broadcast packets */
2151 					/* requested to be sent */
2152 					/* MIB - ifOutNUcastPkts */
2153 	kstat_named_init(&fcipstatp->fcips_norcvbuf,	"fcips_norcvbuf",
2154 		KSTAT_DATA_ULONG);	/* # rcv packets discarded */
2155 					/* MIB - ifInDiscards */
2156 	kstat_named_init(&fcipstatp->fcips_noxmtbuf,	"fcips_noxmtbuf",
2157 		KSTAT_DATA_ULONG);	/* # xmt packets discarded */
2158 
2159 	fptr->fcip_kstatp->ks_update = fcip_stat_update;
2160 	fptr->fcip_kstatp->ks_private = (void *) fptr;
2161 	kstat_install(fptr->fcip_kstatp);
2162 }
2163 
2164 /*
2165  * Update the defined kstats for netstat et al to use
2166  */
2167 /* ARGSUSED */
2168 static int
2169 fcip_stat_update(kstat_t *fcip_statp, int val)
2170 {
2171 	struct fcipstat	*fcipstatp;
2172 	struct fcip	*fptr;
2173 
2174 	fptr = (struct fcip *)fcip_statp->ks_private;
2175 	fcipstatp = (struct fcipstat *)fcip_statp->ks_data;
2176 
2177 	if (val == KSTAT_WRITE) {
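		/*
		 * KSTAT_WRITE: copy the user supplied kstat values back
		 * into the per-instance counters.
		 */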
2178 		fptr->fcip_ipackets	= fcipstatp->fcips_ipackets.value.ul;
2179 		fptr->fcip_ierrors	= fcipstatp->fcips_ierrors.value.ul;
2180 		fptr->fcip_opackets	= fcipstatp->fcips_opackets.value.ul;
2181 		fptr->fcip_oerrors	= fcipstatp->fcips_oerrors.value.ul;
2182 		fptr->fcip_collisions	= fcipstatp->fcips_collisions.value.ul;
2183 		fptr->fcip_defer	= fcipstatp->fcips_defer.value.ul;
2184 		fptr->fcip_fram	= fcipstatp->fcips_fram.value.ul;
2185 		fptr->fcip_crc	= fcipstatp->fcips_crc.value.ul;
2186 		fptr->fcip_oflo	= fcipstatp->fcips_oflo.value.ul;
2187 		fptr->fcip_uflo	= fcipstatp->fcips_uflo.value.ul;
2188 		fptr->fcip_missed	= fcipstatp->fcips_missed.value.ul;
2189 		fptr->fcip_tlcol	= fcipstatp->fcips_tlcol.value.ul;
2190 		fptr->fcip_trtry	= fcipstatp->fcips_trtry.value.ul;
2191 		fptr->fcip_tnocar	= fcipstatp->fcips_tnocar.value.ul;
2192 		fptr->fcip_inits	= fcipstatp->fcips_inits.value.ul;
2193 		fptr->fcip_notbufs	= fcipstatp->fcips_notbufs.value.ul;
2194 		fptr->fcip_norbufs	= fcipstatp->fcips_norbufs.value.ul;
2195 		fptr->fcip_nocanput	= fcipstatp->fcips_nocanput.value.ul;
2196 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2197 		fptr->fcip_rcvbytes	= fcipstatp->fcips_rcvbytes.value.ul;
2198 		fptr->fcip_xmtbytes	= fcipstatp->fcips_xmtbytes.value.ul;
2199 		fptr->fcip_multircv	= fcipstatp->fcips_multircv.value.ul;
2200 		fptr->fcip_multixmt	= fcipstatp->fcips_multixmt.value.ul;
2201 		fptr->fcip_brdcstrcv	= fcipstatp->fcips_brdcstrcv.value.ul;
2202 		fptr->fcip_norcvbuf	= fcipstatp->fcips_norcvbuf.value.ul;
2203 		fptr->fcip_noxmtbuf	= fcipstatp->fcips_noxmtbuf.value.ul;
2212 
2213 	} else {
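		/*
		 * KSTAT_READ: refresh the named kstats from the
		 * per-instance counters.
		 */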
2214 		fcipstatp->fcips_ipackets.value.ul	= fptr->fcip_ipackets;
2215 		fcipstatp->fcips_ierrors.value.ul	= fptr->fcip_ierrors;
2216 		fcipstatp->fcips_opackets.value.ul	= fptr->fcip_opackets;
2217 		fcipstatp->fcips_oerrors.value.ul	= fptr->fcip_oerrors;
2218 		fcipstatp->fcips_collisions.value.ul	= fptr->fcip_collisions;
2219 		fcipstatp->fcips_nocanput.value.ul	= fptr->fcip_nocanput;
2220 		fcipstatp->fcips_allocbfail.value.ul	= fptr->fcip_allocbfail;
2221 		fcipstatp->fcips_defer.value.ul	= fptr->fcip_defer;
2222 		fcipstatp->fcips_fram.value.ul	= fptr->fcip_fram;
2223 		fcipstatp->fcips_crc.value.ul	= fptr->fcip_crc;
2224 		fcipstatp->fcips_oflo.value.ul	= fptr->fcip_oflo;
2225 		fcipstatp->fcips_uflo.value.ul	= fptr->fcip_uflo;
2226 		fcipstatp->fcips_missed.value.ul	= fptr->fcip_missed;
2227 		fcipstatp->fcips_tlcol.value.ul	= fptr->fcip_tlcol;
2228 		fcipstatp->fcips_trtry.value.ul	= fptr->fcip_trtry;
2229 		fcipstatp->fcips_tnocar.value.ul	= fptr->fcip_tnocar;
2230 		fcipstatp->fcips_inits.value.ul	= fptr->fcip_inits;
2231 		fcipstatp->fcips_norbufs.value.ul	= fptr->fcip_norbufs;
2232 		fcipstatp->fcips_notbufs.value.ul	= fptr->fcip_notbufs;
2233 		fcipstatp->fcips_rcvbytes.value.ul	= fptr->fcip_rcvbytes;
2234 		fcipstatp->fcips_xmtbytes.value.ul	= fptr->fcip_xmtbytes;
2235 		fcipstatp->fcips_multircv.value.ul	= fptr->fcip_multircv;
2236 		fcipstatp->fcips_multixmt.value.ul	= fptr->fcip_multixmt;
2237 		fcipstatp->fcips_brdcstrcv.value.ul	= fptr->fcip_brdcstrcv;
2238 		fcipstatp->fcips_brdcstxmt.value.ul	= fptr->fcip_brdcstxmt;
2239 		fcipstatp->fcips_norcvbuf.value.ul	= fptr->fcip_norcvbuf;
2240 		fcipstatp->fcips_noxmtbuf.value.ul	= fptr->fcip_noxmtbuf;
2241 
2242 	}
2243 	return (0);
2244 }
2245 
2246 
2247 /*
2248  * fcip_statec_cb: handles all required state change callback notifications
2249  * it receives from the transport
2250  */
2251 /* ARGSUSED */
2252 static void
2253 fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
2254     uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
2255     uint32_t listlen, uint32_t sid)
2256 {
2257 	fcip_port_info_t	*fport;
2258 	struct fcip 		*fptr;
2259 	struct fcipstr		*slp;
2260 	queue_t			*wrq;
2261 	int			instance;
2262 	int 			index;
2263 	struct fcip_routing_table 	*frtp;
2264 
2265 	fport = fcip_get_port(phandle);
2266 
2267 	if (fport == NULL) {
2268 		return;
2269 	}
2270 
2271 	fptr = fport->fcipp_fcip;
2272 	ASSERT(fptr != NULL);
2273 
2274 	if (fptr == NULL) {
2275 		return;
2276 	}
2277 
2278 	instance = ddi_get_instance(fport->fcipp_dip);
2279 
2280 	FCIP_TNF_PROBE_4((fcip_statec_cb, "fcip io", /* CSTYLED */,
2281 		tnf_string, msg, "state change callback",
2282 		tnf_uint, instance, instance,
2283 		tnf_uint, S_ID, sid,
2284 		tnf_int, count, listlen));
2285 	FCIP_DEBUG(FCIP_DEBUG_ELS,
2286 	    (CE_NOTE, "fcip%d, state change callback: state:0x%x, "
2287 	    "S_ID:0x%x, count:0x%x", instance, port_state, sid, listlen));
2288 
2289 	mutex_enter(&fptr->fcip_mutex);
2290 
2291 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
2292 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
2293 		mutex_exit(&fptr->fcip_mutex);
2294 		return;
2295 	}
2296 
2297 	/*
2298 	 * set fcip flags to indicate we are in the middle of a
2299 	 * state change callback so we can wait till the statechange
2300 	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2301 	 */
2302 	fptr->fcip_flags |= FCIP_IN_SC_CB;
2303 
2304 	fport->fcipp_pstate = port_state;
2305 
2306 	/*
2307 	 * Check if topology changed. If Yes - Modify the broadcast
2308 	 * RTE entries to understand the new broadcast D_IDs
2309 	 */
2310 	if (fport->fcipp_topology != port_top &&
2311 	    (port_top != FC_TOP_UNKNOWN)) {
2312 		/* REMOVE later */
2313 		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
2314 		    "topology changed: Old topology: 0x%x New topology 0x%x",
2315 		    fport->fcipp_topology, port_top));
2316 		/*
2317 		 * If topology changed - attempt a rediscovery of
2318 		 * devices. Helps specially in Fabric/Public loops
2319 		 * and if on_demand_node_creation is disabled
2320 		 */
2321 		fport->fcipp_topology = port_top;
2322 		fcip_handle_topology(fptr);
2323 	}
2324 
2325 	mutex_exit(&fptr->fcip_mutex);
2326 
2327 	switch (FC_PORT_STATE_MASK(port_state)) {
2328 	case FC_STATE_ONLINE:
2329 	/* FALLTHROUGH */
2330 	case FC_STATE_LIP:
2331 	/* FALLTHROUGH */
2332 	case FC_STATE_LIP_LBIT_SET:
2333 
2334 		/*
2335 		 * nothing to do here unless we were actually
2336 		 * logged onto a port in the devlist
2337 		 * (which indicates active communication between
2338 		 * the host port and the port in the changelist).
2339 		 * If however we are in a private loop or point to
2340 		 * point mode, we need to check for any IP capable
2341 		 * ports and update our routing table.
2342 		 */
2343 		switch (port_top) {
2344 		case FC_TOP_FABRIC:
2345 			/*
2346 			 * This indicates a fabric port with a NameServer.
2347 			 * Check the devlist to see if we are in active
2348 			 * communication with a port on the devlist.
2349 			 */
2350 			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
2351 			    "Statec_cb: fabric topology"));
2352 			fcip_rt_update(fptr, changelist, listlen);
2353 			break;
2354 		case FC_TOP_NO_NS:
2355 			/*
2356 			 * No nameserver - so treat it like a private loop
2357 			 * or point to point topology: get a map of
2358 			 * devices on the link and the IP capable ports
2359 			 * to update the routing table.
2360 			 */
2361 			FCIP_DEBUG(FCIP_DEBUG_ELS,
2362 			    (CE_NOTE, "Statec_cb: NO_NS topology"));
2363 		/* FALLTHROUGH */
2364 		case FC_TOP_PRIVATE_LOOP:
2365 			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
2366 			    "Statec_cb: Pvt_Loop topology"));
2367 		/* FALLTHROUGH */
2368 		case FC_TOP_PT_PT:
2369 			/*
2370 			 * call get_port_map() and update routing table
2371 			 */
2372 			fcip_rt_update(fptr, changelist, listlen);
2373 			break;
2374 		default:
2375 			FCIP_DEBUG(FCIP_DEBUG_ELS,
2376 			    (CE_NOTE, "Statec_cb: Unknown topology"));
2377 		}
2378 
2379 		/*
2380 		 * We should now enable the Queues and permit I/Os
2381 		 * to flow through downstream. The update of routing
2382 		 * table should have flushed out any port entries that
2383 		 * don't exist or are not available after the state change
2384 		 */
2385 		mutex_enter(&fptr->fcip_mutex);
2386 		fptr->fcip_port_state = FCIP_PORT_ONLINE;
2387 		if (fptr->fcip_flags & FCIP_LINK_DOWN) {
2388 			fptr->fcip_flags &= ~FCIP_LINK_DOWN;
2389 		}
2390 		mutex_exit(&fptr->fcip_mutex);
2391 
2392 		/*
2393 		 * Enable write queues
2394 		 */
2395 		rw_enter(&fcipstruplock, RW_READER);
2396 		for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
2397 			if (slp && slp->sl_fcip == fptr) {
2398 				wrq = WR(slp->sl_rq);
2399 				if (wrq->q_flag & QFULL) {
2400 					qenable(wrq);
2401 				}
2402 			}
2403 		}
2404 		rw_exit(&fcipstruplock);
2405 		break;
2406 	case FC_STATE_OFFLINE:
2407 		/*
2408 		 * mark the port_state OFFLINE and wait for it to
2409 		 * become online. Any new messages in this state will
2410 		 * simply be queued back up. If the port does not
2411 		 * come online in a short while, we can begin failing
2412 		 * messages and flush the routing table
2413 		 */
2414 		mutex_enter(&fptr->fcip_mutex);
2415 		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
2416 		    FCIP_OFFLINE_TIMEOUT;
2417 		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
2418 		mutex_exit(&fptr->fcip_mutex);
2419 
2420 		/*
2421 		 * Mark all Routing table entries as invalid to prevent
2422 		 * any commands from trickling through to ports that
2423 		 * have disappeared from under us
2424 		 */
2425 		mutex_enter(&fptr->fcip_rt_mutex);
2426 		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
2427 			frtp = fptr->fcip_rtable[index];
2428 			while (frtp) {
2429 				frtp->fcipr_state = PORT_DEVICE_INVALID;
2430 				frtp = frtp->fcipr_next;
2431 			}
2432 		}
2433 		mutex_exit(&fptr->fcip_rt_mutex);
2434 
2435 		break;
2436 
2437 	case FC_STATE_RESET_REQUESTED:
2438 		/*
2439 		 * Release all Unsolicited buffers back to transport/FCA.
2440 		 * This also means the port state is marked offline - so
2441 		 * we may have to do what OFFLINE state requires us to do.
2442 		 * Care must be taken to wait for any active unsolicited
2443 		 * buffer with the other Streams modules - so wait for
2444 		 * a freeb if the unsolicited buffer is passed back all
2445 		 * the way upstream.
2446 		 */
2447 		mutex_enter(&fptr->fcip_mutex);
2448 
2449 #ifdef FCIP_ESBALLOC
2450 		while (fptr->fcip_ub_upstream) {
2451 			cv_wait(&fptr->fcip_ub_cv, &fptr->fcip_mutex);
2452 		}
2453 #endif	/* FCIP_ESBALLOC */
2454 
2455 		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
2456 		    FCIP_OFFLINE_TIMEOUT;
2457 		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
2458 		mutex_exit(&fptr->fcip_mutex);
2459 		break;
2460 
2461 	case FC_STATE_DEVICE_CHANGE:
2462 		if (listlen) {
2463 			fcip_rt_update(fptr, changelist, listlen);
2464 		}
2465 		break;
2466 	case FC_STATE_RESET:
2467 		/*
2468 		 * Not much to do I guess - wait for port to become
2469 		 * ONLINE. If the port doesn't become online in a short
2470 		 * while, the upper layers abort any request themselves.
2471 		 * We can just putback the messages in the streams queues
2472 		 * if the link is offline
2473 		 */
2474 		break;
2475 	}
2476 	mutex_enter(&fptr->fcip_mutex);
2477 	fptr->fcip_flags &= ~(FCIP_IN_SC_CB);
2478 	mutex_exit(&fptr->fcip_mutex);
2479 }
2480 
2481 /*
2482  * Given a port handle, return the fcip_port_info structure corresponding
2483  * to that port handle. The transport allocates and communicates with
2484  * ULPs using port handles
2485  */
2486 static fcip_port_info_t *
2487 fcip_get_port(opaque_t phandle)
2488 {
2489 	fcip_port_info_t *fport;
2490 
2491 	ASSERT(phandle != NULL);
2492 
2493 	mutex_enter(&fcip_global_mutex);
2494 	fport = fcip_port_head;
2495 
2496 	while (fport != NULL) {
2497 		if (fport->fcipp_handle == phandle) {
2498 			/* found */
2499 			break;
2500 		}
2501 		fport = fport->fcipp_next;
2502 	}
2503 
2504 	mutex_exit(&fcip_global_mutex);
2505 
2506 	return (fport);
2507 }
2508 
2509 /*
2510  * Handle inbound ELS requests received by the transport. We are only
2511  * interested in FARP/InARP mostly.
2512  */
2513 /* ARGSUSED */
2514 static int
2515 fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
2516     fc_unsol_buf_t *buf, uint32_t claimed)
2517 {
2518 	fcip_port_info_t	*fport;
2519 	struct fcip 		*fptr;
2520 	int			instance;
2521 	uchar_t			r_ctl;
2522 	uchar_t			ls_code;
2523 	la_els_farp_t		farp_cmd;
2524 	la_els_farp_t		*fcmd;
2525 	int			rval = FC_UNCLAIMED;
2526 
2527 	fport = fcip_get_port(phandle);
2528 	if (fport == NULL) {
2529 		return (FC_UNCLAIMED);
2530 	}
2531 
2532 	fptr = fport->fcipp_fcip;
2533 	ASSERT(fptr != NULL);
2534 	if (fptr == NULL) {
2535 		return (FC_UNCLAIMED);
2536 	}
2537 
2538 	instance = ddi_get_instance(fport->fcipp_dip);
2539 
2540 	mutex_enter(&fptr->fcip_mutex);
2541 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
2542 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
2543 		mutex_exit(&fptr->fcip_mutex);
2544 		return (FC_UNCLAIMED);
2545 	}
2546 
2547 	/*
2548 	 * set fcip flags to indicate we are in the middle of an
2549 	 * ELS callback so we can wait till the callback
2550 	 * completes before succeeding/failing a SUSPEND/POWER DOWN.
2551 	 */
2552 	fptr->fcip_flags |= FCIP_IN_ELS_CB;
2553 	mutex_exit(&fptr->fcip_mutex);
2554 
2555 	FCIP_TNF_PROBE_2((fcip_els_cb, "fcip io", /* CSTYLED */,
2556 		tnf_string, msg, "ELS callback",
2557 		tnf_uint, instance, instance));
2558 
2559 	FCIP_DEBUG(FCIP_DEBUG_ELS,
2560 	    (CE_NOTE, "fcip%d, ELS callback , ", instance));
2561 
2562 	r_ctl = buf->ub_frame.r_ctl;
2563 	switch (r_ctl & R_CTL_ROUTING) {
2564 	case R_CTL_EXTENDED_SVC:
2565 		if (r_ctl == R_CTL_ELS_REQ) {
2566 			ls_code = buf->ub_buffer[0];
2567 			if (ls_code == LA_ELS_FARP_REQ) {
2568 				/*
2569 				 * Inbound FARP broadcast request
2570 				 */
2571 				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
2572 					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2573 					    "Invalid FARP req buffer size "
2574 					    "expected 0x%lx, got 0x%x",
2575 					    (long)(sizeof (la_els_farp_t)),
2576 					    buf->ub_bufsize));
2577 					rval = FC_UNCLAIMED;
2578 					goto els_cb_done;
2579 				}
2580 				fcmd = (la_els_farp_t *)buf;
2581 				if (fcip_wwn_compare(&fcmd->resp_nwwn,
2582 				    &fport->fcipp_nwwn,
2583 				    FCIP_COMPARE_NWWN) != 0) {
2584 					rval = FC_UNCLAIMED;
2585 					goto els_cb_done;
2586 				}
2587 				/*
2588 				 * copy the FARP request and release the
2589 				 * unsolicited buffer
2590 				 */
2591 				fcmd = &farp_cmd;
2592 				bcopy((void *)buf, (void *)fcmd,
2593 				    sizeof (la_els_farp_t));
2594 				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
2595 				    &buf->ub_token);
2596 
2597 				if (fcip_farp_supported &&
2598 				    fcip_handle_farp_request(fptr, fcmd) ==
2599 				    FC_SUCCESS) {
2600 					/*
2601 					 * We successfully sent out a FARP
2602 					 * reply to the requesting port
2603 					 */
2604 					rval = FC_SUCCESS;
2605 					goto els_cb_done;
2606 				} else {
2607 					rval = FC_UNCLAIMED;
2608 					goto els_cb_done;
2609 				}
2610 			}
2611 		} else if (r_ctl == R_CTL_ELS_RSP) {
2612 			ls_code = buf->ub_buffer[0];
2613 			if (ls_code == LA_ELS_FARP_REPLY) {
2614 				/*
2615 				 * We received a REPLY to our FARP request
2616 				 */
2617 				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
2618 					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2619 					    "Invalid FARP req buffer size "
2620 					    "expected 0x%lx, got 0x%x",
2621 					    (long)(sizeof (la_els_farp_t)),
2622 					    buf->ub_bufsize));
2623 					rval = FC_UNCLAIMED;
2624 					goto els_cb_done;
2625 				}
2626 				fcmd = &farp_cmd;
2627 				bcopy((void *)buf, (void *)fcmd,
2628 				    sizeof (la_els_farp_t));
2629 				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
2630 				    &buf->ub_token);
2631 				if (fcip_farp_supported &&
2632 				    fcip_handle_farp_response(fptr, fcmd) ==
2633 				    FC_SUCCESS) {
2634 					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
2635 					    "Successfully recevied a FARP "
2636 					    "response"));
2637 					mutex_enter(&fptr->fcip_mutex);
2638 					fptr->fcip_farp_rsp_flag = 1;
2639 					cv_signal(&fptr->fcip_farp_cv);
2640 					mutex_exit(&fptr->fcip_mutex);
2641 					rval = FC_SUCCESS;
2642 					goto els_cb_done;
2643 				} else {
2644 					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2645 					    "Unable to handle a FARP response "
2646 					    "receive"));
2647 					rval = FC_UNCLAIMED;
2648 					goto els_cb_done;
2649 				}
2650 			}
2651 		}
2652 		break;
2653 	default:
2654 		break;
2655 	}
2656 els_cb_done:
2657 	mutex_enter(&fptr->fcip_mutex);
2658 	fptr->fcip_flags &= ~(FCIP_IN_ELS_CB);
2659 	mutex_exit(&fptr->fcip_mutex);
2660 	return (rval);
2661 }
2662 
2663 
2664 /*
2665  * Handle inbound FARP requests
2666  */
2667 static int
2668 fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd)
2669 {
2670 	fcip_pkt_t		*fcip_pkt;
2671 	fc_packet_t		*fc_pkt;
2672 	fcip_port_info_t	*fport = fptr->fcip_port_info;
2673 	int			rval = FC_FAILURE;
2674 	opaque_t		fca_dev;
2675 	fc_portmap_t 		map;
2676 	struct fcip_routing_table *frp;
2677 	struct fcip_dest *fdestp;
2678 
2679 	/*
2680 	 * Add an entry for the remote port into our routing and destination
2681 	 * tables.
2682 	 */
2683 	map.map_did = fcmd->req_id;
2684 	map.map_hard_addr.hard_addr = fcmd->req_id.port_id;
2685 	map.map_state = PORT_DEVICE_VALID;
2686 	map.map_type = PORT_DEVICE_NEW;
2687 	map.map_flags = 0;
2688 	map.map_pd = NULL;
2689 	bcopy((void *)&fcmd->req_pwwn, (void *)&map.map_pwwn,
2690 	    sizeof (la_wwn_t));
2691 	bcopy((void *)&fcmd->req_nwwn, (void *)&map.map_nwwn,
2692 	    sizeof (la_wwn_t));
2693 	fcip_rt_update(fptr, &map, 1);
2694 	mutex_enter(&fptr->fcip_rt_mutex);
2695 	frp = fcip_lookup_rtable(fptr, &fcmd->req_pwwn, FCIP_COMPARE_NWWN);
2696 	mutex_exit(&fptr->fcip_rt_mutex);
2697 
2698 	fdestp = fcip_add_dest(fptr, frp);
2699 
2700 	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
2701 	    sizeof (la_els_farp_t), NULL, KM_SLEEP);
2702 	if (fcip_pkt == NULL) {
2703 		rval = FC_FAILURE;
2704 		goto farp_done;
2705 	}
2706 	/*
2707 	 * Fill in our port's PWWN and NWWN
2708 	 */
2709 	fcmd->resp_pwwn = fport->fcipp_pwwn;
2710 	fcmd->resp_nwwn = fport->fcipp_nwwn;
2711 
2712 	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
2713 	    fcmd->req_id, NULL);
2714 
2715 	fca_dev =
2716 	    fc_ulp_get_fca_device(fport->fcipp_handle, fcmd->req_id);
2717 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
2718 	fc_pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_RSP;
2719 	fc_pkt->pkt_fca_device = fca_dev;
2720 	fcip_pkt->fcip_pkt_dest = fdestp;
2721 
2722 	/*
2723 	 * Attempt a PLOGI again
2724 	 */
2725 	if (fcmd->resp_flags & FARP_INIT_P_LOGI) {
2726 		if (fcip_do_plogi(fptr, frp) != FC_SUCCESS) {
2727 			/*
2728 			 * Login to the remote port failed. There is no
2729 			 * point continuing with the FARP request further
2730 			 * so bail out here.
2731 			 */
2732 			frp->fcipr_state = PORT_DEVICE_INVALID;
2733 			rval = FC_FAILURE;
2734 			goto farp_done;
2735 		}
2736 	}
2737 
2738 	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
2739 	    sizeof (la_els_farp_t));
2740 
2741 	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
2742 	if (rval != FC_SUCCESS) {
2743 		FCIP_TNF_PROBE_2((fcip_handle_farp_request, "fcip io",
2744 		    /* CSTYLED */, tnf_string, msg,
2745 		    "fcip_transport of farp reply failed",
2746 		    tnf_uint, rval, rval));
2747 		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2748 		    "fcip_transport of farp reply failed 0x%x", rval));
2749 	}
2750 
2751 farp_done:
2752 	return (rval);
2753 }
2754 
2755 
2756 /*
2757  * Handle FARP responses to our FARP requests. When we receive a FARP
2758  * reply, we need to add the entry for the Port that replied into our
2759  * routing and destination hash tables. It is possible that the remote
2760  * port did not log in to us (FARP responses can be received without
2761  * a PLOGI).
2762  */
2763 static int
2764 fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd)
2765 {
2766 	int			rval = FC_FAILURE;
2767 	fc_portmap_t 		map;
2768 	struct fcip_routing_table *frp;
2769 	struct fcip_dest *fdestp;
2770 
2771 	/*
2772 	 * Add an entry for the remote port into our routing and destination
2773 	 * tables.
2774 	 */
2775 	map.map_did = fcmd->dest_id;
2776 	map.map_hard_addr.hard_addr = fcmd->dest_id.port_id;
2777 	map.map_state = PORT_DEVICE_VALID;
2778 	map.map_type = PORT_DEVICE_NEW;
2779 	map.map_flags = 0;
2780 	map.map_pd = NULL;
2781 	bcopy((void *)&fcmd->resp_pwwn, (void *)&map.map_pwwn,
2782 	    sizeof (la_wwn_t));
2783 	bcopy((void *)&fcmd->resp_nwwn, (void *)&map.map_nwwn,
2784 	    sizeof (la_wwn_t));
2785 	fcip_rt_update(fptr, &map, 1);
2786 	mutex_enter(&fptr->fcip_rt_mutex);
2787 	frp = fcip_lookup_rtable(fptr, &fcmd->resp_pwwn, FCIP_COMPARE_NWWN);
2788 	mutex_exit(&fptr->fcip_rt_mutex);
2789 
2790 	fdestp = fcip_add_dest(fptr, frp);
2791 
2792 	if (fdestp != NULL) {
2793 		rval = FC_SUCCESS;
2794 	}
2795 	return (rval);
2796 }
2797 
2798 
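/*
 * Combined length of the FC network header, LLC/SNAP header and IP header
 * that precede the IP payload in an inbound frame.
 */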
2799 #define	FCIP_HDRS_LENGTH	\
2800 	sizeof (fcph_network_hdr_t)+sizeof (llc_snap_hdr_t)+sizeof (ipha_t)
2801 
2802 /*
2803  * fcip_data_cb is the heart of most IP operations. This routine is called
2804  * by the transport when any unsolicited IP data arrives at a port (which
2805  * is almost all IP data). This routine then strips off the Network header
2806  * from the payload (after authenticating the received payload of course),
2807  * creates a message blk and sends the data upstream. You will see ugly
2808  * #defines because of problems with using esballoc() as opposed to
2809  * allocb() to prevent an extra copy of data. We should probably move to
2810  * esballoc entirely once the MTU eventually grows larger than 1500 bytes
2811  * since copies will get more expensive then. At 1500 byte MTUs, there is
2812  * no noticeable difference between using allocb and esballoc. The other
2813  * caveat is that the qlc firmware still cannot tell us accurately the
2814  * no. of valid bytes in the unsol buffer it DMA'ed, so we have to resort
2815  * to looking into the IP header and hoping that the no. of bytes specified
2816  * in the header was actually received.
2817  */
2818 /* ARGSUSED */
2819 static int
2820 fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
2821     fc_unsol_buf_t *buf, uint32_t claimed)
2822 {
2823 	fcip_port_info_t		*fport;
2824 	struct fcip 			*fptr;
2825 	fcph_network_hdr_t		*nhdr;
2826 	llc_snap_hdr_t			*snaphdr;
2827 	mblk_t				*bp;
2828 	uint32_t 			len;
2829 	uint32_t			hdrlen;
2830 	ushort_t			type;
2831 	ipha_t				*iphdr;
2832 	int				rval;
2833 
2834 #ifdef FCIP_ESBALLOC
2835 	frtn_t				*free_ubuf;
2836 	struct fcip_esballoc_arg	*fesb_argp;
2837 #endif /* FCIP_ESBALLOC */
2838 
2839 	fport = fcip_get_port(phandle);
2840 	if (fport == NULL) {
2841 		return (FC_UNCLAIMED);
2842 	}
2843 
2844 	fptr = fport->fcipp_fcip;
2845 	ASSERT(fptr != NULL);
2846 
2847 	if (fptr == NULL) {
2848 		return (FC_UNCLAIMED);
2849 	}
2850 
2851 	mutex_enter(&fptr->fcip_mutex);
2852 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
2853 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
2854 		mutex_exit(&fptr->fcip_mutex);
2855 		rval = FC_UNCLAIMED;
2856 		goto data_cb_done;
2857 	}
2858 
2859 	/*
2860 	 * set fcip flags to indicate we are in the middle of a
2861 	 * data callback so we can wait till the statechange
2862 	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2863 	 */
2864 	fptr->fcip_flags |= FCIP_IN_DATA_CB;
2865 	mutex_exit(&fptr->fcip_mutex);
2866 
2867 	FCIP_TNF_PROBE_2((fcip_data_cb, "fcip io", /* CSTYLED */,
2868 		tnf_string, msg, "data callback",
2869 		tnf_int, instance, ddi_get_instance(fport->fcipp_dip)));
2870 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2871 	    (CE_NOTE, "fcip%d, data callback",
2872 	    ddi_get_instance(fport->fcipp_dip)));
2873 
2874 	/*
2875 	 * get to the network and snap headers in the payload
2876 	 */
2877 	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
2878 	snaphdr = (llc_snap_hdr_t *)(buf->ub_buffer +
2879 	    sizeof (fcph_network_hdr_t));
2880 
2881 	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
2882 
2883 	/*
2884 	 * get the IP header to obtain the no. of bytes we need to read
2885 	 * off the unsol buffer. This is because not all
2886 	 * data fills up the unsol buffer completely and the firmware
2887 	 * doesn't tell us how many valid bytes are in there either.
2888 	 */
2889 	iphdr = (ipha_t *)(buf->ub_buffer + hdrlen);
2890 	snaphdr->pid = BE_16(snaphdr->pid);
2891 	type = snaphdr->pid;
2892 
2893 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2894 	    (CE_CONT, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
2895 	    snaphdr->dsap, snaphdr->ssap, snaphdr->ctrl));
2896 
2897 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2898 	    (CE_CONT, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
2899 	    snaphdr->oui[0], snaphdr->oui[1], snaphdr->oui[2], snaphdr->pid));
2900 
2901 	/* Authenticate, Authenticate */
2902 	if (type == ETHERTYPE_IP) {
2903 		len = hdrlen + BE_16(iphdr->ipha_length);
2904 	} else if (type == ETHERTYPE_ARP) {
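		/* 28 bytes is the size of an IPv4-over-Ethernet ARP payload */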
2905 		len = hdrlen + 28;
2906 	} else {
2907 		len = buf->ub_bufsize;
2908 	}
2909 
2910 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2911 	    (CE_CONT, "effective packet length is %d bytes.\n", len));
2912 
2913 	if (len < hdrlen || len > FCIP_UB_SIZE) {
2914 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2915 		    (CE_NOTE, "Incorrect buffer size %d bytes", len));
2916 		rval = FC_UNCLAIMED;
2917 		goto data_cb_done;
2918 	}
2919 
2920 	if (buf->ub_frame.type != FC_TYPE_IS8802_SNAP) {
2921 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Not IP/ARP data"));
2922 		rval = FC_UNCLAIMED;
2923 		goto data_cb_done;
2924 	}
2925 
2926 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "checking wwn"));
2927 
2928 	if ((fcip_wwn_compare(&nhdr->net_dest_addr, &fport->fcipp_pwwn,
2929 	    FCIP_COMPARE_NWWN) != 0) &&
2930 	    (!IS_BROADCAST_ADDR(&nhdr->net_dest_addr))) {
2931 		rval = FC_UNCLAIMED;
2932 		goto data_cb_done;
2933 	} else if (fcip_cache_on_arp_broadcast &&
2934 	    IS_BROADCAST_ADDR(&nhdr->net_dest_addr)) {
2935 		fcip_cache_arp_broadcast(fptr, buf);
2936 	}
2937 
2938 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Allocate streams block"));
2939 
2940 	/*
2941 	 * Using esballoc instead of allocb should be faster, at least at
2942 	 * MTUs larger than 1500 bytes. Someday we'll get there :)
2943 	 */
2944 #if defined(FCIP_ESBALLOC)
2945 	/*
2946 	 * allocate memory for the frtn function arg. The free function
2947 	 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
2948 	 * which contains pointers to the unsol buffer and the
2949 	 * opaque port handle for releasing the unsol buffer back to
2950 	 * the FCA for reuse
2951 	 */
2952 	fesb_argp = (struct fcip_esballoc_arg *)
2953 	    kmem_zalloc(sizeof (struct fcip_esballoc_arg), KM_NOSLEEP);
2954 
2955 	if (fesb_argp == NULL) {
2956 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2957 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2958 		rval = FC_UNCLAIMED;
2959 		goto data_cb_done;
2960 	}
2961 	/*
2962 	 * Allocate the free routine structure, also with KM_NOSLEEP
2963 	 */
2964 	free_ubuf = (frtn_t *)kmem_zalloc(sizeof (frtn_t), KM_NOSLEEP);
2965 	if (free_ubuf == NULL) {
2966 		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2967 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2968 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2969 		rval = FC_UNCLAIMED;
2970 		goto data_cb_done;
2971 	}
2972 
2973 	fesb_argp->frtnp = free_ubuf;
2974 	fesb_argp->buf = buf;
2975 	fesb_argp->phandle = phandle;
2976 	free_ubuf->free_func = fcip_ubfree;
2977 	free_ubuf->free_arg = (char *)fesb_argp;
2978 	if ((bp = (mblk_t *)esballoc((unsigned char *)buf->ub_buffer,
2979 	    len, BPRI_MED, free_ubuf)) == NULL) {
2980 		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2981 		kmem_free(free_ubuf, sizeof (frtn_t));
2982 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2983 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2984 		rval = FC_UNCLAIMED;
2985 		goto data_cb_done;
2986 	}
2987 #elif !defined(FCIP_ESBALLOC)
2988 	/*
2989 	 * allocate streams mblk and copy the contents of the
2990 	 * unsolicited buffer into this newly alloc'ed mblk
2991 	 */
2992 	if ((bp = (mblk_t *)fcip_allocb((size_t)len, BPRI_LO)) == NULL) {
2993 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2994 		    (CE_WARN, "alloc of mblk failed in data_cb"));
2995 		rval = FC_UNCLAIMED;
2996 		goto data_cb_done;
2997 	}
2998 
2999 	/*
3000 	 * Unsolicited buffers handed up to us from the FCA must be
3001 	 * endian clean so just bcopy the data into our mblk. Else
3002 	 * we may have to either copy the data byte by byte or
3003 	 * use the ddi_rep_get* routines to do the copy for us.
3004 	 */
3005 	bcopy(buf->ub_buffer, bp->b_rptr, len);
3006 
3007 	/*
3008 	 * for esballoc'ed mblks - free the UB in the frtn function
3009 	 * along with the memory allocated for the function arg.
3010 	 * for allocb'ed mblk - release the unsolicited buffer here
3011 	 */
3012 	(void) fc_ulp_ubrelease(phandle, 1, &buf->ub_token);
3013 
3014 #endif	/* FCIP_ESBALLOC */
3015 
3016 	bp->b_wptr = bp->b_rptr + len;
3017 	fptr->fcip_ipackets++;
3018 
3019 	if (type == ETHERTYPE_IP) {
3020 		mutex_enter(&fptr->fcip_mutex);
3021 		fptr->fcip_ub_upstream++;
3022 		mutex_exit(&fptr->fcip_mutex);
3023 		bp->b_rptr += hdrlen;
3024 
3025 		/*
3026 		 * Check if ipq is valid in the sendup thread
3027 		 */
3028 		if (fcip_sendup_alloc_enque(fptr, bp, NULL) != FC_SUCCESS) {
3029 			freemsg(bp);
3030 		}
3031 	} else {
3032 		/*
3033 		 * We won't get ethernet 802.3 packets in FCIP but we may get
3034 		 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
3035 		 * fcip_sendup() do the matching.
3036 		 */
3037 		mutex_enter(&fptr->fcip_mutex);
3038 		fptr->fcip_ub_upstream++;
3039 		mutex_exit(&fptr->fcip_mutex);
3040 		if (fcip_sendup_alloc_enque(fptr, bp,
3041 		    fcip_accept) != FC_SUCCESS) {
3042 			freemsg(bp);
3043 		}
3044 	}
3045 
3046 	rval = FC_SUCCESS;
3047 
3048 	/*
3049 	 * Unset fcip_flags to indicate we are out of callback and return
3050 	 */
3051 data_cb_done:
3052 	mutex_enter(&fptr->fcip_mutex);
3053 	fptr->fcip_flags &= ~(FCIP_IN_DATA_CB);
3054 	mutex_exit(&fptr->fcip_mutex);
3055 	return (rval);
3056 }
3057 
3058 #if !defined(FCIP_ESBALLOC)
3059 /*
3060  * Allocate a message block for the inbound data to be sent upstream.
3061  */
3062 static void *
3063 fcip_allocb(size_t size, uint_t pri)
3064 {
3065 	mblk_t	*mp;
3066 
3067 	if ((mp = allocb(size, pri)) == NULL) {
3068 		return (NULL);
3069 	}
3070 	return (mp);
3071 }
3072 
3073 #endif
3074 
3075 /*
3076  * This helper routine kmem cache alloc's a sendup element for enqueuing
3077  * into the sendup list for callbacks upstream from the dedicated sendup
3078  * thread. We enqueue the msg buf into the sendup list and cv_signal the
3079  * sendup thread to finish the callback for us.
3080  */
3081 static int
3082 fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*f)())
3083 {
3084 	struct fcip_sendup_elem 	*msg_elem;
3085 	int				rval = FC_FAILURE;
3086 
3087 	FCIP_TNF_PROBE_1((fcip_sendup_alloc_enque, "fcip io", /* CSTYLED */,
3088 		tnf_string, msg, "sendup msg enque"));
3089 	msg_elem = kmem_cache_alloc(fptr->fcip_sendup_cache, KM_NOSLEEP);
3090 	if (msg_elem == NULL) {
3091 		/* drop pkt to floor - update stats */
3092 		rval = FC_FAILURE;
3093 		goto sendup_alloc_done;
3094 	}
3095 	msg_elem->fcipsu_mp = mp;
3096 	msg_elem->fcipsu_func = f;
3097 
3098 	mutex_enter(&fptr->fcip_sendup_mutex);
3099 	if (fptr->fcip_sendup_head == NULL) {
3100 		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = msg_elem;
3101 	} else {
3102 		fptr->fcip_sendup_tail->fcipsu_next = msg_elem;
3103 		fptr->fcip_sendup_tail = msg_elem;
3104 	}
3105 	fptr->fcip_sendup_cnt++;
3106 	cv_signal(&fptr->fcip_sendup_cv);
3107 	mutex_exit(&fptr->fcip_sendup_mutex);
3108 	rval = FC_SUCCESS;
3109 
3110 sendup_alloc_done:
3111 	return (rval);
3112 }
3113 
3114 /*
3115  * One of the ways of performing the WWN to D_ID mapping required for
3116  * IPFC data is to cache the unsolicited ARP broadcast messages received
3117  * and update the routing table to add an entry for the destination port
3118  * if we are the intended recipient of the ARP broadcast message. This is
3119  * one of the methods recommended in the RFC to obtain the WWN to D_ID mapping
3120  * but is not typically used unless enabled. The driver prefers to use the
3121  * nameserver/lilp map to obtain this mapping.
3122  */
3123 static void
3124 fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf)
3125 {
3126 	fcip_port_info_t		*fport;
3127 	fcph_network_hdr_t		*nhdr;
3128 	struct fcip_routing_table	*frp;
3129 	fc_portmap_t			map;
3130 
3131 	fport = fptr->fcip_port_info;
3132 	if (fport == NULL) {
3133 		return;
3134 	}
3135 	ASSERT(fport != NULL);
3136 
3137 	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
3138 
3139 	mutex_enter(&fptr->fcip_rt_mutex);
3140 	frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, FCIP_COMPARE_NWWN);
3141 	mutex_exit(&fptr->fcip_rt_mutex);
3142 	if (frp == NULL) {
3143 		map.map_did.port_id = buf->ub_frame.s_id;
3144 		map.map_hard_addr.hard_addr = buf->ub_frame.s_id;
3145 		map.map_state = PORT_DEVICE_VALID;
3146 		map.map_type = PORT_DEVICE_NEW;
3147 		map.map_flags = 0;
3148 		map.map_pd = NULL;
3149 		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_pwwn,
3150 		    sizeof (la_wwn_t));
3151 		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_nwwn,
3152 		    sizeof (la_wwn_t));
3153 		fcip_rt_update(fptr, &map, 1);
3154 		mutex_enter(&fptr->fcip_rt_mutex);
3155 		frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr,
3156 		    FCIP_COMPARE_NWWN);
3157 		mutex_exit(&fptr->fcip_rt_mutex);
3158 
3159 		(void) fcip_add_dest(fptr, frp);
3160 	}
3161 
3162 }
3163 
3164 /*
3165  * This is a dedicated thread to do callbacks from fcip's data callback
3166  * routines into the modules upstream. The reason for this thread is
3167  * the data callback function can be called from an interrupt context and
3168  * the upstream modules *can* make calls downstream in the same thread
3169  * context. If the call is to a fabric port which is not yet in our
3170  * routing tables, we may have to query the nameserver/fabric for the
3171  * MAC addr to Port_ID mapping which may be blocking calls.
3172  */
3173 static void
3174 fcip_sendup_thr(void *arg)
3175 {
3176 	struct fcip		*fptr = (struct fcip *)arg;
3177 	struct fcip_sendup_elem	*msg_elem;
3178 	queue_t			*ip4q = NULL;
3179 
3180 	CALLB_CPR_INIT(&fptr->fcip_cpr_info, &fptr->fcip_sendup_mutex,
3181 	    callb_generic_cpr, "fcip_sendup_thr");
3182 
3183 	mutex_enter(&fptr->fcip_sendup_mutex);
3184 	for (;;) {
3185 
3186 		while (fptr->fcip_sendup_thr_initted &&
3187 		    fptr->fcip_sendup_head == NULL) {
3188 			CALLB_CPR_SAFE_BEGIN(&fptr->fcip_cpr_info);
3189 			cv_wait(&fptr->fcip_sendup_cv,
3190 			    &fptr->fcip_sendup_mutex);
3191 			CALLB_CPR_SAFE_END(&fptr->fcip_cpr_info,
3192 			    &fptr->fcip_sendup_mutex);
3193 		}
3194 
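		/*
		 * fcip_sendup_thr_initted is cleared when the instance is
		 * being torn down, so bail out and let the thread exit.
		 */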
3195 		if (fptr->fcip_sendup_thr_initted == 0) {
3196 			break;
3197 		}
3198 
3199 		FCIP_TNF_PROBE_1((fcip_sendup_thr, "fcip io", /* CSTYLED */,
3200 		    tnf_string, msg, "fcip sendup thr - new msg"));
3201 
3202 		msg_elem = fptr->fcip_sendup_head;
3203 		fptr->fcip_sendup_head = msg_elem->fcipsu_next;
3204 		msg_elem->fcipsu_next = NULL;
3205 		mutex_exit(&fptr->fcip_sendup_mutex);
3206 
3207 		if (msg_elem->fcipsu_func == NULL) {
3208 			/*
3209 			 * Message for ipq. Check to see if the ipq
3210 			 * is still valid. Since the thread is asynchronous,
3211 			 * there could have been a close on the stream.
3212 			 */
3213 			mutex_enter(&fptr->fcip_mutex);
3214 			if (fptr->fcip_ipq && canputnext(fptr->fcip_ipq)) {
3215 				ip4q = fptr->fcip_ipq;
3216 				mutex_exit(&fptr->fcip_mutex);
3217 				putnext(ip4q, msg_elem->fcipsu_mp);
3218 			} else {
3219 				mutex_exit(&fptr->fcip_mutex);
3220 				freemsg(msg_elem->fcipsu_mp);
3221 			}
3222 		} else {
3223 			fcip_sendup(fptr, msg_elem->fcipsu_mp,
3224 			    msg_elem->fcipsu_func);
3225 		}
3226 
3227 #if !defined(FCIP_ESBALLOC)
3228 		/*
3229 		 * for allocb'ed mblk - decrement upstream count here
3230 		 */
3231 		mutex_enter(&fptr->fcip_mutex);
3232 		ASSERT(fptr->fcip_ub_upstream > 0);
3233 		fptr->fcip_ub_upstream--;
3234 		mutex_exit(&fptr->fcip_mutex);
3235 #endif /* FCIP_ESBALLOC */
3236 
3237 		kmem_cache_free(fptr->fcip_sendup_cache, (void *)msg_elem);
3238 		mutex_enter(&fptr->fcip_sendup_mutex);
3239 		fptr->fcip_sendup_cnt--;
3240 	}
3241 
3242 
3243 #ifndef	__lock_lint
3244 	CALLB_CPR_EXIT(&fptr->fcip_cpr_info);
3245 #else
3246 	mutex_exit(&fptr->fcip_sendup_mutex);
3247 #endif /* __lock_lint */
3248 
3249 	/* Wake up fcip detach thread by the end */
3250 	cv_signal(&fptr->fcip_sendup_cv);
3251 
3252 	thread_exit();
3253 }
3254 
3255 #ifdef FCIP_ESBALLOC
3256 
3257 /*
3258  * called from the stream head when it is done using an unsolicited buffer.
3259  * We then release the buffer to the FCA for reuse.
3260  */
3261 static void
3262 fcip_ubfree(char *arg)
3263 {
3264 	struct fcip_esballoc_arg *fesb_argp = (struct fcip_esballoc_arg *)arg;
3265 	fc_unsol_buf_t	*ubuf;
3266 	frtn_t		*frtnp;
3267 	fcip_port_info_t		*fport;
3268 	struct fcip 			*fptr;
3269 
3270 
3271 	fport = fcip_get_port(fesb_argp->phandle);
3272 	fptr = fport->fcipp_fcip;
3273 
3274 	ASSERT(fesb_argp != NULL);
3275 	ubuf = fesb_argp->buf;
3276 	frtnp = fesb_argp->frtnp;
3277 
3278 
3279 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
3280 	    (CE_WARN, "freeing ubuf after esballoc in fcip_ubfree"));
3281 	(void) fc_ulp_ubrelease(fesb_argp->phandle, 1, &ubuf->ub_token);
3282 
3283 	mutex_enter(&fptr->fcip_mutex);
3284 	ASSERT(fptr->fcip_ub_upstream > 0);
3285 	fptr->fcip_ub_upstream--;
3286 	cv_signal(&fptr->fcip_ub_cv);
3287 	mutex_exit(&fptr->fcip_mutex);
3288 
3289 	kmem_free(frtnp, sizeof (frtn_t));
3290 	kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
3291 }
3292 
3293 #endif /* FCIP_ESBALLOC */
3294 
3295 /*
3296  * handle data other than that of type ETHERTYPE_IP and send it on its
3297  * way upstream to the right streams module for handling
3298  */
3299 static void
3300 fcip_sendup(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*acceptfunc)())
3301 {
3302 	struct fcipstr	*slp, *nslp;
3303 	la_wwn_t	*dhostp;
3304 	mblk_t		*nmp;
3305 	uint32_t 	isgroupaddr;
3306 	int 		type;
3307 	uint32_t	hdrlen;
3308 	fcph_network_hdr_t	*nhdr;
3309 	llc_snap_hdr_t		*snaphdr;
3310 
3311 	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3312 		tnf_string, msg, "fcip sendup"));
3313 	nhdr = (fcph_network_hdr_t *)mp->b_rptr;
3314 	snaphdr =
3315 	    (llc_snap_hdr_t *)(mp->b_rptr + sizeof (fcph_network_hdr_t));
3316 	dhostp = &nhdr->net_dest_addr;
3317 	type = snaphdr->pid;
3318 	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
3319 
3320 	/* No group address with fibre channel */
3321 	isgroupaddr = 0;
3322 
3323 	/*
3324 	 * While holding a reader lock on the linked list of streams structures,
3325 	 * attempt to match the address criteria for each stream
3326 	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
3327 	 */
3328 
3329 	rw_enter(&fcipstruplock, RW_READER);
3330 
3331 	if ((slp = (*acceptfunc)(fcipstrup, fptr, type, dhostp)) == NULL) {
3332 		rw_exit(&fcipstruplock);
3333 		FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3334 		    tnf_string, msg, "fcip sendup - no slp"));
3335 		freemsg(mp);
3336 		return;
3337 	}
3338 
3339 	/*
3340 	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
3341 	 */
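	/*
	 * Each additional match gets a dupmsg() copy of the data; the last
	 * matching stream (handled below) receives the original mblk.
	 */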
3342 	for (; nslp = (*acceptfunc)(slp->sl_nextp, fptr, type, dhostp);
3343 	    slp = nslp) {
3344 		if (canputnext(slp->sl_rq)) {
3345 			if (nmp = dupmsg(mp)) {
3346 				if ((slp->sl_flags & FCIP_SLFAST) &&
3347 							!isgroupaddr) {
3348 					nmp->b_rptr += hdrlen;
3349 					putnext(slp->sl_rq, nmp);
3350 				} else if (slp->sl_flags & FCIP_SLRAW) {
3351 					/* No headers when FCIP_SLRAW is set */
3352 					putnext(slp->sl_rq, nmp);
3353 				} else if ((nmp = fcip_addudind(fptr, nmp,
3354 				    nhdr, type))) {
3355 					putnext(slp->sl_rq, nmp);
3356 				}
3357 			}
3358 		}
3359 	}
3360 
3361 	/*
3362 	 * Do the last one.
3363 	 */
3364 	if (canputnext(slp->sl_rq)) {
3365 		if (slp->sl_flags & FCIP_SLFAST) {
3366 			mp->b_rptr += hdrlen;
3367 			putnext(slp->sl_rq, mp);
3368 		} else if (slp->sl_flags & FCIP_SLRAW) {
3369 			putnext(slp->sl_rq, mp);
3370 		} else if ((mp = fcip_addudind(fptr, mp, nhdr, type))) {
3371 			putnext(slp->sl_rq, mp);
3372 		}
3373 	} else {
3374 		freemsg(mp);
3375 	}
3376 	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3377 	    tnf_string, msg, "fcip sendup done"));
3378 
3379 	rw_exit(&fcipstruplock);
3380 }
3381 
3382 /*
3383  * Match the stream based on type and wwn if necessary.
3384  * The destination wwn dhostp passed to this routine is reserved
3385  * for future use. We don't need it right now since the port
3386  * to fcip instance mapping is unique and the wwn is already validated when
3387  * a packet comes to fcip.
3388  */
3389 /* ARGSUSED */
3390 static struct fcipstr *
3391 fcip_accept(struct fcipstr *slp, struct fcip *fptr, int type, la_wwn_t *dhostp)
3392 {
3393 	t_uscalar_t 	sap;
3394 
3395 	FCIP_TNF_PROBE_1((fcip_accept, "fcip io", /* CSTYLED */,
3396 	    tnf_string, msg, "fcip accept"));
3397 
3398 	for (; slp; slp = slp->sl_nextp) {
3399 		sap = slp->sl_sap;
3400 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_CONT,
3401 		    "fcip_accept: checking next sap = %x, type = %x",
3402 		    sap, type));
3403 
3404 		if ((slp->sl_fcip == fptr) && (type == sap)) {
3405 			return (slp);
3406 		}
3407 	}
3408 	return (NULL);
3409 }
3410 
3411 /*
3412  * Prepend a DL_UNITDATA_IND to a message being passed upstream
3413  */
3414 static mblk_t *
3415 fcip_addudind(struct fcip *fptr, mblk_t *mp, fcph_network_hdr_t *nhdr,
3416     int type)
3417 {
3418 	dl_unitdata_ind_t	*dludindp;
3419 	struct	fcipdladdr	*dlap;
3420 	mblk_t	*nmp;
3421 	int	size;
3422 	uint32_t hdrlen;
3423 	struct ether_addr	src_addr;
3424 	struct ether_addr	dest_addr;
3425 
3426 
3427 	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
3428 	mp->b_rptr += hdrlen;
3429 
3430 	FCIP_TNF_PROBE_1((fcip_addudind, "fcip io", /* CSTYLED */,
3431 	    tnf_string, msg, "fcip addudind"));
3432 
3433 	/*
3434 	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
3435 	 */
3436 	size = sizeof (dl_unitdata_ind_t) + FCIPADDRL + FCIPADDRL;
3437 	if ((nmp = allocb(size, BPRI_LO)) == NULL) {
3438 		fptr->fcip_allocbfail++;
3439 		freemsg(mp);
3440 		return (NULL);
3441 	}
3442 	DB_TYPE(nmp) = M_PROTO;
3443 	nmp->b_wptr = nmp->b_datap->db_lim;
3444 	nmp->b_rptr = nmp->b_wptr - size;
3445 
3446 	/*
3447 	 * Construct a DL_UNITDATA_IND primitive.
3448 	 */
3449 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3450 	dludindp->dl_primitive = DL_UNITDATA_IND;
3451 	dludindp->dl_dest_addr_length = FCIPADDRL;
3452 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3453 	dludindp->dl_src_addr_length = FCIPADDRL;
3454 	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + FCIPADDRL;
3455 	dludindp->dl_group_address = 0;		/* not DL_MULTI */
3456 
3457 	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
3458 	wwn_to_ether(&nhdr->net_dest_addr, &dest_addr);
3459 	ether_bcopy(&dest_addr, &dlap->dl_phys);
3460 	dlap->dl_sap = (uint16_t)type;
3461 
3462 	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
3463 		+ FCIPADDRL);
3464 	wwn_to_ether(&nhdr->net_src_addr, &src_addr);
3465 	ether_bcopy(&src_addr, &dlap->dl_phys);
3466 	dlap->dl_sap = (uint16_t)type;
3467 
3468 	/*
3469 	 * Link the M_PROTO and M_DATA together.
3470 	 */
3471 	nmp->b_cont = mp;
3472 	return (nmp);
3473 }
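/*
 * Rough layout of the message fcip_addudind() hands back (sketch only):
 *
 *	M_PROTO mblk				M_DATA mblk (via b_cont)
 *	+-------------------------------+	+---------------------------+
 *	| dl_unitdata_ind_t		|	| payload, with the network |
 *	| dest fcipdladdr (FCIPADDRL)	| --->	| and LLC/SNAP headers	    |
 *	| src  fcipdladdr (FCIPADDRL)	|	| already stripped	    |
 *	+-------------------------------+	+---------------------------+
 *
 * Both fcipdladdr entries carry the MAC address derived from the WWN
 * plus the SAP taken from the SNAP pid.
 */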
3474 
3475 
3476 /*
3477  * The open routine. For clone opens, we return the next available minor
3478  * no. for the stream to use
3479  */
3480 /* ARGSUSED */
3481 static int
3482 fcip_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
3483 {
3484 	struct fcipstr	*slp;
3485 	struct fcipstr	**prevslp;
3486 	minor_t	minor;
3487 
3488 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_open"));
3489 	FCIP_TNF_PROBE_1((fcip_open, "fcip io", /* CSTYLED */,
3490 		tnf_string, msg, "enter"));
3491 	/*
3492 	 * We need to ensure that the port driver is loaded before
3493 	 * we proceed
3494 	 */
3495 	if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER)) == NULL) {
3496 		/* no port driver instances found */
3497 		FCIP_DEBUG(FCIP_DEBUG_STARTUP, (CE_WARN,
3498 		    "!ddi_hold_installed_driver of fp failed\n"));
3499 		return (ENXIO);
3500 	}
3501 	/* serialize opens */
3502 	rw_enter(&fcipstruplock, RW_WRITER);
3503 
3504 	prevslp = &fcipstrup;
3505 	if (sflag == CLONEOPEN) {
3506 		minor = 0;
3507 		for (; (slp = *prevslp) != NULL; prevslp = &slp->sl_nextp) {
3508 			if (minor < slp->sl_minor) {
3509 				break;
3510 			}
3511 			minor ++;
3512 		}
3513 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
3514 		    "getmajor returns 0x%x", getmajor(*devp)));
3515 		*devp = makedevice(getmajor(*devp), minor);
3516 	} else {
3517 		minor = getminor(*devp);
3518 	}
3519 
3520 	/*
3521 	 * check if our qp's private area is already initialized. If yes
3522 	 * the stream is already open - just return
3523 	 */
3524 	if (rq->q_ptr) {
3525 		goto done;
3526 	}
3527 
3528 	slp = GETSTRUCT(struct fcipstr, 1);
3529 	slp->sl_minor = minor;
3530 	slp->sl_rq = rq;
3531 	slp->sl_sap = 0;
3532 	slp->sl_flags = 0;
3533 	slp->sl_state = DL_UNATTACHED;
3534 	slp->sl_fcip = NULL;
3535 
3536 	mutex_init(&slp->sl_lock, NULL, MUTEX_DRIVER, NULL);
3537 
3538 	/*
3539 	 * link this new stream entry into list of active streams
3540 	 */
3541 	slp->sl_nextp = *prevslp;
3542 	*prevslp = slp;
3543 
3544 	rq->q_ptr = WR(rq)->q_ptr = (char *)slp;
3545 
3546 	/*
3547 	 * Disable automatic enabling of our write service procedures;
3548 	 * we need to control this explicitly. This prevents anyone
3549 	 * else from scheduling our write service procedures.
3550 	 */
3551 	noenable(WR(rq));
3552 
3553 done:
3554 	rw_exit(&fcipstruplock);
3555 	/*
3556 	 * enable our put and service routines on the read side
3557 	 */
3558 	qprocson(rq);
3559 
3560 	/*
3561 	 * There is only one instance of fcip (instance = 0)
3562 	 * for multiple instances of hardware
3563 	 */
3564 	(void) qassociate(rq, 0);	/* don't allow drcompat to be pushed */
3565 	return (0);
3566 }
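/*
 * A note on the clone-open minor allocation above: the loop walks the
 * stream list and stops at the first gap in the minor numbers, and the
 * new entry is linked in at that point. This presumes the list stays
 * ordered by minor number, which clone opens preserve by inserting at
 * the spot where the search stopped.
 */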
3567 
3568 /*
3569  * close an opened stream. The minor no. will then be available for
3570  * future opens.
3571  */
3572 /* ARGSUSED */
3573 static int
3574 fcip_close(queue_t *rq, int flag, cred_t *credp)
3575 {
3576 	struct fcipstr *slp;
3577 	struct fcipstr **prevslp;
3578 
3579 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_close"));
3580 	FCIP_TNF_PROBE_1((fcip_close, "fcip io", /* CSTYLED */,
3581 		tnf_string, msg, "enter"));
3582 	ASSERT(rq);
3583 	/* we should also have the active stream pointer in q_ptr */
3584 	ASSERT(rq->q_ptr);
3585 
3586 	ddi_rele_driver(ddi_name_to_major(PORT_DRIVER));
3587 	/*
3588 	 * disable our put and service procedures. We had enabled them
3589 	 * on open
3590 	 */
3591 	qprocsoff(rq);
3592 	slp = (struct fcipstr *)rq->q_ptr;
3593 
3594 	/*
3595 	 * Implicitly detach a stream from an interface.
3596 	 */
3597 	if (slp->sl_fcip) {
3598 		fcip_dodetach(slp);
3599 	}
3600 
3601 	(void) qassociate(rq, -1);	/* undo association in open */
3602 
3603 	rw_enter(&fcipstruplock, RW_WRITER);
3604 
3605 	/*
3606 	 * unlink this stream from the active stream list and free it
3607 	 */
3608 	for (prevslp = &fcipstrup; (slp = *prevslp) != NULL;
3609 	    prevslp = &slp->sl_nextp) {
3610 		if (slp == (struct fcipstr *)rq->q_ptr) {
3611 			break;
3612 		}
3613 	}
3614 
3615 	/* we should have found slp */
3616 	ASSERT(slp);
3617 
3618 	*prevslp = slp->sl_nextp;
3619 	mutex_destroy(&slp->sl_lock);
3620 	kmem_free(slp, sizeof (struct fcipstr));
3621 	rq->q_ptr = WR(rq)->q_ptr = NULL;
3622 
3623 	rw_exit(&fcipstruplock);
3624 	return (0);
3625 }
3626 
3627 /*
3628  * This is not an extension of the DDI_DETACH request. This routine
3629  * only detaches a stream from an interface
3630  */
3631 static void
3632 fcip_dodetach(struct fcipstr *slp)
3633 {
3634 	struct fcipstr	*tslp;
3635 	struct fcip	*fptr;
3636 
3637 	FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
3638 	FCIP_TNF_PROBE_1((fcip_dodetach, "fcip io", /* CSTYLED */,
3639 		tnf_string, msg, "enter"));
3640 	ASSERT(slp->sl_fcip != NULL);
3641 
3642 	fptr = slp->sl_fcip;
3643 	slp->sl_fcip = NULL;
3644 
3645 	/*
3646 	 * we don't support promiscuous mode currently but check
3647 	 * for and disable any promiscuous mode operation
3648 	 */
3649 	if (slp->sl_flags & SLALLPHYS) {
3650 		slp->sl_flags &= ~SLALLPHYS;
3651 	}
3652 
3653 	/*
3654 	 * disable ALLMULTI mode if all multicast addresses are ON
3655 	 */
3656 	if (slp->sl_flags & SLALLMULTI) {
3657 		slp->sl_flags &= ~SLALLMULTI;
3658 	}
3659 
3660 	/*
3661 	 * we are most likely going to perform multicast by
3662 	 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
3663 	 * ALPA 0x00 in case of public loops
3664 	 */
3665 
3666 
3667 	/*
3668 	 * detach unit from device structure.
3669 	 */
3670 	for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
3671 		if (tslp->sl_fcip == fptr) {
3672 			break;
3673 		}
3674 	}
3675 	if (tslp == NULL) {
3676 		FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
3677 		"fcip_dodetach - active stream struct not found"));
3678 
3679 		/* unregister with Fabric nameserver?? */
3680 	}
3681 	slp->sl_state = DL_UNATTACHED;
3682 
3683 	fcip_setipq(fptr);
3684 }
3685 
3686 
3687 /*
3688  * Set or clear device ipq pointer.
3689  * Walk through all the streams on this device; if an ETHERTYPE_IP
3690  * stream is found, assign the device ipq to its sl_rq.
3691  */
3692 static void
3693 fcip_setipq(struct fcip *fptr)
3694 {
3695 	struct fcipstr	*slp;
3696 	int		ok = 1;
3697 	queue_t		*ipq = NULL;
3698 
3699 	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));
3700 
3701 	rw_enter(&fcipstruplock, RW_READER);
3702 
3703 	for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
3704 		if (slp->sl_fcip == fptr) {
3705 			if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
3706 				ok = 0;
3707 			}
3708 			if (slp->sl_sap == ETHERTYPE_IP) {
3709 				if (ipq == NULL) {
3710 					ipq = slp->sl_rq;
3711 				} else {
3712 					ok = 0;
3713 				}
3714 			}
3715 		}
3716 	}
3717 
3718 	rw_exit(&fcipstruplock);
3719 
3720 	if (fcip_check_port_exists(fptr)) {
3721 		/* fptr passed to us is stale */
3722 		return;
3723 	}
3724 
3725 	mutex_enter(&fptr->fcip_mutex);
3726 	if (ok) {
3727 		fptr->fcip_ipq = ipq;
3728 	} else {
3729 		fptr->fcip_ipq = NULL;
3730 	}
3731 	mutex_exit(&fptr->fcip_mutex);
3732 }
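/*
 * In short: fcip_ipq caches the read queue of the single ETHERTYPE_IP
 * stream bound to this port, presumably so the receive path can hand
 * IP datagrams straight to it. It is cleared whenever more than one IP
 * stream is bound or any stream has SLALLPHYS/SLALLSAP set.
 */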
3733 
3734 
3735 /* ARGSUSED */
3736 static void
3737 fcip_ioctl(queue_t *wq, mblk_t *mp)
3738 {
3739 	struct iocblk		*iocp = (struct iocblk *)mp->b_rptr;
3740 	struct fcipstr		*slp = (struct fcipstr *)wq->q_ptr;
3741 
3742 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3743 	    (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));
3744 	FCIP_TNF_PROBE_1((fcip_ioctl, "fcip io", /* CSTYLED */,
3745 		tnf_string, msg, "enter"));
3746 
3747 	switch (iocp->ioc_cmd) {
3748 	case DLIOCRAW:
3749 		slp->sl_flags |= FCIP_SLRAW;
3750 		miocack(wq, mp, 0, 0);
3751 		break;
3752 
3753 	case DL_IOC_HDR_INFO:
3754 		fcip_dl_ioc_hdr_info(wq, mp);
3755 		break;
3756 
3757 	default:
3758 		miocnak(wq, mp, 0, EINVAL);
3759 		break;
3760 	}
3761 }
3762 
3763 /*
3764  * The streams 'Put' routine.
3765  */
3766 /* ARGSUSED */
3767 static int
3768 fcip_wput(queue_t *wq, mblk_t *mp)
3769 {
3770 	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
3771 	struct fcip *fptr;
3772 	struct fcip_dest *fdestp;
3773 	fcph_network_hdr_t *headerp;
3774 
3775 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3776 	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));
3777 
3778 	switch (DB_TYPE(mp)) {
3779 	case M_DATA: {
3780 
3781 		fptr = slp->sl_fcip;
3782 
3783 		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
3784 		    (slp->sl_state != DL_IDLE) ||
3785 		    (fptr == NULL)) {
3786 			/*
3787 			 * set error in the message block and send a reply
3788 			 * back upstream. Sun's merror routine does this
3789 			 * for us more cleanly.
3790 			 */
3791 			merror(wq, mp, EPROTO);
3792 			break;
3793 		}
3794 
3795 		/*
3796 		 * if any messages are already enqueued or if the interface
3797 		 * is in promiscuous mode, causing the packets to loop back
3798 		 * up, then enqueue the message. Otherwise just transmit
3799 		 * the message. putq() puts the message on fcip's
3800 		 * write queue and qenable() puts the queue (wq) on
3801 		 * the list of queues to be called by the streams scheduler.
3802 		 */
3803 		if (wq->q_first) {
3804 			(void) putq(wq, mp);
3805 			fptr->fcip_wantw = 1;
3806 			qenable(wq);
3807 		} else if (fptr->fcip_flags & FCIP_PROMISC) {
3808 			/*
3809 			 * Promiscuous mode is not supported yet, but keep this
3810 			 * code in case it is supported in the future.
3811 			 */
3812 			(void) putq(wq, mp);
3813 			qenable(wq);
3814 		} else {
3815 
3816 			headerp = (fcph_network_hdr_t *)mp->b_rptr;
3817 			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);
3818 
3819 			if (fdestp == NULL) {
3820 				merror(wq, mp, EPROTO);
3821 				break;
3822 			}
3823 
3824 			ASSERT(fdestp != NULL);
3825 
3826 			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
3827 		}
3828 		break;
3829 	}
3830 	case M_PROTO:
3831 	case M_PCPROTO:
3832 		/*
3833 		 * to prevent recursive calls into fcip_proto
3834 		 * (PROTO and PCPROTO messages are handled by fcip_proto)
3835 		 * let the service procedure handle these messages by
3836 		 * calling putq here.
3837 		 */
3838 		(void) putq(wq, mp);
3839 		qenable(wq);
3840 		break;
3841 
3842 	case M_IOCTL:
3843 		fcip_ioctl(wq, mp);
3844 		break;
3845 
3846 	case M_FLUSH:
3847 		if (*mp->b_rptr & FLUSHW) {
3848 			flushq(wq, FLUSHALL);
3849 			*mp->b_rptr &= ~FLUSHW;
3850 		}
3851 		/*
3852 		 * we have both FLUSHW and FLUSHR set with FLUSHRW
3853 		 */
3854 		if (*mp->b_rptr & FLUSHR) {
3855 			/*
3856 			 * send msg back upstream. qreply() takes care
3857 			 * of using the RD(wq) queue on its reply
3858 			 */
3859 			qreply(wq, mp);
3860 		} else {
3861 			freemsg(mp);
3862 		}
3863 		break;
3864 
3865 	default:
3866 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3867 		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
3868 		freemsg(mp);
3869 		break;
3870 	}
3871 	return (0);
3872 }
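/*
 * To summarize the M_DATA case above: inline transmission is attempted
 * only for streams that negotiated the fast path or raw mode
 * (FCIP_SLFAST/FCIP_SLRAW), are bound (DL_IDLE) and attached to a port;
 * anything else is bounced with EPROTO. If the write queue already has
 * a backlog, or the (unsupported) promiscuous mode is on, the message
 * is queued instead and fcip_wsrv() picks it up later.
 */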
3873 
3874 
3875 /*
3876  * Handle M_PROTO and M_PCPROTO messages
3877  */
3878 /* ARGSUSED */
3879 static void
3880 fcip_proto(queue_t *wq, mblk_t *mp)
3881 {
3882 	union DL_primitives	*dlp;
3883 	struct fcipstr		*slp;
3884 	t_uscalar_t		prim;
3885 
3886 	slp = (struct fcipstr *)wq->q_ptr;
3887 	dlp = (union DL_primitives *)mp->b_rptr;
3888 	prim = dlp->dl_primitive;		/* the DLPI command */
3889 
3890 	FCIP_TNF_PROBE_5((fcip_proto, "fcip io", /* CSTYLED */,
3891 		tnf_string, msg, "enter",
3892 		tnf_opaque, wq, wq,
3893 		tnf_opaque, mp, mp,
3894 		tnf_opaque, MP_DB_TYPE, DB_TYPE(mp),
3895 		tnf_opaque, dl_primitive, dlp->dl_primitive));
3896 
3897 	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitive : %x", prim));
3898 
3899 	mutex_enter(&slp->sl_lock);
3900 
3901 	switch (prim) {
3902 	case DL_UNITDATA_REQ:
3903 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3904 			tnf_string, msg, "unit data request"));
3905 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
3906 		fcip_udreq(wq, mp);
3907 		break;
3908 
3909 	case DL_ATTACH_REQ:
3910 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3911 			tnf_string, msg, "Attach request"));
3912 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
3913 		fcip_areq(wq, mp);
3914 		break;
3915 
3916 	case DL_DETACH_REQ:
3917 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3918 			tnf_string, msg, "Detach request"));
3919 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
3920 		fcip_dreq(wq, mp);
3921 		break;
3922 
3923 	case DL_BIND_REQ:
3924 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
3925 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3926 			tnf_string, msg, "Bind request"));
3927 		fcip_breq(wq, mp);
3928 		break;
3929 
3930 	case DL_UNBIND_REQ:
3931 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3932 			tnf_string, msg, "unbind request"));
3933 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
3934 		fcip_ubreq(wq, mp);
3935 		break;
3936 
3937 	case DL_INFO_REQ:
3938 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3939 			tnf_string, msg, "Info request"));
3940 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
3941 		fcip_ireq(wq, mp);
3942 		break;
3943 
3944 	case DL_SET_PHYS_ADDR_REQ:
3945 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3946 			tnf_string, msg, "set phy addr request"));
3947 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3948 		    (CE_NOTE, "set phy addr request"));
3949 		fcip_spareq(wq, mp);
3950 		break;
3951 
3952 	case DL_PHYS_ADDR_REQ:
3953 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3954 			tnf_string, msg, "phy addr request"));
3955 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
3956 		fcip_pareq(wq, mp);
3957 		break;
3958 
3959 	case DL_ENABMULTI_REQ:
3960 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3961 			tnf_string, msg, "Enable Multicast request"));
3962 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3963 		    (CE_NOTE, "Enable Multicast request"));
3964 		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3965 		break;
3966 
3967 	case DL_DISABMULTI_REQ:
3968 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3969 			tnf_string, msg, "Disable Multicast request"));
3970 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3971 		    (CE_NOTE, "Disable Multicast request"));
3972 		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3973 		break;
3974 
3975 	case DL_PROMISCON_REQ:
3976 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3977 			tnf_string, msg, "Promiscuous mode ON request"));
3978 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3979 		    (CE_NOTE, "Promiscuous mode ON request"));
3980 		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3981 		break;
3982 
3983 	case DL_PROMISCOFF_REQ:
3984 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3985 			tnf_string, msg, "Promiscuous mode OFF request"));
3986 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3987 		    (CE_NOTE, "Promiscuous mode OFF request"));
3988 		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3989 		break;
3990 
3991 	default:
3992 		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3993 			tnf_string, msg, "Unsupported request"));
3994 		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3995 		break;
3996 	}
3997 	mutex_exit(&slp->sl_lock);
3998 }
3999 
4000 /*
4001  * Always enqueue M_PROTO and M_PCPROTO messages pn the wq and M_DATA
4002  * Always enqueue M_PROTO and M_PCPROTO messages on the wq, and M_DATA
4003  * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
4004  * requires us to hold fcip's internal locks across (upstream) putnext
4005  * calls. Specifically, fcip_intr could hold fcip_intrlock and fcipstruplock
4006  * when it calls putnext(). That thread could loop back around to call
4007  * fcip_wput and eventually fcip_init(), causing a recursive mutex panic.
4008  * M_DATA messages are enqueued only if we are out of xmit resources. Once
4009  * the transmit resources are available the service procedure is enabled
4010  * and an attempt is made to xmit all messages on the wq.
4011  */
4012 /* ARGSUSED */
4013 static int
4014 fcip_wsrv(queue_t *wq)
4015 {
4016 	mblk_t		*mp;
4017 	struct fcipstr	*slp;
4018 	struct fcip	*fptr;
4019 	struct fcip_dest *fdestp;
4020 	fcph_network_hdr_t *headerp;
4021 
4022 	slp = (struct fcipstr *)wq->q_ptr;
4023 	fptr = slp->sl_fcip;
4024 
4025 	FCIP_TNF_PROBE_2((fcip_wsrv, "fcip io", /* CSTYLED */,
4026 		tnf_string, msg, "enter",
4027 		tnf_opaque, wq, wq));
4028 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv"));
4029 
4030 	while (mp = getq(wq)) {
4031 		switch (DB_TYPE(mp)) {
4032 		case M_DATA:
4033 			if (fptr && mp) {
4034 				headerp = (fcph_network_hdr_t *)mp->b_rptr;
4035 				fdestp = fcip_get_dest(fptr,
4036 				    &headerp->net_dest_addr);
4037 				if (fdestp == NULL) {
4038 					freemsg(mp);
4039 					goto done;
4040 				}
4041 				if (fcip_start(wq, mp, fptr, fdestp,
4042 				    KM_SLEEP)) {
4043 					goto done;
4044 				}
4045 			} else {
4046 				freemsg(mp);
4047 			}
4048 			break;
4049 
4050 		case M_PROTO:
4051 		case M_PCPROTO:
4052 			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4053 			    (CE_NOTE, "PROT msg in wsrv"));
4054 			fcip_proto(wq, mp);
4055 			break;
4056 		default:
4057 			break;
4058 		}
4059 	}
4060 done:
4061 	return (0);
4062 }
4063 
4064 
4065 /*
4066  * This routine is called from fcip_wsrv to send a message downstream
4067  * on the fibre towards its destination. This routine performs the
4068  * actual WWN to D_ID mapping by looking up the routing and destination
4069  * tables.
4070  */
4071 /* ARGSUSED */
4072 static int
4073 fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
4074     struct fcip_dest *fdestp, int flags)
4075 {
4076 	int			rval;
4077 	int			free;
4078 	fcip_pkt_t		*fcip_pkt;
4079 	fc_packet_t		*fc_pkt;
4080 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4081 	size_t			datalen;
4082 
4083 	FCIP_TNF_PROBE_4((fcip_start, "fcip io", /* CSTYLED */,
4084 	    tnf_string, msg, "enter", tnf_opaque, wq, wq,
4085 	    tnf_opaque, mp, mp,
4086 	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp)));
4087 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));
4088 
4089 	ASSERT(fdestp != NULL);
4090 
4091 	/*
4092 	 * Only return if port has gone offline and not come back online
4093 	 * in a while
4094 	 */
4095 	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
4096 		freemsg(mp);
4097 		return (0);
4098 	}
4099 
4100 	/*
4101 	 * The message block coming in here already has the network and
4102 	 * llc_snap hdr stuffed in
4103 	 */
4104 	/*
4105 	 * Traditionally, Ethernet drivers at Sun handle 3 cases here -
4106 	 * 1. messages with one mblk
4107 	 * 2. messages with 2 mblks
4108 	 * 3. messages with >2 mblks
4109 	 * For now let's handle all 3 cases the same way by pulling the
4110 	 * message together into one mblk that holds all the data.
4111 	 */
4112 
4113 	if (mp->b_cont != NULL) {
4114 		if (!pullupmsg(mp, -1)) {
4115 			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4116 			    (CE_WARN, "failed to concat message"));
4117 			freemsg(mp);
4118 			return (1);
4119 		}
4120 	}
4121 
4122 	datalen = msgsize(mp);
4123 
4124 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
4125 	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
4126 	    datalen));
4127 
4128 	/*
4129 	 * We cannot have requests larger than FCIPMTU+Headers
4130 	 */
4131 	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
4132 		sizeof (fcph_network_hdr_t))) {
4133 		freemsg(mp);
4134 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
4135 		    "fcip_pkt_alloc: datalen is larger than "
4136 		    "max possible size."));
4137 		return (1);
4138 	}
4139 
4140 	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
4141 	if (fcip_pkt == NULL) {
4142 		(void) putbq(wq, mp);
4143 		return (1);
4144 	}
4145 
4146 	fcip_pkt->fcip_pkt_mp = mp;
4147 	fcip_pkt->fcip_pkt_wq = wq;
4148 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
4149 
4150 	mutex_enter(&fdestp->fcipd_mutex);
4151 	/*
4152 	 * If the device dynamically disappeared, just fail the request.
4153 	 */
4154 	if (fdestp->fcipd_rtable == NULL) {
4155 		mutex_exit(&fdestp->fcipd_mutex);
4156 		fcip_pkt_free(fcip_pkt, 1);
4157 		return (1);
4158 	}
4159 
4160 	/*
4161 	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
4162 	 */
4163 
4164 	fc_pkt->pkt_pd = fdestp->fcipd_pd;
4165 
4166 	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
4167 	    fc_pkt, flags) != FC_SUCCESS) {
4168 		mutex_exit(&fdestp->fcipd_mutex);
4169 		fcip_pkt_free(fcip_pkt, 1);
4170 		return (1);
4171 	}
4172 
4173 	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
4174 	fcip_pkt->fcip_pkt_dest = fdestp;
4175 	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;
4176 
4177 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
4178 	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
4179 	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));
4180 
4181 	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
4182 	    fdestp->fcipd_did, fcip_pkt_callback);
4183 
4184 	fdestp->fcipd_ncmds++;
4185 
4186 	mutex_exit(&fdestp->fcipd_mutex);
4187 	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
4188 		fptr->fcip_opackets++;
4189 		return (0);
4190 	}
4191 
4192 	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
4193 	    rval == FC_TRAN_BUSY) ? 0 : 1;
4194 
4195 	mutex_enter(&fdestp->fcipd_mutex);
4196 	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
4197 
4198 	if (!rval) {
4199 		fcip_pkt = NULL;
4200 	} else {
4201 		fdestp->fcipd_ncmds--;
4202 	}
4203 	mutex_exit(&fdestp->fcipd_mutex);
4204 
4205 	if (fcip_pkt != NULL) {
4206 		fcip_pkt_free(fcip_pkt, free);
4207 	}
4208 
4209 	if (!free) {
4210 		(void) putbq(wq, mp);
4211 	}
4212 
4213 	return (1);
4214 }
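/*
 * Failure handling in fcip_start(): FC_STATEC_BUSY, FC_OFFLINE and
 * FC_TRAN_BUSY are treated as transient, so the fcip packet is freed
 * with free == 0 (which presumably leaves the mblk intact) and the
 * mblk is pushed back onto the write queue with putbq() for a later
 * retry; any other transport error frees both the packet and the
 * message.
 */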
4215 
4216 
4217 /*
4218  * This routine enqueues a packet marked to be issued to the
4219  * transport in the dest structure. This enables us to time out any
4220  * request stuck with the FCA/transport for long periods of time
4221  * without a response. fcip_pkt_timeout will attempt to clean up
4222  * any packets hung in this state of limbo.
4223  */
4224 static void
4225 fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
4226 {
4227 	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
4228 	FCIP_TNF_PROBE_1((fcip_fdestp_enqueue_pkt, "fcip io", /* CSTYLED */,
4229 		tnf_string, msg, "destp enq pkt"));
4230 
4231 	/*
4232 	 * Just hang it off the head of packet list
4233 	 */
4234 	fcip_pkt->fcip_pkt_next = fdestp->fcipd_head;
4235 	fcip_pkt->fcip_pkt_prev = NULL;
4236 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
4237 
4238 	if (fdestp->fcipd_head != NULL) {
4239 		ASSERT(fdestp->fcipd_head->fcip_pkt_prev == NULL);
4240 		fdestp->fcipd_head->fcip_pkt_prev = fcip_pkt;
4241 	}
4242 
4243 	fdestp->fcipd_head = fcip_pkt;
4244 }
4245 
4246 
4247 /*
4248  * Dequeues a packet after the transport/FCA tells us it has
4249  * been successfully sent on its way. Of course that doesn't mean
4250  * the packet will actually reach its destination, but it is at least
4251  * a step closer in that direction.
4252  */
4253 static int
4254 fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
4255 {
4256 	fcip_pkt_t	*fcipd_pkt;
4257 
4258 	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
4259 	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
4260 		fcipd_pkt = fdestp->fcipd_head;
4261 		while (fcipd_pkt) {
4262 			if (fcipd_pkt == fcip_pkt) {
4263 				fcip_pkt_t	*pptr = NULL;
4264 
4265 				if (fcipd_pkt == fdestp->fcipd_head) {
4266 					ASSERT(fcipd_pkt->fcip_pkt_prev ==
4267 					    NULL);
4268 					fdestp->fcipd_head =
4269 					    fcipd_pkt->fcip_pkt_next;
4270 				} else {
4271 					pptr = fcipd_pkt->fcip_pkt_prev;
4272 					ASSERT(pptr != NULL);
4273 					pptr->fcip_pkt_next =
4274 					    fcipd_pkt->fcip_pkt_next;
4275 				}
4276 				if (fcipd_pkt->fcip_pkt_next) {
4277 					pptr = fcipd_pkt->fcip_pkt_next;
4278 					pptr->fcip_pkt_prev =
4279 					    fcipd_pkt->fcip_pkt_prev;
4280 				}
4281 				fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
4282 				break;
4283 			}
4284 			fcipd_pkt = fcipd_pkt->fcip_pkt_next;
4285 		}
4286 	} else {
4287 		if (fcip_pkt->fcip_pkt_prev == NULL) {
4288 			ASSERT(fdestp->fcipd_head == fcip_pkt);
4289 			fdestp->fcipd_head = fcip_pkt->fcip_pkt_next;
4290 		} else {
4291 			fcip_pkt->fcip_pkt_prev->fcip_pkt_next =
4292 			    fcip_pkt->fcip_pkt_next;
4293 		}
4294 
4295 		if (fcip_pkt->fcip_pkt_next) {
4296 			fcip_pkt->fcip_pkt_next->fcip_pkt_prev =
4297 			    fcip_pkt->fcip_pkt_prev;
4298 		}
4299 
4300 		fcipd_pkt = fcip_pkt;
4301 		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
4302 	}
4303 
4304 	return (fcipd_pkt == fcip_pkt);
4305 }
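/*
 * The return value of fcip_fdestp_dequeue_pkt() is nonzero only if the
 * packet was actually found and unlinked. When the packet is marked
 * FCIP_PKT_IN_TIMEOUT the list is searched, since the timeout path may
 * already have pulled it off; otherwise the packet is assumed to still
 * be on the list and is unlinked directly.
 */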
4306 
4307 /*
4308  * The transport routine - this is the routine that actually calls
4309  * into the FCA driver (through the transport, of course) to transmit a
4310  * datagram on the fibre. The dest struct associated with the port for
4311  * which the data is intended is already bound to the packet; this routine
4312  * only takes care of marking the packet as a broadcast packet if it is
4313  * intended to be a broadcast request. This permits the transport to send
4314  * the packet down on the wire even if it doesn't have an entry for the
4315  * D_ID in its d_id hash tables.
4316  */
4317 static int
4318 fcip_transport(fcip_pkt_t *fcip_pkt)
4319 {
4320 	struct fcip		*fptr;
4321 	fc_packet_t		*fc_pkt;
4322 	fcip_port_info_t	*fport;
4323 	struct fcip_dest	*fdestp;
4324 	uint32_t		did;
4325 	int			rval = FC_FAILURE;
4326 	struct fcip_routing_table *frp = NULL;
4327 
4328 	FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4329 		tnf_string, msg, "enter"));
4330 
4331 	fptr = fcip_pkt->fcip_pkt_fptr;
4332 	fport = fptr->fcip_port_info;
4333 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
4334 	fdestp = fcip_pkt->fcip_pkt_dest;
4335 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called"));
4336 
4337 	did = fptr->fcip_broadcast_did;
4338 	if (fc_pkt->pkt_cmd_fhdr.d_id == did &&
4339 	    fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) {
4340 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4341 		    (CE_NOTE, "trantype set to BROADCAST"));
4342 		fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
4343 	}
4344 
4345 	mutex_enter(&fptr->fcip_mutex);
4346 	if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) &&
4347 	    (fc_pkt->pkt_pd == NULL)) {
4348 		mutex_exit(&fptr->fcip_mutex);
4349 		FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4350 		    tnf_string, msg, "fcip transport no pd"));
4351 		return (rval);
4352 	} else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
4353 		mutex_exit(&fptr->fcip_mutex);
4354 		FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4355 		    tnf_string, msg, "fcip transport port offline"));
4356 		return (FC_TRAN_BUSY);
4357 	}
4358 	mutex_exit(&fptr->fcip_mutex);
4359 
4360 	if (fdestp) {
4361 		/* uses the frp declared at function scope */
4362 
4363 		frp = fdestp->fcipd_rtable;
4364 		mutex_enter(&fptr->fcip_rt_mutex);
4365 		mutex_enter(&fdestp->fcipd_mutex);
4366 		if (fc_pkt->pkt_pd != NULL) {
4367 			if ((frp == NULL) ||
4368 			    (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) {
4369 				mutex_exit(&fdestp->fcipd_mutex);
4370 				mutex_exit(&fptr->fcip_rt_mutex);
4371 				if (frp &&
4372 				    (frp->fcipr_state == FCIP_RT_INVALID)) {
4373 					FCIP_TNF_PROBE_1((fcip_transport,
4374 					    "fcip io", /* CSTYLED */,
4375 					    tnf_string, msg,
4376 					    "fcip transport - TRANBUSY"));
4377 					return (FC_TRAN_BUSY);
4378 				} else {
4379 					FCIP_TNF_PROBE_1((fcip_transport,
4380 					    "fcip io", /* CSTYLED */,
4381 					    tnf_string, msg,
4382 					    "fcip transport: frp unavailable"));
4383 					return (rval);
4384 				}
4385 			}
4386 		}
4387 		mutex_exit(&fdestp->fcipd_mutex);
4388 		mutex_exit(&fptr->fcip_rt_mutex);
4389 		ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
4390 	}
4391 
4392 	/* Explicitly invalidate this field till fcip decides to use it */
4393 	fc_pkt->pkt_ulp_rscn_infop = NULL;
4394 
4395 	rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt);
4396 	if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
4397 		/*
4398 		 * Need to queue up the command for retry
4399 		 */
4400 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4401 		    (CE_WARN, "ulp_transport failed: 0x%x", rval));
4402 	} else if (rval == FC_LOGINREQ && (frp != NULL)) {
4403 		(void) fcip_do_plogi(fptr, frp);
4404 	} else if (rval == FC_BADPACKET && (frp != NULL)) {
4405 		/*
4406 		 * There is a distinct possibility in our scheme of things
4407 		 * that we have a routing table entry with a NULL pd struct.
4408 		 * Mark the routing table entry for removal if it is not a
4409 		 * broadcast entry
4410 		 */
4411 		if ((frp->fcipr_d_id.port_id != 0x0) &&
4412 		    (frp->fcipr_d_id.port_id != 0xffffff)) {
4413 			mutex_enter(&fptr->fcip_rt_mutex);
4414 			frp->fcipr_pd = NULL;
4415 			frp->fcipr_state = PORT_DEVICE_INVALID;
4416 			mutex_exit(&fptr->fcip_rt_mutex);
4417 		}
4418 	}
4419 
4420 	FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4421 	    tnf_string, msg, "fcip transport done"));
4422 	return (rval);
4423 }
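/*
 * Return-code handling above, in brief: FC_STATEC_BUSY and FC_OFFLINE
 * are only logged here and left for the caller (fcip_start) to requeue;
 * FC_LOGINREQ kicks off fcip_do_plogi() against the destination's
 * routing entry; and FC_BADPACKET invalidates a non-broadcast routing
 * entry so it will be rebuilt before it is used again.
 */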
4424 
4425 /*
4426  * Callback routine. Called by the FCA/transport when the message
4427  * has been put onto the wire towards its intended destination. We can
4428  * now free the fc_packet associated with the message
4429  */
4430 static void
4431 fcip_pkt_callback(fc_packet_t *fc_pkt)
4432 {
4433 	int			rval;
4434 	fcip_pkt_t		*fcip_pkt;
4435 	struct fcip_dest	*fdestp;
4436 
4437 	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
4438 	fdestp = fcip_pkt->fcip_pkt_dest;
4439 
4440 	/*
4441 	 * take the lock early so that we don't have a race condition
4442 	 * with fcip_timeout
4443 	 *
4444 	 * fdestp->fcipd_mutex isn't really intended to lock per
4445 	 * packet struct - see bug 5105592 for permanent solution
4446 	 */
4447 	mutex_enter(&fdestp->fcipd_mutex);
4448 
4449 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED;
4450 	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
4451 	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
4452 		mutex_exit(&fdestp->fcipd_mutex);
4453 		return;
4454 	}
4455 
4456 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback"));
4457 
4458 	ASSERT(fdestp->fcipd_rtable != NULL);
4459 	ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
4460 	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
4461 	fdestp->fcipd_ncmds--;
4462 	mutex_exit(&fdestp->fcipd_mutex);
4463 
4464 	if (rval) {
4465 		fcip_pkt_free(fcip_pkt, 1);
4466 	}
4467 
4468 	FCIP_TNF_PROBE_1((fcip_pkt_callback, "fcip io", /* CSTYLED */,
4469 		tnf_string, msg, "pkt callback done"));
4470 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done"));
4471 }
4472 
4473 /*
4474  * Return 1 if the topology is supported, else return 0.
4475  * Topology support is consistent with what the whole
4476  * stack supports together.
4477  */
4478 static int
4479 fcip_is_supported_fc_topology(int fc_topology)
4480 {
4481 	switch (fc_topology) {
4482 
4483 	case FC_TOP_PRIVATE_LOOP :
4484 	case FC_TOP_PUBLIC_LOOP :
4485 	case FC_TOP_FABRIC :
4486 	case FC_TOP_NO_NS :
4487 		return (1);
4488 	default :
4489 		return (0);
4490 	}
4491 }
4492 
4493 /*
4494  * handle any topology specific initializations here
4495  * this routine must be called while holding fcip_mutex
4496  */
4497 /* ARGSUSED */
4498 static void
4499 fcip_handle_topology(struct fcip *fptr)
4500 {
4501 
4502 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4503 
4504 	ASSERT(mutex_owned(&fptr->fcip_mutex));
4505 
4506 	/*
4507 	 * Since we know the port's topology - handle topology
4508 	 * specific details here. In Point to Point and Private Loop
4509 	 * topologies - we would probably not have a name server
4510 	 */
4511 
4512 	FCIP_TNF_PROBE_3((fcip_handle_topology, "fcip io", /* CSTYLED */,
4513 		tnf_string, msg, "enter",
4514 		tnf_uint, port_state, fport->fcipp_pstate,
4515 		tnf_uint, topology, fport->fcipp_topology));
4516 	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x",
4517 		fport->fcipp_pstate, fport->fcipp_topology));
4518 
4519 	fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr);
4520 	mutex_exit(&fptr->fcip_mutex);
4521 	(void) fcip_dest_add_broadcast_entry(fptr, 0);
4522 	mutex_enter(&fptr->fcip_mutex);
4523 
4524 	if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) {
4525 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4526 		    (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)",
4527 		    fptr->fcip_instance, fport->fcipp_topology));
4528 		return;
4529 	}
4530 
4531 	switch (fport->fcipp_topology) {
4532 	case FC_TOP_PRIVATE_LOOP: {
4533 
4534 		fc_portmap_t		*port_map;
4535 		uint32_t		listlen, alloclen;
4536 		/*
4537 		 * we may have to maintain routing. Get a list of
4538 		 * all devices on this port that the transport layer is
4539 		 * aware of. Check if any of them is an IS8802 type port;
4540 		 * if yes, get its WWN and DID mapping and cache it in
4541 		 * the per-port routing table. Since there is no
4542 		 * State Change notification for private loop/point-to-point
4543 		 * topologies - this table may not be accurate. The static
4544 		 * routing table is updated on a state change callback.
4545 		 */
4546 		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!"));
4547 		fptr->fcip_port_state = FCIP_PORT_ONLINE;
4548 		listlen = alloclen = FCIP_MAX_PORTS;
4549 		port_map = (fc_portmap_t *)
4550 		    kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)),
4551 		    KM_SLEEP);
4552 		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
4553 		    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
4554 			mutex_exit(&fptr->fcip_mutex);
4555 			fcip_rt_update(fptr, port_map, listlen);
4556 			mutex_enter(&fptr->fcip_mutex);
4557 		}
4558 		if (listlen > alloclen) {
4559 			alloclen = listlen;
4560 		}
4561 		kmem_free(port_map, (alloclen * sizeof (fc_portmap_t)));
4562 		/*
4563 		 * Now fall through and register with the transport
4564 		 * that this port is IP capable
4565 		 */
4566 	}
4567 	/* FALLTHROUGH */
4568 	case FC_TOP_NO_NS:
4569 		/*
4570 		 * If we don't have a nameserver, let's wait until we
4571 		 * have to send out a packet to a remote port and then
4572 		 * try and discover the port using ARP/FARP.
4573 		 */
4574 	/* FALLTHROUGH */
4575 	case FC_TOP_PUBLIC_LOOP:
4576 	case FC_TOP_FABRIC: {
4577 		fc_portmap_t	*port_map;
4578 		uint32_t	listlen, alloclen;
4579 
4580 		/* FC_TYPE of 0x05 goes to word 0, LSB */
4581 		fptr->fcip_port_state = FCIP_PORT_ONLINE;
4582 
4583 		if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) {
4584 			fptr->fcip_flags |= FCIP_REG_INPROGRESS;
4585 			if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns,
4586 			    fptr, KM_NOSLEEP) == TASKQID_INVALID) {
4587 				fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4588 			}
4589 		}
4590 
4591 		/*
4592 		 * If fcip_create_nodes_on_demand is overridden to force
4593 		 * discovery of all nodes in Fabric/Public loop topologies
4594 		 * we need to query for and obtain all nodes and log into
4595 		 * them as with private loop devices
4596 		 */
4597 		if (!fcip_create_nodes_on_demand) {
4598 			fptr->fcip_port_state = FCIP_PORT_ONLINE;
4599 			listlen = alloclen = FCIP_MAX_PORTS;
4600 			port_map = (fc_portmap_t *)
4601 			    kmem_zalloc((FCIP_MAX_PORTS *
4602 			    sizeof (fc_portmap_t)), KM_SLEEP);
4603 			if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
4604 			    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
4605 				mutex_exit(&fptr->fcip_mutex);
4606 				fcip_rt_update(fptr, port_map, listlen);
4607 				mutex_enter(&fptr->fcip_mutex);
4608 			}
4609 			if (listlen > alloclen) {
4610 				alloclen = listlen;
4611 			}
4612 			kmem_free(port_map,
4613 			    (alloclen * sizeof (fc_portmap_t)));
4614 		}
4615 		break;
4616 	}
4617 
4618 	default:
4619 		break;
4620 	}
4621 }
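/*
 * Topology handling in a nutshell: the broadcast D_ID and its dest
 * entry are set up for every topology. Private loop prefetches the
 * transport's portmap into the routing table right away; fabric and
 * public loop dispatch fcip_port_ns() on the taskq to register the
 * IS8802_SNAP FC-4 type with the name server, and also prefetch the
 * portmap when fcip_create_nodes_on_demand is turned off.
 */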
4622 
4623 static void
4624 fcip_port_ns(void *arg)
4625 {
4626 	struct	fcip		*fptr = (struct fcip *)arg;
4627 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4628 	fc_ns_cmd_t		ns_cmd;
4629 	uint32_t		types[8];
4630 	ns_rfc_type_t		rfc;
4631 
4632 	mutex_enter(&fptr->fcip_mutex);
4633 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
4634 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
4635 		fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4636 		mutex_exit(&fptr->fcip_mutex);
4637 		return;
4638 	}
4639 	mutex_exit(&fptr->fcip_mutex);
4640 
4641 	/*
4642 	 * Prepare the Name server structure to
4643 	 * register with the transport in case of
4644 	 * Fabric configuration.
4645 	 */
4646 	bzero(&rfc, sizeof (rfc));
4647 	bzero(types, sizeof (types));
4648 
4649 	types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 <<
4650 	    FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP));
4651 
4652 	rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id;
4653 	bcopy(types, rfc.rfc_types, sizeof (types));
4654 
4655 	ns_cmd.ns_flags = 0;
4656 	ns_cmd.ns_cmd = NS_RFT_ID;
4657 	ns_cmd.ns_req_len = sizeof (rfc);
4658 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
4659 	ns_cmd.ns_resp_len = 0;
4660 	ns_cmd.ns_resp_payload = NULL;
4661 
4662 	/*
4663 	 * Perform the Name Server Registration for FC IS8802_SNAP Type.
4664 	 * We don't expect a reply for registering port type
4665 	 */
4666 	(void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
4667 		(opaque_t)0, &ns_cmd);
4668 
4669 	mutex_enter(&fptr->fcip_mutex);
4670 	fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4671 	mutex_exit(&fptr->fcip_mutex);
4672 }
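/*
 * The RFT_ID payload built above is, roughly:
 *
 *	rfc.rfc_port_id	= our S_ID
 *	rfc.rfc_types	= 8 x 32-bit words (a 256-bit FC-4 types bitmap)
 *			  with only the FC_TYPE_IS8802_SNAP bit set
 *
 * FC4_TYPE_WORD_POS()/FC4_TYPE_BIT_POS() presumably map the type value
 * into word 0 of that bitmap (see the "word 0, LSB" note above). No
 * response payload is expected for this registration.
 */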
4673 
4674 /*
4675  * Set up this instance of fcip. This routine inits kstats, allocates
4676  * unsolicited buffers, determines this port's siblings and handles
4677  * topology specific details which includes registering with the name
4678  * server and also setting up the routing table for this port for
4679  * private loops and point to point topologies
4680  */
4681 static int
4682 fcip_init_port(struct fcip *fptr)
4683 {
4684 	int rval = FC_SUCCESS;
4685 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4686 	static char buf[64];
4687 	size_t	tok_buf_size = 0;
4688 
4689 	ASSERT(fport != NULL);
4690 
4691 	FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
4692 		tnf_string, msg, "enter"));
4693 	mutex_enter(&fptr->fcip_mutex);
4694 
4695 	/*
4696 	 * setup mac address for this port. Don't be too worried if
4697 	 * the WWN is zero; there is probably nothing attached
4698 	 * to the port. There is no point allocating unsolicited buffers
4699 	 * for an unused port so return success if we don't have a MAC
4700 	 * address. Do the port init on a state change notification.
4701 	 */
4702 	if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) {
4703 		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
4704 		rval = FC_SUCCESS;
4705 		goto done;
4706 	}
4707 
4708 	/*
4709 	 * clear routing table hash list for this port
4710 	 */
4711 	fcip_rt_flush(fptr);
4712 
4713 	/*
4714 	 * init kstats for this instance
4715 	 */
4716 	fcip_kstat_init(fptr);
4717 
4718 	/*
4719 	 * Allocate unsolicited buffers
4720 	 */
4721 	fptr->fcip_ub_nbufs = fcip_ub_nbufs;
4722 	tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs;
4723 
4724 	FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
4725 		tnf_string, msg, "debug",
4726 		tnf_int, tokBufsize, tok_buf_size));
4727 
4728 	FCIP_DEBUG(FCIP_DEBUG_INIT,
4729 	    (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size));
4730 
4731 	fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP);
4732 
4733 	if (fptr->fcip_ub_tokens == NULL) {
4734 		rval = FC_FAILURE;
4735 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4736 		    (CE_WARN, "fcip(%d): failed to allocate unsol buf",
4737 		    fptr->fcip_instance));
4738 		goto done;
4739 	}
4740 	rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs,
4741 		fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens);
4742 
4743 	if (rval != FC_SUCCESS) {
4744 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4745 		    (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!",
4746 		    fptr->fcip_instance, rval));
4747 	}
4748 
4749 	switch (rval) {
4750 	case FC_SUCCESS:
4751 		break;
4752 
4753 	case FC_OFFLINE:
4754 		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
4755 		rval = FC_FAILURE;
4756 		goto done;
4757 
4758 	case FC_UB_ERROR:
4759 		FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
4760 			tnf_string, msg, "invalid ub alloc request"));
4761 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4762 		    (CE_WARN, "invalid ub alloc request !!"));
4763 		rval = FC_FAILURE;
4764 		goto done;
4765 
4766 	case FC_FAILURE:
4767 		/*
4768 		 * requested bytes could not be alloced
4769 		 */
4770 		if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) {
4771 			cmn_err(CE_WARN,
4772 			    "!fcip(0x%x): Failed to alloc unsolicited bufs",
4773 			    ddi_get_instance(fport->fcipp_dip));
4774 			rval = FC_FAILURE;
4775 			goto done;
4776 		}
4777 		break;
4778 
4779 	default:
4780 		rval = FC_FAILURE;
4781 		break;
4782 	}
4783 
4784 	/*
4785 	 * Preallocate a Cache of fcip packets for transmit and receive
4786 	 * We don't want to be holding on to unsolicited buffers while
4787 	 * we transmit the message upstream
4788 	 */
4789 	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache"));
4790 
4791 	(void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance);
4792 	fptr->fcip_xmit_cache = kmem_cache_create(buf,
4793 		(fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)),
4794 		8, fcip_cache_constructor, fcip_cache_destructor,
4795 		NULL, (void *)fport, NULL, 0);
4796 
4797 	(void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance);
4798 	fptr->fcip_sendup_cache = kmem_cache_create(buf,
4799 		sizeof (struct fcip_sendup_elem),
4800 		8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0);
4801 
4802 	if (fptr->fcip_xmit_cache == NULL) {
4803 		FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
4804 			tnf_string, msg, "unable to allocate xmit cache",
4805 			tnf_int, instance, fptr->fcip_instance));
4806 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4807 		    (CE_WARN, "fcip%d unable to allocate xmit cache",
4808 		    fptr->fcip_instance));
4809 		rval = FC_FAILURE;
4810 		goto done;
4811 	}
4812 
4813 	/*
4814 	 * We may need to handle routing tables for point to point and
4815 	 * fcal topologies and register with NameServer for Fabric
4816 	 * topologies.
4817 	 */
4818 	fcip_handle_topology(fptr);
4819 	mutex_exit(&fptr->fcip_mutex);
4820 	if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) {
4821 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4822 		    (CE_WARN, "fcip(0x%x):add broadcast entry failed!!",
4823 		    fptr->fcip_instance));
4824 		mutex_enter(&fptr->fcip_mutex);
4825 		rval = FC_FAILURE;
4826 		goto done;
4827 	}
4828 
4829 	rval = FC_SUCCESS;
4830 	return (rval);
4831 
4832 done:
4833 	/*
4834 	 * we don't always come here from port_attach - so cleanup
4835 	 * anything done in the init_port routine
4836 	 */
4837 	if (fptr->fcip_kstatp) {
4838 		kstat_delete(fptr->fcip_kstatp);
4839 		fptr->fcip_kstatp = NULL;
4840 	}
4841 
4842 	if (fptr->fcip_xmit_cache) {
4843 		kmem_cache_destroy(fptr->fcip_xmit_cache);
4844 		fptr->fcip_xmit_cache = NULL;
4845 	}
4846 
4847 	if (fptr->fcip_sendup_cache) {
4848 		kmem_cache_destroy(fptr->fcip_sendup_cache);
4849 		fptr->fcip_sendup_cache = NULL;
4850 	}
4851 
4852 	/* release unsolicited buffers */
4853 	if (fptr->fcip_ub_tokens) {
4854 		uint64_t	*tokens = fptr->fcip_ub_tokens;
4855 		fptr->fcip_ub_tokens = NULL;
4856 
4857 		mutex_exit(&fptr->fcip_mutex);
4858 		(void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs,
4859 			tokens);
4860 		kmem_free(tokens, tok_buf_size);
4861 
4862 	} else {
4863 		mutex_exit(&fptr->fcip_mutex);
4864 	}
4865 
4866 	return (rval);
4867 }
4868 
4869 /*
4870  * Sets up a port's MAC address from its WWN
4871  */
4872 static int
4873 fcip_setup_mac_addr(struct fcip *fptr)
4874 {
4875 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4876 
4877 	ASSERT(mutex_owned(&fptr->fcip_mutex));
4878 
4879 	fptr->fcip_addrflags = 0;
4880 
4881 	/*
4882 	 * we cannot choose a MAC address for our interface - we have
4883 	 * to live with whatever node WWN we get (minus the top two
4884 	 * MSbytes for the MAC address) from the transport layer. We will
4885 	 * treat the WWN as our factory MAC address.
4886 	 */
4887 
4888 	if ((fport->fcipp_nwwn.w.wwn_hi != 0) ||
4889 	    (fport->fcipp_nwwn.w.wwn_lo != 0)) {
4890 		char		etherstr[ETHERSTRL];
4891 
4892 		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
4893 		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
4894 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4895 		    (CE_NOTE, "setupmacaddr ouraddr %s", etherstr));
4896 
4897 		fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT |
4898 						FCIP_FACTADDR_USE);
4899 	} else {
4900 		/*
4901 		 * No WWN - just return failure - there's not much
4902 		 * we can do since we cannot set the WWN.
4903 		 */
4904 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4905 		    (CE_WARN, "Port does not have a valid WWN"));
4906 		return (FCIP_INVALID_WWN);
4907 	}
4908 	return (FC_SUCCESS);
4909 }
4910 
4911 
4912 /*
4913  * flush routing table entries
4914  */
4915 static void
4916 fcip_rt_flush(struct fcip *fptr)
4917 {
4918 	int index;
4919 
4920 	mutex_enter(&fptr->fcip_rt_mutex);
4921 	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
4922 		struct fcip_routing_table 	*frtp, *frtp_next;
4923 		frtp = fptr->fcip_rtable[index];
4924 		while (frtp) {
4925 			frtp_next = frtp->fcipr_next;
4926 			kmem_free(frtp, sizeof (struct fcip_routing_table));
4927 			frtp = frtp_next;
4928 		}
4929 		fptr->fcip_rtable[index] = NULL;
4930 	}
4931 	mutex_exit(&fptr->fcip_rt_mutex);
4932 }
4933 
4934 /*
4935  * Free up the fcip softstate and all allocated resources for the
4936  * fcip instance associated with a given port driver instance.
4937  *
4938  * Given that this function walks the list of structures pointed to
4939  * by fcip_port_head, is called from multiple sources, and the
4940  * fcip_global_mutex that protects fcip_port_head must be dropped,
4941  * our best solution is to return a value that indicates the next
4942  * port in the list.  This way the caller doesn't need to worry
4943  * about the race condition where it saves off a pointer to the
4944  * next structure in the list and by the time this routine returns,
4945  * that next structure has already been freed.
4946  */
4947 static fcip_port_info_t *
4948 fcip_softstate_free(fcip_port_info_t *fport)
4949 {
4950 	struct fcip		*fptr = NULL;
4951 	int 			instance;
4952 	timeout_id_t		tid;
4953 	opaque_t		phandle = NULL;
4954 	fcip_port_info_t	*prev_fport, *cur_fport, *next_fport = NULL;
4955 
4956 	ASSERT(MUTEX_HELD(&fcip_global_mutex));
4957 
4958 	if (fport) {
4959 		phandle = fport->fcipp_handle;
4960 		fptr = fport->fcipp_fcip;
4961 	} else {
4962 		return (next_fport);
4963 	}
4964 
4965 	if (fptr) {
4966 		mutex_enter(&fptr->fcip_mutex);
4967 		instance = ddi_get_instance(fptr->fcip_dip);
4968 
4969 		/*
4970 		 * dismantle timeout thread for this instance of fcip
4971 		 */
4972 		tid = fptr->fcip_timeout_id;
4973 		fptr->fcip_timeout_id = NULL;
4974 
4975 		mutex_exit(&fptr->fcip_mutex);
4976 		(void) untimeout(tid);
4977 		mutex_enter(&fptr->fcip_mutex);
4978 
4979 		ASSERT(fcip_num_instances >= 0);
4980 		fcip_num_instances--;
4981 
4982 		/*
4983 		 * stop sendup thread
4984 		 */
4985 		mutex_enter(&fptr->fcip_sendup_mutex);
4986 		if (fptr->fcip_sendup_thr_initted) {
4987 			fptr->fcip_sendup_thr_initted = 0;
4988 			cv_signal(&fptr->fcip_sendup_cv);
4989 			cv_wait(&fptr->fcip_sendup_cv,
4990 			    &fptr->fcip_sendup_mutex);
4991 		}
4992 		ASSERT(fptr->fcip_sendup_head == NULL);
4993 		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
4994 		mutex_exit(&fptr->fcip_sendup_mutex);
4995 
4996 		/*
4997 		 * dismantle taskq
4998 		 */
4999 		if (fptr->fcip_tq) {
5000 			taskq_t	*tq = fptr->fcip_tq;
5001 
5002 			fptr->fcip_tq = NULL;
5003 
5004 			mutex_exit(&fptr->fcip_mutex);
5005 			taskq_destroy(tq);
5006 			mutex_enter(&fptr->fcip_mutex);
5007 		}
5008 
5009 		if (fptr->fcip_kstatp) {
5010 			kstat_delete(fptr->fcip_kstatp);
5011 			fptr->fcip_kstatp = NULL;
5012 		}
5013 
5014 		/* flush the routing table entries */
5015 		fcip_rt_flush(fptr);
5016 
5017 		if (fptr->fcip_xmit_cache) {
5018 			kmem_cache_destroy(fptr->fcip_xmit_cache);
5019 			fptr->fcip_xmit_cache = NULL;
5020 		}
5021 
5022 		if (fptr->fcip_sendup_cache) {
5023 			kmem_cache_destroy(fptr->fcip_sendup_cache);
5024 			fptr->fcip_sendup_cache = NULL;
5025 		}
5026 
5027 		fcip_cleanup_dest(fptr);
5028 
5029 		/* release unsolicited buffers */
5030 		if (fptr->fcip_ub_tokens) {
5031 			uint64_t	*tokens = fptr->fcip_ub_tokens;
5032 
5033 			fptr->fcip_ub_tokens = NULL;
5034 			mutex_exit(&fptr->fcip_mutex);
5035 			if (phandle) {
5036 				/*
5037 				 * release the global mutex here to
5038 				 * permit any pending data callbacks to
5039 				 * complete. Else we will deadlock in the
5040 				 * FCA waiting for all unsol buffers to be
5041 				 * returned.
5042 				 */
5043 				mutex_exit(&fcip_global_mutex);
5044 				(void) fc_ulp_ubfree(phandle,
5045 				    fptr->fcip_ub_nbufs, tokens);
5046 				mutex_enter(&fcip_global_mutex);
5047 			}
5048 			kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs));
5049 		} else {
5050 			mutex_exit(&fptr->fcip_mutex);
5051 		}
5052 
5053 		mutex_destroy(&fptr->fcip_mutex);
5054 		mutex_destroy(&fptr->fcip_ub_mutex);
5055 		mutex_destroy(&fptr->fcip_rt_mutex);
5056 		mutex_destroy(&fptr->fcip_dest_mutex);
5057 		mutex_destroy(&fptr->fcip_sendup_mutex);
5058 		cv_destroy(&fptr->fcip_farp_cv);
5059 		cv_destroy(&fptr->fcip_sendup_cv);
5060 		cv_destroy(&fptr->fcip_ub_cv);
5061 
5062 		ddi_soft_state_free(fcip_softp, instance);
5063 	}
5064 
5065 	/*
5066 	 * Now dequeue the fcip_port_info from the port list
5067 	 */
5068 	cur_fport = fcip_port_head;
5069 	prev_fport = NULL;
5070 	while (cur_fport != NULL) {
5071 		if (cur_fport == fport) {
5072 			break;
5073 		}
5074 		prev_fport = cur_fport;
5075 		cur_fport = cur_fport->fcipp_next;
5076 	}
5077 
5078 	/*
5079 	 * Assert that we found a port in our port list
5080 	 */
5081 	ASSERT(cur_fport == fport);
5082 
5083 	if (prev_fport) {
5084 		/*
5085 		 * Not the first port in the port list
5086 		 */
5087 		prev_fport->fcipp_next = fport->fcipp_next;
5088 	} else {
5089 		/*
5090 		 * first port
5091 		 */
5092 		fcip_port_head = fport->fcipp_next;
5093 	}
5094 	next_fport = fport->fcipp_next;
5095 	kmem_free(fport, sizeof (fcip_port_info_t));
5096 
5097 	return (next_fport);
5098 }
5099 
5100 
5101 /*
5102  * This is called by transport for any ioctl operations performed
5103  * on the devctl or other transport minor nodes. It is currently
5104  * unused for fcip
5105  */
5106 /* ARGSUSED */
5107 static int
5108 fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle, dev_t dev,
5109 	int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
5110 	uint32_t claimed)
5111 {
5112 	return (FC_UNCLAIMED);
5113 }
5114 
5115 /*
5116  * DL_INFO_REQ - returns information about the DLPI stream to the DLS user
5117  * requesting information about this interface
5118  */
5119 static void
5120 fcip_ireq(queue_t *wq, mblk_t *mp)
5121 {
5122 	struct fcipstr		*slp;
5123 	struct fcip		*fptr;
5124 	dl_info_ack_t		*dlip;
5125 	struct fcipdladdr	*dlap;
5126 	la_wwn_t		*ep;
5127 	int 			size;
5128 	char			etherstr[ETHERSTRL];
5129 
5130 	slp = (struct fcipstr *)wq->q_ptr;
5131 
5132 	fptr = slp->sl_fcip;
5133 
5134 	FCIP_DEBUG(FCIP_DEBUG_DLPI,
5135 	    (CE_NOTE, "fcip_ireq: info request req rcvd"));
5136 
5137 	FCIP_TNF_PROBE_1((fcip_ireq, "fcip io", /* CSTYLED */,
5138 	    tnf_string, msg, "fcip ireq entered"));
5139 
5140 	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
5141 		dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
5142 		return;
5143 	}
5144 
5145 	/*
5146 	 * Exchange current message for a DL_INFO_ACK
5147 	 */
5148 	size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL;
5149 	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {
5150 		return;
5151 	}
5152 
5153 	/*
5154 	 * FILL in the DL_INFO_ACK fields and reply
5155 	 */
5156 	dlip = (dl_info_ack_t *)mp->b_rptr;
5157 	*dlip = fcip_infoack;
5158 	dlip->dl_current_state = slp->sl_state;
5159 	dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset);
5160 	dlap->dl_sap = slp->sl_sap;
5161 
5162 
5163 	if (fptr) {
5164 		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
5165 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5166 		    (CE_NOTE, "ireq - our mac: %s", etherstr));
5167 		ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys);
5168 	} else {
5169 		bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);
5170 	}
5171 
5172 	ep = (la_wwn_t *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
5173 	ether_bcopy(&fcip_arpbroadcast_addr, ep);
5174 
5175 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req.."));
5176 	qreply(wq, mp);
5177 }
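/*
 * The DL_INFO_ACK built above is laid out as:
 *
 *	dl_info_ack_t (copied from the fcip_infoack template, with the
 *	    current stream state patched in)
 *	our fcipdladdr (sap + MAC) at dl_addr_offset
 *	the ARP broadcast address at dl_brdcst_addr_offset
 *
 * If the stream is not yet attached to a port, the MAC portion is
 * simply zeroed.
 */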
5178 
5179 
5180 /*
5181  * To handle DL_UNITDATA_REQ requests.
5182  */
5183 
5184 static void
5185 fcip_udreq(queue_t *wq, mblk_t *mp)
5186 {
5187 	struct fcipstr		*slp;
5188 	struct fcip		*fptr;
5189 	fcip_port_info_t	*fport;
5190 	dl_unitdata_req_t	*dludp;
5191 	mblk_t			*nmp;
5192 	struct fcipdladdr	*dlap;
5193 	fcph_network_hdr_t 	*headerp;
5194 	llc_snap_hdr_t		*lsnap;
5195 	t_uscalar_t		off, len;
5196 	struct fcip_dest	*fdestp;
5197 	la_wwn_t		wwn;
5198 	int			hdr_size;
5199 
5200 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq"));
5201 
5202 	FCIP_TNF_PROBE_1((fcip_udreq, "fcip io", /* CSTYLED */,
5203 	    tnf_string, msg, "fcip udreq entered"));
5204 
5205 	slp = (struct fcipstr *)wq->q_ptr;
5206 
5207 	if (slp->sl_state != DL_IDLE) {
5208 		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5209 		return;
5210 	}
5211 
5212 	fptr = slp->sl_fcip;
5213 
5214 	if (fptr == NULL) {
5215 		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5216 		return;
5217 	}
5218 
5219 	fport = fptr->fcip_port_info;
5220 
5221 	dludp = (dl_unitdata_req_t *)mp->b_rptr;
5222 	off = dludp->dl_dest_addr_offset;
5223 	len = dludp->dl_dest_addr_length;
5224 
5225 	/*
5226 	 * Validate destination address format
5227 	 */
5228 	if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) {
5229 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0);
5230 		return;
5231 	}
5232 
5233 	/*
5234 	 * Error if no M_DATA follows
5235 	 */
5236 	nmp = mp->b_cont;
5237 	if (nmp == NULL) {
5238 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5239 		return;
5240 	}
5241 	dlap = (struct fcipdladdr *)(mp->b_rptr + off);
5242 
5243 	/*
5244 	 * Now get the destination structure for the remote NPORT
5245 	 */
5246 	ether_to_wwn(&dlap->dl_phys, &wwn);
5247 	fdestp = fcip_get_dest(fptr, &wwn);
5248 
5249 	if (fdestp == NULL) {
5250 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE,
5251 		    "udreq - couldn't find dest struct for remote port"));
5252 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5253 		return;
5254 	}
5255 
5256 	/*
5257 	 * Network header + SAP
5258 	 */
5259 	hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
5260 
5261 	/* DB_REF gives the no. of msgs pointing to this block */
5262 	if ((DB_REF(nmp) == 1) &&
5263 	    (MBLKHEAD(nmp) >= hdr_size) &&
5264 	    (((uintptr_t)mp->b_rptr & 0x1) == 0)) {
5265 		la_wwn_t wwn;
5266 		nmp->b_rptr -= hdr_size;
5267 
5268 		/* first put the network header */
5269 		headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5270 		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5271 			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5272 		} else {
5273 			ether_to_wwn(&dlap->dl_phys, &wwn);
5274 		}
5275 		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5276 		ether_to_wwn(&fptr->fcip_macaddr, &wwn);
5277 		bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5278 
5279 		/* Now the snap header */
5280 		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5281 		    sizeof (fcph_network_hdr_t));
5282 		lsnap->dsap = 0xAA;
5283 		lsnap->ssap = 0xAA;
5284 		lsnap->ctrl = 0x03;
5285 		lsnap->oui[0] = 0x00;
5286 		lsnap->oui[1] = 0x00; 	/* 80 */
5287 		lsnap->oui[2] = 0x00;	/* C2 */
5288 		lsnap->pid = BE_16((dlap->dl_sap));
5289 
5290 		freeb(mp);
5291 		mp = nmp;
5292 
5293 	} else {
5294 		la_wwn_t wwn;
5295 
5296 		DB_TYPE(mp) = M_DATA;
5297 		headerp = (fcph_network_hdr_t *)mp->b_rptr;
5298 
5299 		/*
5300 		 * Only fill in the low 48bits of WWN for now - we can
5301 		 * fill in the NAA_ID after we find the port in the
5302 		 * routing tables
5303 		 */
5304 		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5305 			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5306 		} else {
5307 			ether_to_wwn(&dlap->dl_phys, &wwn);
5308 		}
5309 		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5310 		/* need to send our PWWN */
5311 		bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr,
5312 		    sizeof (la_wwn_t));
5313 
5314 		lsnap = (llc_snap_hdr_t *)(mp->b_rptr +
5315 		    sizeof (fcph_network_hdr_t));
5316 		lsnap->dsap = 0xAA;
5317 		lsnap->ssap = 0xAA;
5318 		lsnap->ctrl = 0x03;
5319 		lsnap->oui[0] = 0x00;
5320 		lsnap->oui[1] = 0x00;
5321 		lsnap->oui[2] = 0x00;
5322 		lsnap->pid = BE_16(dlap->dl_sap);
5323 
5324 		mp->b_wptr = mp->b_rptr + hdr_size;
5325 	}
5326 
5327 	/*
5328 	 * Ethernet drivers have a lot of gunk here to put the Type
5329 	 * information (for Ethernet encapsulation (RFC 894) or the
5330 	 * Length (for 802.2/802.3) - I guess we'll just ignore that
5331 	 * here.
5332 	 */
5333 
5334 	/*
5335 	 * Start the I/O on this port. If fcip_start failed for some reason
5336 	 * we call putbq in fcip_start so we don't need to check the
5337 	 * return value from fcip_start
5338 	 */
5339 	(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
5340 }
5341 
5342 /*
5343  * DL_ATTACH_REQ: attaches a PPA to a stream. ATTACH requests are needed
5344  * for style 2 DLS providers to identify the physical medium through which
5345  * the STREAMS communication will happen
5346  */
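/*
 * Here the PPA is simply the fcip instance number: we walk the global
 * port list and match the requested PPA against ddi_get_instance() of
 * each port's dip. If the PPA is not found we check whether the
 * transport knows the port at all and, if so, wait briefly for the
 * port attach callback before failing the request with DL_BADPPA.
 */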
5347 static void
5348 fcip_areq(queue_t *wq, mblk_t *mp)
5349 {
5350 	struct fcipstr		*slp;
5351 	union DL_primitives	*dlp;
5352 	fcip_port_info_t	*fport;
5353 	struct fcip		*fptr;
5354 	int			ppa;
5355 
5356 	slp = (struct fcipstr *)wq->q_ptr;
5357 	dlp = (union DL_primitives *)mp->b_rptr;
5358 
5359 	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
5360 		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
5361 		return;
5362 	}
5363 
5364 	if (slp->sl_state != DL_UNATTACHED) {
5365 		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
5366 		return;
5367 	}
5368 
5369 	ppa = dlp->attach_req.dl_ppa;
5370 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa));
5371 
5372 	/*
5373 	 * check if the PPA is valid
5374 	 */
5375 
5376 	mutex_enter(&fcip_global_mutex);
5377 
5378 	for (fport = fcip_port_head; fport; fport = fport->fcipp_next) {
5379 		if ((fptr = fport->fcipp_fcip) == NULL) {
5380 			continue;
5381 		}
5382 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa,
5383 		    ddi_get_instance(fptr->fcip_dip)));
5384 
5385 		if (ppa == ddi_get_instance(fptr->fcip_dip)) {
5386 			FCIP_DEBUG(FCIP_DEBUG_DLPI,
5387 			    (CE_NOTE, "ppa found %x", ppa));
5388 			break;
5389 		}
5390 	}
5391 
5392 	if (fport == NULL) {
5393 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5394 		    (CE_NOTE, "dlerrorack coz fport==NULL"));
5395 
5396 		mutex_exit(&fcip_global_mutex);
5397 
5398 		if (fc_ulp_get_port_handle(ppa) == NULL) {
5399 			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
5400 			return;
5401 		}
5402 
5403 		/*
5404 		 * Wait for Port attach callback to trigger.  If port_detach
5405 		 * got in while we were waiting, then ddi_get_soft_state
5406 		 * will return NULL, and we'll return an error.
5407 		 */
5408 
5409 		delay(drv_usectohz(FCIP_INIT_DELAY));
5410 		mutex_enter(&fcip_global_mutex);
5411 
5412 		fptr = ddi_get_soft_state(fcip_softp, ppa);
5413 		if (fptr == NULL) {
5414 			mutex_exit(&fcip_global_mutex);
5415 			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
5416 			return;
5417 		}
5418 	}
5419 
5420 	/*
5421 	 * set link to device and update our state
5422 	 */
5423 	slp->sl_fcip = fptr;
5424 	slp->sl_state = DL_UNBOUND;
5425 
5426 	mutex_exit(&fcip_global_mutex);
5427 
5428 #ifdef DEBUG
5429 	mutex_enter(&fptr->fcip_mutex);
5430 	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
5431 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet"));
5432 	}
5433 	mutex_exit(&fptr->fcip_mutex);
5434 #endif
5435 
5436 	dlokack(wq, mp, DL_ATTACH_REQ);
5437 }
5438 
5439 
5440 /*
5441  * DL_DETACH request - detaches a PPA from a stream
5442  */
5443 static void
5444 fcip_dreq(queue_t *wq, mblk_t *mp)
5445 {
5446 	struct fcipstr		*slp;
5447 
5448 	slp = (struct fcipstr *)wq->q_ptr;
5449 
5450 	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
5451 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
5452 		return;
5453 	}
5454 
5455 	if (slp->sl_state != DL_UNBOUND) {
5456 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
5457 		return;
5458 	}
5459 
5460 	fcip_dodetach(slp);
5461 	dlokack(wq, mp, DL_DETACH_REQ);
5462 }
5463 
5464 /*
5465  * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream.
5466  * DLS users communicate with a physical interface through DLSAPs. Multiple
5467  * DLSAPs can be bound to the same stream (PPA)
5468  */
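/*
 * The SAP bound here is an Ethernet type style value (checked against
 * ETHERTYPE_MAX below); on the wire it ends up as the 2 byte PID field
 * of the LLC/SNAP header built in fcip_udreq() and
 * fcip_dl_ioc_hdr_info().
 */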
5469 static void
5470 fcip_breq(queue_t *wq, mblk_t *mp)
5471 {
5472 	struct fcipstr		*slp;
5473 	union DL_primitives	*dlp;
5474 	struct fcip		*fptr;
5475 	struct fcipdladdr	fcipaddr;
5476 	t_uscalar_t		sap;
5477 	int			xidtest;
5478 
5479 	slp = (struct fcipstr *)wq->q_ptr;
5480 
5481 	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
5482 		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
5483 		return;
5484 	}
5485 
5486 	if (slp->sl_state != DL_UNBOUND) {
5487 		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5488 		return;
5489 	}
5490 
5491 	dlp = (union DL_primitives *)mp->b_rptr;
5492 	fptr = slp->sl_fcip;
5493 
5494 	if (fptr == NULL) {
5495 		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5496 		return;
5497 	}
5498 
5499 	sap = dlp->bind_req.dl_sap;
5500 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap));
5501 	xidtest = dlp->bind_req.dl_xidtest_flg;
5502 
5503 	if (xidtest) {
5504 		dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
5505 		return;
5506 	}
5507 
5508 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap));
5509 
5510 	if (sap > ETHERTYPE_MAX) {
5511 		dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
5512 		return;
5513 	}
5514 	/*
5515 	 * save SAP for this stream and change the link state
5516 	 */
5517 	slp->sl_sap = sap;
5518 	slp->sl_state = DL_IDLE;
5519 
5520 	fcipaddr.dl_sap = sap;
5521 	ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys);
5522 	dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0);
5523 
5524 	fcip_setipq(fptr);
5525 }
5526 
5527 /*
5528  * DL_UNBIND request to unbind a previously bound DLSAP, from this stream
5529  */
5530 static void
5531 fcip_ubreq(queue_t *wq, mblk_t *mp)
5532 {
5533 	struct fcipstr	*slp;
5534 
5535 	slp = (struct fcipstr *)wq->q_ptr;
5536 
5537 	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
5538 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
5539 		return;
5540 	}
5541 
5542 	if (slp->sl_state != DL_IDLE) {
5543 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
5544 		return;
5545 	}
5546 
5547 	slp->sl_state = DL_UNBOUND;
5548 	slp->sl_sap = 0;
5549 
5550 	(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
5551 	dlokack(wq, mp, DL_UNBIND_REQ);
5552 
5553 	fcip_setipq(slp->sl_fcip);
5554 }
5555 
5556 /*
5557  * Return our physical address
5558  */
5559 static void
5560 fcip_pareq(queue_t *wq, mblk_t *mp)
5561 {
5562 	struct fcipstr 		*slp;
5563 	union DL_primitives	*dlp;
5564 	int			type;
5565 	struct fcip		*fptr;
5566 	fcip_port_info_t	*fport;
5567 	struct ether_addr	addr;
5568 
5569 	slp = (struct fcipstr *)wq->q_ptr;
5570 
5571 	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
5572 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5573 		return;
5574 	}
5575 
5576 	dlp = (union DL_primitives *)mp->b_rptr;
5577 	type = dlp->physaddr_req.dl_addr_type;
5578 	fptr = slp->sl_fcip;
5579 
5580 	if (fptr == NULL) {
5581 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5582 		return;
5583 	}
5584 
5585 	fport = fptr->fcip_port_info;
5586 
5587 	switch (type) {
5588 	case DL_FACT_PHYS_ADDR:
5589 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5590 		    (CE_NOTE, "returning factory phys addr"));
5591 		wwn_to_ether(&fport->fcipp_pwwn, &addr);
5592 		break;
5593 
5594 	case DL_CURR_PHYS_ADDR:
5595 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5596 		    (CE_NOTE, "returning current phys addr"));
5597 		ether_bcopy(&fptr->fcip_macaddr, &addr);
5598 		break;
5599 
5600 	default:
5601 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5602 		    (CE_NOTE, "Not known cmd type in phys addr"));
5603 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
5604 		return;
5605 	}
5606 	dlphysaddrack(wq, mp, &addr, ETHERADDRL);
5607 }
5608 
5609 /*
5610  * Set physical address DLPI request
5611  */
5612 static void
5613 fcip_spareq(queue_t *wq, mblk_t *mp)
5614 {
5615 	struct fcipstr		*slp;
5616 	union DL_primitives	*dlp;
5617 	t_uscalar_t		off, len;
5618 	struct ether_addr	*addrp;
5619 	la_wwn_t		wwn;
5620 	struct fcip		*fptr;
5621 	fc_ns_cmd_t		fcip_ns_cmd;
5622 
5623 	slp = (struct fcipstr *)wq->q_ptr;
5624 
5625 	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
5626 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5627 		return;
5628 	}
5629 
5630 	dlp = (union DL_primitives *)mp->b_rptr;
5631 	len = dlp->set_physaddr_req.dl_addr_length;
5632 	off = dlp->set_physaddr_req.dl_addr_offset;
5633 
5634 	if (!MBLKIN(mp, off, len)) {
5635 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5636 		return;
5637 	}
5638 
5639 	addrp = (struct ether_addr *)(mp->b_rptr + off);
5640 
5641 	/*
5642 	 * If the length of the physical address is not correct, or the
5643 	 * address specified is a broadcast or multicast address,
5644 	 * return an error.
5645 	 */
5646 	if ((len != ETHERADDRL) ||
5647 	    ((addrp->ether_addr_octet[0] & 01) == 1) ||
5648 	    (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) {
5649 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5650 		return;
5651 	}
5652 
5653 	/*
5654 	 * Check that a stream is attached to this device, else return an error
5655 	 */
5656 	if ((fptr = slp->sl_fcip) == NULL) {
5657 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5658 		return;
5659 	}
5660 
5661 	/*
5662 	 * set the new interface local address. We request the transport
5663 	 * layer to change the Port WWN for this device - return an error
5664 	 * if we don't succeed.
5665 	 */
5666 
5667 	ether_to_wwn(addrp, &wwn);
5668 	if (fcip_set_wwn(&wwn) == FC_SUCCESS) {
5669 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5670 		    (CE_WARN, "WWN changed in spareq"));
5671 	} else {
5672 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
		return;
5673 	}
5674 
5675 	/*
5676 	 * Register the new Port WWN and Node WWN with the transport
5677 	 * and Nameserver. Hope the transport ensures all current I/O
5678 	 * has stopped before actually attempting to register a new
5679 	 * port and Node WWN else we are hosed. Maybe a Link reset
5680 	 * will get everyone's attention.
5681 	 */
5682 	fcip_ns_cmd.ns_flags = 0;
5683 	fcip_ns_cmd.ns_cmd = NS_RPN_ID;
5684 	fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t);
5685 	fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0];
5686 	fcip_ns_cmd.ns_resp_len = 0;
5687 	fcip_ns_cmd.ns_resp_payload = (caddr_t)0;
5688 	if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
5689 	    (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) {
5690 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5691 		    (CE_WARN, "setting Port WWN failed"));
5692 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5693 		return;
5694 	}
5695 
5696 	dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
5697 }
5698 
5699 /*
5700  * change our port's WWN if permitted by hardware
5701  */
5702 /* ARGSUSED */
5703 static int
5704 fcip_set_wwn(la_wwn_t *pwwn)
5705 {
5706 	/*
5707 	 * We're usually not allowed to change the WWN of an adapter,
5708 	 * though some adapters do permit it. For now, don't permit
5709 	 * setting of WWNs at all; this behavior could be modified
5710 	 * later if needed.
5711 	 */
5712 	return (FC_FAILURE);
5713 }
5714 
5715 
5716 /*
5717  * This routine fills in the header for fastpath data requests. Instead of
5718  * sending each datagram through the DL_UNITDATA_REQ DLPI code path (which
5719  * appends the protocol specific headers - the network and SNAP headers in
5720  * our case), the upper layer issues an M_IOCTL carrying a DL_IOC_HDR_INFO
5721  * request, asking the STREAMS endpoint driver for the header that needs to
5722  * be prepended. The upper layer then allocates and fills in that header
5723  * for each outbound datagram itself and calls our put routine directly.
5724  */
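/*
 * The M_IOCTL handled here carries a DL_UNITDATA_REQ (plus the
 * fcipdladdr destination address) in mp->b_cont. We allocate a fresh
 * mblk, prefill it with the network and LLC/SNAP headers for that
 * destination, link it after the request and ack the ioctl; IP can
 * then prepend this header template to outbound packets and bypass
 * the DL_UNITDATA_REQ path for the common case.
 */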
5725 static void
5726 fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
5727 {
5728 	mblk_t			*nmp;
5729 	struct fcipstr		*slp;
5730 	struct fcipdladdr	*dlap;
5731 	dl_unitdata_req_t	*dlup;
5732 	fcph_network_hdr_t	*headerp;
5733 	la_wwn_t		wwn;
5734 	llc_snap_hdr_t		*lsnap;
5735 	struct fcip		*fptr;
5736 	fcip_port_info_t	*fport;
5737 	t_uscalar_t		off, len;
5738 	size_t			hdrlen;
5739 	int 			error;
5740 
5741 	slp = (struct fcipstr *)wq->q_ptr;
5742 	fptr = slp->sl_fcip;
5743 	if (fptr == NULL) {
5744 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5745 		    (CE_NOTE, "dliochdr : returns EINVAL1"));
5746 		miocnak(wq, mp, 0, EINVAL);
5747 		return;
5748 	}
5749 
5750 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
5751 	if (error != 0) {
5752 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5753 		    (CE_NOTE, "dliochdr : returns %d", error));
5754 		miocnak(wq, mp, 0, error);
5755 		return;
5756 	}
5757 
5758 	fport = fptr->fcip_port_info;
5759 
5760 	/*
5761 	 * check if the DL_UNITDATA_REQ destination addr has valid offset
5762 	 * and length values
5763 	 */
5764 	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
5765 	off = dlup->dl_dest_addr_offset;
5766 	len = dlup->dl_dest_addr_length;
5767 	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
5768 	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
5769 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5770 		    (CE_NOTE, "dliochdr : returns EINVAL2"));
5771 		miocnak(wq, mp, 0, EINVAL);
5772 		return;
5773 	}
5774 
5775 	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);
5776 
5777 	/*
5778 	 * Allocate a new mblk with space for the network header and
5779 	 * the LLC/SNAP header
5780 	 */
5784 	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
5785 	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
5786 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5787 		    (CE_NOTE, "dliochdr : returns ENOMEM"));
5788 		miocnak(wq, mp, 0, ENOMEM);
5789 		return;
5790 	}
5791 	nmp->b_wptr += hdrlen;
5792 
5793 	/*
5794 	 * Fill in the Network Hdr and LLC SNAP header;
5795 	 */
5796 	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5797 	/*
5798 	 * just fill in the Node WWN here - we can fill in the NAA_ID when
5799 	 * we search the routing table
5800 	 */
5801 	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5802 		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5803 	} else {
5804 		ether_to_wwn(&dlap->dl_phys, &wwn);
5805 	}
5806 	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5807 	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5808 	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
5809 	lsnap->dsap = 0xAA;
5810 	lsnap->ssap = 0xAA;
5811 	lsnap->ctrl = 0x03;
5812 	lsnap->oui[0] = 0x00;
5813 	lsnap->oui[1] = 0x00;
5814 	lsnap->oui[2] = 0x00;
5815 	lsnap->pid = BE_16(dlap->dl_sap);
5816 
5817 	/*
5818 	 * Link new mblk in after the "request" mblks.
5819 	 */
5820 	linkb(mp, nmp);
5821 
5822 	slp->sl_flags |= FCIP_SLFAST;
5823 
5824 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5825 	    (CE_NOTE, "dliochdr : returns success "));
5826 	miocack(wq, mp, msgsize(mp->b_cont), 0);
5827 }
5828 
5829 
5830 /*
5831  * Constructor for the fcip packet (fcip_pkt_t) kmem cache
5832  */
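/*
 * This constructor (and the destructor below) back the per port
 * fcip_xmit_cache from which fcip_pkt_alloc() carves outbound
 * packets. Each buffer is big enough for a fcip_pkt_t followed by the
 * FCA's private packet area, so the cache is presumably created along
 * the lines of:
 *
 *	fptr->fcip_xmit_cache = kmem_cache_create("fcip_xmit_cache",
 *	    sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size, 8,
 *	    fcip_cache_constructor, fcip_cache_destructor,
 *	    NULL, (void *)fport, NULL, 0);
 *
 * (an illustrative sketch only - the cache name and alignment above
 * are guesses; the real create call is made elsewhere in this driver)
 */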
5833 static int
5834 fcip_cache_constructor(void *buf, void *arg, int flags)
5835 {
5836 	fcip_pkt_t		*fcip_pkt = buf;
5837 	fc_packet_t		*fc_pkt;
5838 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
5839 	int			(*cb) (caddr_t);
5840 	struct fcip		*fptr;
5841 
5842 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
5843 
5844 	ASSERT(fport != NULL);
5845 
5846 	fptr = fport->fcipp_fcip;
5847 
5848 	/*
5849 	 * we allocated space for our private area at the end of the
5850 	 * fc packet. Make sure we point to it correctly. Ideally we
5851 	 * should just push fc_packet_private to the beginning or end
5852 	 * of the fc_packet structure
5853 	 */
5854 	fcip_pkt->fcip_pkt_next = NULL;
5855 	fcip_pkt->fcip_pkt_prev = NULL;
5856 	fcip_pkt->fcip_pkt_dest = NULL;
5857 	fcip_pkt->fcip_pkt_state = 0;
5858 	fcip_pkt->fcip_pkt_reason = 0;
5859 	fcip_pkt->fcip_pkt_flags = 0;
5860 	fcip_pkt->fcip_pkt_fptr = fptr;
5861 	fcip_pkt->fcip_pkt_dma_flags = 0;
5862 
5863 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5864 	fc_pkt->pkt_ulp_rscn_infop = NULL;
5865 
5866 	/*
5867 	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
5868 	 * any responses for outbound IP data so no need to setup
5869 	 * response or data dma handles.
5870 	 */
5871 	if (ddi_dma_alloc_handle(fport->fcipp_dip,
5872 	    &fport->fcipp_cmd_dma_attr, cb, NULL,
5873 	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
5874 		return (FCIP_FAILURE);
5875 	}
5876 
5877 	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
5878 	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
5879 	    sizeof (fcip_pkt_t));
5880 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
5881 
5882 	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
5883 	    fc_pkt->pkt_data_cookie_cnt = 0;
5884 	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
5885 	    fc_pkt->pkt_data_cookie = NULL;
5886 
5887 	return (FCIP_SUCCESS);
5888 }
5889 
5890 /*
5891  * Destructor for the fcip packet kmem cache
5892  */
5893 static void
5894 fcip_cache_destructor(void *buf, void *arg)
5895 {
5896 	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)buf;
5897 	fc_packet_t		*fc_pkt;
5898 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
5899 	struct fcip		*fptr;
5900 
5901 	ASSERT(fport != NULL);
5902 
5903 	fptr = fport->fcipp_fcip;
5904 
5905 	ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
5906 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5907 
5908 	if (fc_pkt->pkt_cmd_dma) {
5909 		ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
5910 	}
5911 }
5912 
5913 /*
5914  * The fcip destination structure is hashed on the Node WWN, assuming
5915  * an NAA_ID of 0x1 (IEEE)
5916  */
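/*
 * The lookup below proceeds in stages: first the dest hash table
 * (ports we have active I/O with), then our routing table, then the
 * transport's own remote port tables via fc_ulp_get_remote_port(),
 * and finally, if FARP is enabled and the topology is fabric or
 * point to point, a FARP request via fcip_do_farp().
 */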
5917 static struct fcip_dest *
5918 fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
5919 {
5920 	struct fcip_dest	*fdestp = NULL;
5921 	fcip_port_info_t	*fport;
5922 	int			hash_bucket;
5923 	opaque_t		pd;
5924 	int			rval;
5925 	struct fcip_routing_table *frp;
5926 	la_wwn_t		twwn;
5927 	uint32_t		*twwnp = (uint32_t *)&twwn;
5928 
5929 	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
5930 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5931 	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
5932 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5933 	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5934 	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
5935 	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));
5936 
5937 	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
5938 
5939 	if (fcip_check_port_exists(fptr)) {
5940 		/* fptr is stale, return fdestp */
5941 		return (fdestp);
5942 	}
5943 	fport = fptr->fcip_port_info;
5944 
5945 	/*
5946 	 * First check if we have active I/Os going on with the
5947 	 * destination port (an entry would exist in fcip_dest hash table)
5948 	 */
5949 	mutex_enter(&fptr->fcip_dest_mutex);
5950 	fdestp = fptr->fcip_dest[hash_bucket];
5951 	while (fdestp != NULL) {
5952 		mutex_enter(&fdestp->fcipd_mutex);
5953 		if (fdestp->fcipd_rtable) {
5954 			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
5955 			    FCIP_COMPARE_NWWN) == 0) {
5956 				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5957 				    (CE_NOTE, "found fdestp"));
5958 				mutex_exit(&fdestp->fcipd_mutex);
5959 				mutex_exit(&fptr->fcip_dest_mutex);
5960 				return (fdestp);
5961 			}
5962 		}
5963 		mutex_exit(&fdestp->fcipd_mutex);
5964 		fdestp = fdestp->fcipd_next;
5965 	}
5966 	mutex_exit(&fptr->fcip_dest_mutex);
5967 
5968 	/*
5969 	 * We did not find the destination port information in our
5970 	 * active port list so search for an entry in our routing
5971 	 * table.
5972 	 */
5973 	mutex_enter(&fptr->fcip_rt_mutex);
5974 	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
5975 	mutex_exit(&fptr->fcip_rt_mutex);
5976 
5977 	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
5978 	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
5979 	    (frp && frp->fcipr_pd == NULL)) {
5980 		/*
5981 		 * No entry for the destination port in our routing
5982 		 * table too. First query the transport to see if it
5983 		 * already has structures for the destination port in
5984 		 * its hash tables. This must be done for all topologies
5985 		 * since we could have retired entries in the hash tables
5986 		 * which may have to be re-added without a statechange
5987 		 * callback happening. It's better to try and get an entry
5988 		 * for the destination port rather than simply failing a
5989 		 * request, though it may be overkill in private loop
5990 		 * topologies.
5991 		 * If an entry for the remote port exists in the transport's
5992 		 * hash tables, we are fine and can add the entry to our
5993 		 * routing and dest hash lists; else, for fabric configs, we
5994 		 * query the nameserver if one exists or issue FARP ELS.
5995 		 */
5996 
5997 		/*
5998 		 * We need to do a PortName based Nameserver
5999 		 * query operation. So get the right PortWWN
6000 		 * for the adapter.
6001 		 */
6002 		bcopy(pwwn, &twwn, sizeof (la_wwn_t));
6003 
6004 		/*
6005 		 * Try IEEE Name (Format 1) first, this is the default and
6006 		 * Emulex uses this format.
6007 		 */
6008 		pd = fc_ulp_get_remote_port(fport->fcipp_handle,
6009 		    &twwn, &rval, 1);
6010 
6011 		if (rval != FC_SUCCESS) {
6012 			/*
6013 			 * If IEEE Name (Format 1) query failed, try IEEE
6014 			 * Extended Name (Format 2) which Qlogic uses.
6015 			 * And try port 1 on Qlogic FC-HBA first.
6016 			 * Note: On x86, we need to byte swap the 32-bit
6017 			 * word first, after the modification, swap it back.
6018 			 */
6019 			*twwnp = BE_32(*twwnp);
6020 			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
6021 			twwn.w.naa_id = QLC_PORT_NAA;
6022 			*twwnp = BE_32(*twwnp);
6023 			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
6024 			    &twwn, &rval, 1);
6025 		}
6026 
6027 		if (rval != FC_SUCCESS) {
6028 			/* If still failed, try port 2 on Qlogic FC-HBA. */
6029 			*twwnp = BE_32(*twwnp);
6030 			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
6031 			*twwnp = BE_32(*twwnp);
6032 			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
6033 			    &twwn, &rval, 1);
6034 		}
6035 
6036 		if (rval == FC_SUCCESS) {
6037 			fc_portmap_t	map;
6038 			/*
6039 			 * Add the newly found destination structure
6040 			 * to our routing table. Create a map with
6041 			 * the device we found. We could ask the
6042 			 * transport to give us the list of all
6043 			 * devices connected to our port but we
6044 			 * probably don't need to know all the devices
6045 			 * so let us just construct a list with only
6046 			 * one device instead.
6047 			 */
6048 
6049 			fc_ulp_copy_portmap(&map, pd);
6050 			fcip_rt_update(fptr, &map, 1);
6051 
6052 			mutex_enter(&fptr->fcip_rt_mutex);
6053 			frp = fcip_lookup_rtable(fptr, pwwn,
6054 			    FCIP_COMPARE_NWWN);
6055 			mutex_exit(&fptr->fcip_rt_mutex);
6056 
6057 			fdestp = fcip_add_dest(fptr, frp);
6058 		} else if (fcip_farp_supported &&
6059 		    (FC_TOP_EXTERNAL(fport->fcipp_topology) ||
6060 		    (fport->fcipp_topology == FC_TOP_PT_PT))) {
6061 			/*
6062 			 * The name server request failed, so
6063 			 * issue a FARP
6064 			 */
6065 			fdestp = fcip_do_farp(fptr, pwwn, NULL, 0, 0);
6067 		} else {
6068 			fdestp = NULL;
6069 		}
6070 	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
6071 		/*
6072 		 * Prepare a dest structure to return to caller
6073 		 */
6074 		fdestp = fcip_add_dest(fptr, frp);
6075 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6076 		    (CE_NOTE, "in fcip get dest non fabric"));
6077 	}
6078 	return (fdestp);
6079 }
6080 
6081 
6082 /*
6083  * Endian clean WWN compare.
6084  * Returns 0 if they compare equal, else returns a non-zero value.
6085  * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN,
6086  * FCIP_COMPARE_BROADCAST.
6087  */
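/*
 * For example, with FCIP_COMPARE_NWWN only bytes 2-7 (the IEEE MAC
 * portion) are compared, so two WWNs that differ only in the first two
 * bytes (e.g. the two ports of a dual port adapter) compare equal;
 * FCIP_COMPARE_PWWN additionally compares the NAA nibble in byte 0 and
 * the port selector in byte 1, which distinguishes them.
 */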
6088 static int
6089 fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag)
6090 {
6091 	int rval = 0;
6092 	if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) ||
6093 	    (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) ||
6094 	    (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) ||
6095 	    (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) ||
6096 	    (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) ||
6097 	    (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) {
6098 		rval = 1;
6099 	} else if ((flag == FCIP_COMPARE_PWWN) &&
6100 	    (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) ||
6101 	    (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) {
6102 		rval = 1;
6103 	}
6104 	return (rval);
6105 }
6106 
6107 
6108 /*
6109  * Add an entry for a remote port in the dest hash table. Dest hash table
6110  * has entries for the ports in the routing hash table with which we
6111  * have decided to establish IP communication. The no. of entries in the
6112  * dest hash table must always be less than or equal to the entries in
6113  * the routing hash table. Every entry in the dest hash table must of
6114  * course have a corresponding entry in the routing hash table.
6115  */
6116 static struct fcip_dest *
6117 fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp)
6118 {
6119 	struct fcip_dest *fdestp = NULL;
6120 	la_wwn_t	*pwwn;
6121 	int hash_bucket;
6122 	struct fcip_dest *fdest_new;
6123 
6124 	if (frp == NULL) {
6125 		return (fdestp);
6126 	}
6127 
6128 	pwwn = &frp->fcipr_pwwn;
6129 	mutex_enter(&fptr->fcip_dest_mutex);
6130 	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
6131 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6132 	    (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket));
6133 
6134 	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
6135 
6136 	fdestp = fptr->fcip_dest[hash_bucket];
6137 	while (fdestp != NULL) {
6138 		mutex_enter(&fdestp->fcipd_mutex);
6139 		if (fdestp->fcipd_rtable) {
6140 			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
6141 			    FCIP_COMPARE_PWWN) == 0) {
6142 				mutex_exit(&fdestp->fcipd_mutex);
6143 				mutex_exit(&fptr->fcip_dest_mutex);
6144 				return (fdestp);
6145 			}
6146 		}
6147 		mutex_exit(&fdestp->fcipd_mutex);
6148 		fdestp = fdestp->fcipd_next;
6149 	}
6150 
6151 	ASSERT(fdestp == NULL);
6152 
6153 	fdest_new = (struct fcip_dest *)
6154 	    kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP);
6155 
6156 	mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL);
6157 	fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket];
6158 	fdest_new->fcipd_refcnt = 0;
6159 	fdest_new->fcipd_rtable = frp;
6160 	fdest_new->fcipd_ncmds = 0;
6161 	fptr->fcip_dest[hash_bucket] = fdest_new;
6162 	fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED;
6163 
6164 	mutex_exit(&fptr->fcip_dest_mutex);
6165 	return (fdest_new);
6166 }
6167 
6168 /*
6169  * Cleanup the dest hash table and remove all entries
6170  */
6171 static void
6172 fcip_cleanup_dest(struct fcip *fptr)
6173 {
6174 	struct fcip_dest *fdestp = NULL;
6175 	struct fcip_dest *fdest_delp = NULL;
6176 	int i;
6177 
6178 	mutex_enter(&fptr->fcip_dest_mutex);
6179 
6180 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
6181 		fdestp = fptr->fcip_dest[i];
6182 		while (fdestp != NULL) {
6183 			mutex_destroy(&fdestp->fcipd_mutex);
6184 			fdest_delp = fdestp;
6185 			fdestp = fdestp->fcipd_next;
6186 			kmem_free(fdest_delp, sizeof (struct fcip_dest));
6187 			fptr->fcip_dest[i] = NULL;
6188 		}
6189 	}
6190 	mutex_exit(&fptr->fcip_dest_mutex);
6191 }
6192 
6193 
6194 /*
6195  * Send FARP requests for Fabric ports when we don't have the port
6196  * we wish to talk to in our routing hash table. FARP is especially required
6197  * to talk to FC switches for inband switch management. Most FC switches
6198  * today have a switch FC IP address for IP over FC inband switch management
6199  * but the WWN and Port_ID for this traffic is not available through the
6200  * Nameservers since the switches themselves are transparent.
6201  */
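/*
 * The FARP request built below is broadcast as an ELS, matching on the
 * responder's Port WWN (or WW_PN + IPv4 address when an IP address is
 * supplied), with FARP_INIT_P_LOGI set so that the responder also
 * PLOGIs into us. We then wait on fcip_farp_cv for up to
 * FCIP_FARP_TIMEOUT and re-check the routing table to see whether the
 * reply (processed elsewhere in the driver) created a usable entry
 * for the port.
 */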
6202 /* ARGSUSED */
6203 static struct fcip_dest *
6204 fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr,
6205     size_t ip_addr_len, int flags)
6206 {
6207 	fcip_pkt_t		*fcip_pkt;
6208 	fc_packet_t		*fc_pkt;
6209 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6210 	la_els_farp_t		farp_cmd;
6211 	la_els_farp_t		*fcmd;
6212 	struct fcip_dest	*fdestp = NULL;
6213 	int			rval;
6214 	clock_t			farp_lbolt;
6215 	la_wwn_t		broadcast_wwn;
6216 	struct fcip_dest	*bdestp;
6217 	struct fcip_routing_table 	*frp;
6218 
	/* resolve the broadcast WWN before looking up the broadcast dest */
	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
6219 	bdestp = fcip_get_dest(fptr, &broadcast_wwn);
6220 
6221 	if (bdestp == NULL) {
6222 		return (fdestp);
6223 	}
6224 
6225 	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
6226 	    sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP);
6227 
6228 	if (fcip_pkt == NULL) {
6229 		return (fdestp);
6230 	}
6231 
6232 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6234 
6235 	mutex_enter(&bdestp->fcipd_mutex);
6236 	if (bdestp->fcipd_rtable == NULL) {
6237 		mutex_exit(&bdestp->fcipd_mutex);
6238 		fcip_ipkt_free(fcip_pkt);
6239 		return (fdestp);
6240 	}
6241 
6242 	fcip_pkt->fcip_pkt_dest = bdestp;
6243 	fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev;
6244 
6245 	bdestp->fcipd_ncmds++;
6246 	mutex_exit(&bdestp->fcipd_mutex);
6247 
6248 	fcip_init_broadcast_pkt(fcip_pkt, NULL, 1);
6249 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6250 
6251 	/*
6252 	 * Now initialize the FARP payload itself
6253 	 */
6254 	fcmd = &farp_cmd;
6255 	fcmd->ls_code.ls_code = LA_ELS_FARP_REQ;
6256 	fcmd->ls_code.mbz = 0;
6257 	/*
6258 	 * for now just match the Port WWN since the other match addr
6259 	 * code points are optional. We can explore matching the IP address
6260 	 * if needed
6261 	 */
6262 	if (ip_addr) {
6263 		fcmd->match_addr = FARP_MATCH_WW_PN_IPv4;
6264 	} else {
6265 		fcmd->match_addr = FARP_MATCH_WW_PN;
6266 	}
6267 
6268 	/*
6269 	 * Request the responder port to log into us - that way
6270 	 * the Transport is aware of the remote port when we create
6271 	 * an entry for it in our tables
6272 	 */
6273 	fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI;
6274 	fcmd->req_id = fport->fcipp_sid;
6275 	fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id;
6276 	bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t));
6277 	bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t));
6278 	bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t));
6279 	/*
6280 	 * copy in source IP address if we get to know it
6281 	 */
6282 	if (ip_addr) {
6283 		bcopy(ip_addr, fcmd->resp_ip, ip_addr_len);
6284 	}
6285 
6286 	fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t);
6287 	fc_pkt->pkt_rsplen = sizeof (la_els_farp_t);
6288 	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
6289 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6290 
6291 	/*
6292 	 * Endian safe copy
6293 	 */
6294 	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
6295 	    sizeof (la_els_farp_t));
6296 
6297 	/*
6298 	 * send the packet in polled mode.
6299 	 */
6300 	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
6301 	if (rval != FC_SUCCESS) {
6302 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6303 		    "fcip_transport of farp pkt failed 0x%x", rval));
6304 		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
6305 		fcip_ipkt_free(fcip_pkt);
6306 
6307 		mutex_enter(&bdestp->fcipd_mutex);
6308 		bdestp->fcipd_ncmds--;
6309 		mutex_exit(&bdestp->fcipd_mutex);
6310 
6311 		return (fdestp);
6312 	}
6313 
6314 	farp_lbolt = ddi_get_lbolt();
6315 	farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT);
6316 
6317 	mutex_enter(&fptr->fcip_mutex);
6318 	fptr->fcip_farp_rsp_flag = 0;
6319 	while (!fptr->fcip_farp_rsp_flag) {
6320 		if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex,
6321 		    farp_lbolt) == -1) {
6322 			/*
6323 			 * No FARP response from any destination port
6324 			 * so bail out.
6325 			 */
6326 			fptr->fcip_farp_rsp_flag = 1;
6327 		} else {
6328 			/*
6329 			 * We received a FARP response - check to see if the
6330 			 * response was in reply to our FARP request.
6331 			 */
6332 
6333 			mutex_enter(&fptr->fcip_rt_mutex);
6334 			frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
6335 			mutex_exit(&fptr->fcip_rt_mutex);
6336 
6337 			if ((frp != NULL) &&
6338 			    !FCIP_RTE_UNAVAIL(frp->fcipr_state)) {
6339 				fdestp = fcip_get_dest(fptr, pwwn);
6340 			} else {
6341 				/*
6342 				 * Not our FARP response so go back and wait
6343 				 * again till FARP_TIMEOUT expires
6344 				 */
6345 				fptr->fcip_farp_rsp_flag = 0;
6346 			}
6347 		}
6348 	}
6349 	mutex_exit(&fptr->fcip_mutex);
6350 
6351 	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
6352 	fcip_ipkt_free(fcip_pkt);
6353 	mutex_enter(&bdestp->fcipd_mutex);
6354 	bdestp->fcipd_ncmds--;
6355 	mutex_exit(&bdestp->fcipd_mutex);
6356 	return (fdestp);
6357 }
6358 
6359 
6360 
6361 /*
6362  * Helper routine to PLOGI to a remote port we wish to talk to.
6363  * This may not be required since the port driver does logins anyway,
6364  * but it can be required in fabric cases since FARP requests/responses
6365  * don't require the ports to be logged in.
6366  */
6367 
6368 /* ARGSUSED */
6369 static int
6370 fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp)
6371 {
6372 	fcip_pkt_t		*fcip_pkt;
6373 	fc_packet_t		*fc_pkt;
6374 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6375 	la_els_logi_t		logi;
6376 	int			rval;
6377 	fc_frame_hdr_t		*fr_hdr;
6378 
6379 	/*
6380 	 * Don't bother to login for broadcast RTE entries
6381 	 */
6382 	if ((frp->fcipr_d_id.port_id == 0x0) ||
6383 	    (frp->fcipr_d_id.port_id == 0xffffff)) {
6384 		return (FC_FAILURE);
6385 	}
6386 
6387 	/*
6388 	 * We shouldn't pound in too many logins here
6389 	 *
6390 	 */
6391 	if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS ||
6392 	    frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
6393 		return (FC_SUCCESS);
6394 	}
6395 
6396 	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_logi_t),
6397 	    sizeof (la_els_logi_t), frp->fcipr_pd, KM_SLEEP);
6398 
6399 	if (fcip_pkt == NULL) {
6400 		return (FC_FAILURE);
6401 	}
6402 
6403 	/*
6404 	 * Update back pointer for login state update
6405 	 */
6406 	fcip_pkt->fcip_pkt_frp = frp;
6407 	frp->fcipr_state = FCIP_RT_LOGIN_PROGRESS;
6408 
6409 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6410 
6411 	/*
6412 	 * Initialize frame header for ELS
6413 	 */
6414 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6415 	fr_hdr->r_ctl = R_CTL_ELS_REQ;
6416 	fr_hdr->type = FC_TYPE_EXTENDED_LS;
6417 	fr_hdr->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6418 	fr_hdr->df_ctl = 0;
6419 	fr_hdr->s_id = fport->fcipp_sid.port_id;
6420 	fr_hdr->d_id = frp->fcipr_d_id.port_id;
6421 	fr_hdr->seq_cnt = 0;
6422 	fr_hdr->ox_id = 0xffff;
6423 	fr_hdr->rx_id = 0xffff;
6424 	fr_hdr->ro = 0;
6425 
6426 	fc_pkt->pkt_rsplen = sizeof (la_els_logi_t);
6427 	fc_pkt->pkt_comp = fcip_ipkt_callback;
6428 	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
6429 	fc_pkt->pkt_timeout = 10;	/* 10 seconds */
6430 	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6431 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6432 
6433 	/*
6434 	 * Everybody does class 3, so let's just set it.  If the transport
6435 	 * knows better, it will deal with the class appropriately.
6436 	 */
6437 
6438 	fc_pkt->pkt_tran_flags = FC_TRAN_INTR | FC_TRAN_CLASS3;
6439 
6440 	/*
6441 	 * we need only fill in the ls_code and the cmd frame header
6442 	 */
6443 	bzero((void *)&logi, sizeof (la_els_logi_t));
6444 	logi.ls_code.ls_code = LA_ELS_PLOGI;
6445 	logi.ls_code.mbz = 0;
6446 
6447 	FCIP_CP_OUT((uint8_t *)&logi, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
6448 	    sizeof (la_els_logi_t));
6449 
6450 	rval = fc_ulp_login(fport->fcipp_handle, &fc_pkt, 1);
6451 	if (rval != FC_SUCCESS) {
6452 		cmn_err(CE_WARN,
6453 		    "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x",
6454 		    frp->fcipr_d_id.port_id, rval);
6455 		fcip_ipkt_free(fcip_pkt);
6456 	}
6457 	return (rval);
6458 }
6459 
6460 /*
6461  * The packet callback routine - called from the transport/FCA after
6462  * it is done DMA'ing/sending out the packet contents on the wire so
6463  * that the alloc'ed packet can be freed
6464  */
6465 static void
6466 fcip_ipkt_callback(fc_packet_t *fc_pkt)
6467 {
6468 	ls_code_t			logi_req;
6469 	ls_code_t			logi_resp;
6470 	fcip_pkt_t			*fcip_pkt;
6471 	fc_frame_hdr_t			*fr_hdr;
6472 	struct fcip 			*fptr;
6473 	fcip_port_info_t		*fport;
6474 	struct fcip_routing_table	*frp;
6475 
6476 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6477 
6478 	FCIP_CP_IN(fc_pkt->pkt_resp, (uint8_t *)&logi_resp,
6479 	    fc_pkt->pkt_resp_acc, sizeof (logi_resp));
6480 
6481 	FCIP_CP_IN(fc_pkt->pkt_cmd, (uint8_t *)&logi_req, fc_pkt->pkt_cmd_acc,
6482 	    sizeof (logi_req));
6483 
6484 	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
6485 	frp = fcip_pkt->fcip_pkt_frp;
6486 	fptr = fcip_pkt->fcip_pkt_fptr;
6487 	fport = fptr->fcip_port_info;
6488 
6489 	ASSERT(logi_req.ls_code == LA_ELS_PLOGI);
6490 
6491 	if (fc_pkt->pkt_state != FC_PKT_SUCCESS ||
6492 	    logi_resp.ls_code != LA_ELS_ACC) {
6493 		/* EMPTY */
6494 
6495 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6496 		    "opcode : 0x%x to d_id: 0x%x failed",
6497 		    logi_req.ls_code, fr_hdr->d_id));
6498 
6499 		mutex_enter(&fptr->fcip_rt_mutex);
6500 		frp->fcipr_state = PORT_DEVICE_INVALID;
6501 		frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
6502 		    (FCIP_RTE_TIMEOUT / 2);
6503 		mutex_exit(&fptr->fcip_rt_mutex);
6504 	} else {
6505 		fc_portid_t	d_id;
6506 
6507 		d_id.port_id = fr_hdr->d_id;
6508 		d_id.priv_lilp_posit = 0;
6509 
6510 		/*
6511 		 * Update PLOGI results; FCA Handle, and Port device handles
6512 		 */
6513 		mutex_enter(&fptr->fcip_rt_mutex);
6514 		frp->fcipr_pd = fc_pkt->pkt_pd;
6515 		frp->fcipr_fca_dev =
6516 		    fc_ulp_get_fca_device(fport->fcipp_handle, d_id);
6517 		frp->fcipr_state = PORT_DEVICE_LOGGED_IN;
6518 		mutex_exit(&fptr->fcip_rt_mutex);
6519 	}
6520 
6521 	fcip_ipkt_free(fcip_pkt);
6522 }
6523 
6524 
6525 /*
6526  * pkt_alloc routine for outbound IP datagrams. The cache constructor
6527  * only initializes the pkt_cmd_dma (which is where the outbound datagram
6528  * is stuffed) since we don't expect a response
6529  */
6530 static fcip_pkt_t *
6531 fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, int flags, int datalen)
6532 {
6533 	fcip_pkt_t 	*fcip_pkt;
6534 	fc_packet_t	*fc_pkt;
6535 	ddi_dma_cookie_t	pkt_cookie;
6536 	ddi_dma_cookie_t	*cp;
6537 	uint32_t		cnt;
6538 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6539 
6540 	fcip_pkt = kmem_cache_alloc(fptr->fcip_xmit_cache, flags);
6541 	if (fcip_pkt == NULL) {
6542 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6543 		    "fcip_pkt_alloc: kmem_cache_alloc failed"));
6544 		return (NULL);
6545 	}
6546 
6547 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6548 	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
6549 	fc_pkt->pkt_tran_flags = 0;
6550 	fcip_pkt->fcip_pkt_dma_flags = 0;
6551 
6552 	/*
6553 	 * the cache constructor has allocated the dma handle
6554 	 */
6555 	fc_pkt->pkt_cmd = (caddr_t)bp->b_rptr;
6556 	if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
6557 	    (caddr_t)bp->b_rptr, datalen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
6558 	    DDI_DMA_DONTWAIT, NULL, &pkt_cookie,
6559 	    &fc_pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED) {
6560 			goto fail;
6561 	}
6562 
6563 	fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;
6564 
6565 	if (fc_pkt->pkt_cmd_cookie_cnt >
6566 	    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
6567 		goto fail;
6568 	}
6569 
6570 	ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);
6571 
6572 	cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6573 	    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
6574 	    KM_NOSLEEP);
6575 
6576 	if (cp == NULL) {
6577 		goto fail;
6578 	}
6579 
6580 	*cp = pkt_cookie;
6581 	cp++;
6582 	for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
6583 		ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
6584 		*cp = pkt_cookie;
6585 	}
6586 
6587 	fc_pkt->pkt_cmdlen = datalen;
6588 
6589 	fcip_pkt->fcip_pkt_mp = NULL;
6590 	fcip_pkt->fcip_pkt_wq = NULL;
6591 	fcip_pkt->fcip_pkt_dest = NULL;
6592 	fcip_pkt->fcip_pkt_next = NULL;
6593 	fcip_pkt->fcip_pkt_prev = NULL;
6594 	fcip_pkt->fcip_pkt_state = 0;
6595 	fcip_pkt->fcip_pkt_reason = 0;
6596 	fcip_pkt->fcip_pkt_flags = 0;
6597 	fcip_pkt->fcip_pkt_frp = NULL;
6598 
6599 	return (fcip_pkt);
6600 fail:
6601 	if (fcip_pkt) {
6602 		fcip_pkt_free(fcip_pkt, 0);
6603 	}
6604 	return ((fcip_pkt_t *)0);
6605 }
6606 
6607 /*
6608  * Free a packet and all its associated resources
6609  */
6610 static void
6611 fcip_pkt_free(struct fcip_pkt *fcip_pkt, int free_mblk)
6612 {
6613 	fc_packet_t	*fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6614 	struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;
6615 
6616 	if (fc_pkt->pkt_cmd_cookie != NULL) {
6617 		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
6618 		    sizeof (ddi_dma_cookie_t));
6619 		fc_pkt->pkt_cmd_cookie = NULL;
6620 	}
6621 
6622 	fcip_free_pkt_dma(fcip_pkt);
6623 	if (free_mblk && fcip_pkt->fcip_pkt_mp) {
6624 		freemsg(fcip_pkt->fcip_pkt_mp);
6625 		fcip_pkt->fcip_pkt_mp = NULL;
6626 	}
6627 
6628 	(void) fc_ulp_uninit_packet(fptr->fcip_port_info->fcipp_handle, fc_pkt);
6629 
6630 	kmem_cache_free(fptr->fcip_xmit_cache, (void *)fcip_pkt);
6631 }
6632 
6633 /*
6634  * Allocate a Packet for internal driver use. This is for requests
6635  * that originate from within the driver
6636  */
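/*
 * Allocation proceeds roughly as follows: the fcip_pkt_t plus the
 * FCA private packet area are kmem_zalloc'ed in one chunk; then, for
 * each of the cmd and resp sides that were requested, a DMA handle is
 * allocated, DMA memory is allocated and bound, and the resulting
 * cookie list is saved in the fc_packet. Finally the packet is handed
 * to the FCA via fc_ulp_init_packet() and the count of outstanding
 * internal packets is bumped.
 */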
6637 static fcip_pkt_t *
6638 fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, int resplen,
6639     opaque_t pd, int flags)
6640 {
6641 	fcip_pkt_t 		*fcip_pkt;
6642 	fc_packet_t		*fc_pkt;
6643 	int			(*cb)(caddr_t);
6644 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6645 	size_t			real_len;
6646 	uint_t			held_here = 0;
6647 	ddi_dma_cookie_t	pkt_cookie;
6648 	ddi_dma_cookie_t	*cp;
6649 	uint32_t		cnt;
6650 
6651 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
6652 
6653 	fcip_pkt = kmem_zalloc((sizeof (fcip_pkt_t) +
6654 	    fport->fcipp_fca_pkt_size), flags);
6655 
6656 	if (fcip_pkt == NULL) {
6657 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6658 		    (CE_WARN, "pkt alloc of internal pkt failed"));
6659 		goto fail;
6660 	}
6661 
6662 	fcip_pkt->fcip_pkt_flags = FCIP_PKT_INTERNAL;
6663 	fcip_pkt->fcip_pkt_fptr = fptr;
6664 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6665 	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
6666 	fc_pkt->pkt_tran_flags = 0;
6667 	fc_pkt->pkt_cmdlen = 0;
6668 	fc_pkt->pkt_rsplen = 0;
6669 	fc_pkt->pkt_datalen = 0;
6670 	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)fcip_pkt +
6671 	    sizeof (fcip_pkt_t));
6672 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6673 
6674 	if (cmdlen) {
6675 		if (ddi_dma_alloc_handle(fptr->fcip_dip,
6676 		    &fport->fcipp_cmd_dma_attr, cb, NULL,
6677 		    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
6678 			goto fail;
6679 		}
6680 
6681 		if (ddi_dma_mem_alloc(fc_pkt->pkt_cmd_dma, cmdlen,
6682 		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6683 		    cb, NULL, (caddr_t *)&fc_pkt->pkt_cmd,
6684 		    &real_len, &fc_pkt->pkt_cmd_acc) != DDI_SUCCESS) {
6685 			goto fail;
6686 		}
6687 
6688 		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_MEM;
6689 		fc_pkt->pkt_cmdlen = cmdlen;
6690 
6691 		if (real_len < cmdlen) {
6692 			goto fail;
6693 		}
6694 
6695 		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
6696 		    (caddr_t)fc_pkt->pkt_cmd, real_len,
6697 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
6698 		    &pkt_cookie, &fc_pkt->pkt_cmd_cookie_cnt) !=
6699 		    DDI_DMA_MAPPED) {
6700 			goto fail;
6701 		}
6702 
6703 		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;
6704 
6705 		if (fc_pkt->pkt_cmd_cookie_cnt >
6706 		    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
6707 			goto fail;
6708 		}
6709 
6710 		ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);
6711 
6712 		cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6713 		    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
6714 		    KM_NOSLEEP);
6715 
6716 		if (cp == NULL) {
6717 			goto fail;
6718 		}
6719 
6720 		*cp = pkt_cookie;
6721 		cp++;
6722 		for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
6723 			ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
6724 			*cp = pkt_cookie;
6725 		}
6726 	}
6727 
6728 	if (resplen) {
6729 		if (ddi_dma_alloc_handle(fptr->fcip_dip,
6730 		    &fport->fcipp_resp_dma_attr, cb, NULL,
6731 		    &fc_pkt->pkt_resp_dma) != DDI_SUCCESS) {
6732 			goto fail;
6733 		}
6734 
6735 		if (ddi_dma_mem_alloc(fc_pkt->pkt_resp_dma, resplen,
6736 		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6737 		    cb, NULL, (caddr_t *)&fc_pkt->pkt_resp,
6738 		    &real_len, &fc_pkt->pkt_resp_acc) != DDI_SUCCESS) {
6739 			goto fail;
6740 		}
6741 
6742 		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_MEM;
6743 
6744 		if (real_len < resplen) {
6745 			goto fail;
6746 		}
6747 
6748 		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_resp_dma, NULL,
6749 		    (caddr_t)fc_pkt->pkt_resp, real_len,
6750 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
6751 		    &pkt_cookie, &fc_pkt->pkt_resp_cookie_cnt) !=
6752 		    DDI_DMA_MAPPED) {
6753 			goto fail;
6754 		}
6755 
6756 		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_BOUND;
6757 		fc_pkt->pkt_rsplen = resplen;
6758 
6759 		if (fc_pkt->pkt_resp_cookie_cnt >
6760 		    fport->fcipp_resp_dma_attr.dma_attr_sgllen) {
6761 			goto fail;
6762 		}
6763 
6764 		ASSERT(fc_pkt->pkt_resp_cookie_cnt != 0);
6765 
6766 		cp = fc_pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6767 		    fc_pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
6768 		    KM_NOSLEEP);
6769 
6770 		if (cp == NULL) {
6771 			goto fail;
6772 		}
6773 
6774 		*cp = pkt_cookie;
6775 		cp++;
6776 		for (cnt = 1; cnt < fc_pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
6777 			ddi_dma_nextcookie(fc_pkt->pkt_resp_dma, &pkt_cookie);
6778 			*cp = pkt_cookie;
6779 		}
6780 	}
6781 
6782 	/*
6783 	 * Initialize pkt_pd prior to calling fc_ulp_init_packet
6784 	 */
6785 
6786 	fc_pkt->pkt_pd = pd;
6787 
6788 	/*
6789 	 * Ask the FCA to bless the internal packet
6790 	 */
6791 	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
6792 	    fc_pkt, flags) != FC_SUCCESS) {
6793 		goto fail;
6794 	}
6795 
6796 	/*
6797 	 * Keep track of # of ipkts alloc-ed
6798 	 * This function can get called with fcip_mutex either held or not,
6799 	 * so we'll grab the mutex only if it is not already held by this thread.
6800 	 * This has to be cleaned up someday.
6801 	 */
6802 	if (!MUTEX_HELD(&fptr->fcip_mutex)) {
6803 		held_here = 1;
6804 		mutex_enter(&fptr->fcip_mutex);
6805 	}
6806 
6807 	fptr->fcip_num_ipkts_pending++;
6808 
6809 	if (held_here)
6810 		mutex_exit(&fptr->fcip_mutex);
6811 
6812 	return (fcip_pkt);
6813 fail:
6814 	if (fcip_pkt) {
6815 		fcip_ipkt_free(fcip_pkt);
6816 	}
6817 
6818 	return (NULL);
6819 }
6820 
6821 /*
6822  * free up an internal IP packet (like a FARP pkt etc)
6823  */
6824 static void
6825 fcip_ipkt_free(fcip_pkt_t *fcip_pkt)
6826 {
6827 	fc_packet_t		*fc_pkt;
6828 	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6829 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6830 
6831 	ASSERT(fptr != NULL);
6832 	ASSERT(!mutex_owned(&fptr->fcip_mutex));
6833 
6834 	/* One less ipkt to wait for */
6835 	mutex_enter(&fptr->fcip_mutex);
6836 	if (fptr->fcip_num_ipkts_pending)	/* Safety check */
6837 		fptr->fcip_num_ipkts_pending--;
6838 	mutex_exit(&fptr->fcip_mutex);
6839 
6840 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6841 
6842 	if (fc_pkt->pkt_cmd_cookie != NULL) {
6843 		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
6844 		    sizeof (ddi_dma_cookie_t));
6845 		fc_pkt->pkt_cmd_cookie = NULL;
6846 	}
6847 
6848 	if (fc_pkt->pkt_resp_cookie != NULL) {
6849 		kmem_free(fc_pkt->pkt_resp_cookie, fc_pkt->pkt_resp_cookie_cnt *
6850 		    sizeof (ddi_dma_cookie_t));
6851 		fc_pkt->pkt_resp_cookie = NULL;
6852 	}
6853 
6854 	if (fc_ulp_uninit_packet(fport->fcipp_handle, fc_pkt) != FC_SUCCESS) {
6855 		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
6856 		    "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p",
6857 		    (void *)fc_pkt));
6858 	}
6859 	fcip_free_pkt_dma(fcip_pkt);
6860 	kmem_free(fcip_pkt, (sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size));
6861 }
6862 
6863 /*
6864  * initialize a unicast request. This is a misnomer because even the
6865  * broadcast requests are initialized with this routine
6866  */
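/*
 * The frame header fields set here give the RFC 2625 framing for IP
 * over FC: unsolicited device data (R_CTL), TYPE 8802 SNAP and DF_CTL
 * indicating that a network header is present in the payload.
 * OX_ID/RX_ID are left unassigned (0xffff).
 */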
6867 static void
6868 fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, fc_portid_t did,
6869     void (*comp) ())
6870 {
6871 	fc_packet_t		*fc_pkt;
6872 	fc_frame_hdr_t		*fr_hdr;
6873 	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6874 
6875 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6876 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6877 
6878 	fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6879 	fr_hdr->s_id = sid.port_id;
6880 	fr_hdr->d_id = did.port_id;
6881 	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6882 	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ;
6883 	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6884 	fr_hdr->seq_cnt = 0;
6885 	fr_hdr->ox_id = 0xffff;
6886 	fr_hdr->rx_id = 0xffff;
6887 	fr_hdr->ro = 0;
6888 	/*
6889 	 * reset all the length fields
6890 	 */
6891 	fc_pkt->pkt_rsplen = 0;
6892 	fc_pkt->pkt_datalen = 0;
6893 	fc_pkt->pkt_comp = comp;
6894 	if (comp) {
6895 		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6896 	} else {
6897 		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6898 	}
6899 	fc_pkt->pkt_tran_type = FC_PKT_OUTBOUND | FC_PKT_IP_WRITE;
6900 	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6901 	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6902 }
6903 
6904 
6905 /*
6906  * Initialize a fcip_packet for broadcast data transfers
6907  */
6908 static void
6909 fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), int is_els)
6910 {
6911 	fc_packet_t		*fc_pkt;
6912 	fc_frame_hdr_t		*fr_hdr;
6913 	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6914 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6915 	uint32_t		sid;
6916 	uint32_t		did;
6917 
6918 	FCIP_TNF_PROBE_1((fcip_init_broadcast_pkt, "fcip io", /* CSTYLED */,
6919 		tnf_string, msg, "enter"));
6920 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6921 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6922 	sid = fport->fcipp_sid.port_id;
6923 
6924 	if (is_els) {
6925 		fr_hdr->r_ctl = R_CTL_ELS_REQ;
6926 	} else {
6927 		fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6928 	}
6929 	fr_hdr->s_id = sid;
6930 	/*
6931 	 * The destination broadcast address depends on the topology
6932 	 * of the underlying port
6933 	 */
6934 	did = fptr->fcip_broadcast_did;
6935 	/*
6936 	 * mark pkt a broadcast pkt
6937 	 */
6938 	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6939 
6940 	fr_hdr->d_id = did;
6941 	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6942 	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
6943 	fr_hdr->f_ctl &= ~(F_CTL_SEQ_INITIATIVE);
6944 	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6945 	fr_hdr->seq_cnt = 0;
6946 	fr_hdr->ox_id = 0xffff;
6947 	fr_hdr->rx_id = 0xffff;
6948 	fr_hdr->ro = 0;
6949 	fc_pkt->pkt_comp = comp;
6950 
6951 	if (comp) {
6952 		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6953 	} else {
6954 		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6955 	}
6956 
6957 	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6958 	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6959 	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6960 }
6961 
6962 
6963 
6964 /*
6965  * Free up all DMA resources associated with an allocated packet
6966  */
6967 static void
6968 fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt)
6969 {
6970 	fc_packet_t	*fc_pkt;
6971 
6972 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6973 
6974 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6975 	    (CE_NOTE, "in freepktdma : flags 0x%x",
6976 	    fcip_pkt->fcip_pkt_dma_flags));
6977 
6978 	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_BOUND) {
6979 		(void) ddi_dma_unbind_handle(fc_pkt->pkt_cmd_dma);
6980 	}
6981 	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_MEM) {
6982 		ddi_dma_mem_free(&fc_pkt->pkt_cmd_acc);
6983 	}
6984 
6985 	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_BOUND) {
6986 		(void) ddi_dma_unbind_handle(fc_pkt->pkt_resp_dma);
6987 	}
6988 	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_MEM) {
6989 		ddi_dma_mem_free(&fc_pkt->pkt_resp_acc);
6990 	}
6991 	/*
6992 	 * for internal commands, we need to free up the dma handles too.
6993 	 * This is done in the cache destructor for non internal cmds
6994 	 */
6995 	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_INTERNAL) {
6996 		if (fc_pkt->pkt_cmd_dma) {
6997 			ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
6998 		}
6999 		if (fc_pkt->pkt_resp_dma) {
7000 			ddi_dma_free_handle(&fc_pkt->pkt_resp_dma);
7001 		}
7002 	}
7003 }
7004 
7005 
7006 /*
7007  * helper routine to generate a string, given an ether addr
7008  */
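/*
 * The destination buffer must be able to hold at least
 * (2 * ETHERADDRL) + 1 = 13 bytes; for example the (hypothetical)
 * address 0:20:f2:0:0:1 is rendered as the string "0020F2000001".
 */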
7009 static void
7010 fcip_ether_to_str(struct ether_addr *e, caddr_t s)
7011 {
7012 	int i;
7013 
7014 	for (i = 0; i < sizeof (struct ether_addr); i++, s += 2) {
7015 		FCIP_DEBUG(FCIP_DEBUG_MISC,
7016 		    (CE_CONT, "0x%02X:", e->ether_addr_octet[i]));
7017 		(void) sprintf(s, "%02X", e->ether_addr_octet[i]);
7018 	}
7019 
7020 	*s = '\0';
7021 }
7022 
7023 /*
7024  * When a broadcast request comes from the upper streams modules, it
7025  * is ugly to look into every datagram to figure out if it is a broadcast
7026  * datagram or a unicast packet. Instead just add the broadcast entries
7027  * into our routing and dest tables and the standard hash table look ups
7028  * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig
7029  * seems to be very ethernet specific and it requires broadcasts to the
7030  * ether broadcast addr of 0xffffffffff to succeed even though we specified
7031  * ether broadcast addr of 0xff ff ff ff ff ff to succeed even though we
7032  * specified in the dl_info request that our broadcast MAC addr is
7033  * 0x00 00 00 00 00 00 (can't figure out why RFC 2625 did this though). So
7034  * add broadcast entries for both MAC addresses.
 */
7035 static int
7036 fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag)
7037 {
7038 	fc_portmap_t 		map;
7039 	struct fcip_routing_table *frp;
7040 	uint32_t		did;
7041 	la_wwn_t		broadcast_wwn;
7042 
7043 	/*
7044 	 * get port_id of destination for broadcast - this is topology
7045 	 * dependent
7046 	 */
7047 	did = fptr->fcip_broadcast_did;
7048 
7049 	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
7050 	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
7051 	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
7052 
7053 	map.map_did.port_id = did;
7054 	map.map_hard_addr.hard_addr = did;
7055 	map.map_state = PORT_DEVICE_VALID;
7056 	if (new_flag) {
7057 		map.map_type = PORT_DEVICE_NEW;
7058 	} else {
7059 		map.map_type = PORT_DEVICE_CHANGED;
7060 	}
7061 	map.map_flags = 0;
7062 	map.map_pd = NULL;
7063 	bzero(&map.map_fc4_types, sizeof (map.map_fc4_types));
7064 	fcip_rt_update(fptr, &map, 1);
7065 	mutex_enter(&fptr->fcip_rt_mutex);
7066 	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
7067 	mutex_exit(&fptr->fcip_rt_mutex);
7068 	if (frp == NULL) {
7069 		return (FC_FAILURE);
7070 	}
7071 	(void) fcip_add_dest(fptr, frp);
7072 	/*
7073 	 * The Upper IP layers expect the traditional broadcast MAC addr
7074 	 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
7075 	 * stream through the /etc/hostname.fcipXX file. Instead of checking
7076 	 * each phys addr for a match with fcip's ARP header broadcast
7077 	 * addr (0x00 00 00 00 00 00), it's simply easier to add another
7078 	 * broadcast entry for 0xff ff ff ff ff ff.
7079 	 */
7080 	ether_to_wwn(&fcipnhbroadcastaddr, &broadcast_wwn);
7081 	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
7082 	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
7083 	fcip_rt_update(fptr, &map, 1);
7084 	mutex_enter(&fptr->fcip_rt_mutex);
7085 	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
7086 	mutex_exit(&fptr->fcip_rt_mutex);
7087 	if (frp == NULL) {
7088 		return (FC_FAILURE);
7089 	}
7090 	(void) fcip_add_dest(fptr, frp);
7091 	return (FC_SUCCESS);
7092 }
7093 
7094 /*
7095  * We need to obtain the D_ID of the broadcast port for transmitting all
7096  * our broadcast (and multicast) requests. The broadcast D_ID, as we know,
7097  * is dependent on the link topology.
7098  */
7099 static uint32_t
7100 fcip_get_broadcast_did(struct fcip *fptr)
7101 {
7102 	fcip_port_info_t	*fport = fptr->fcip_port_info;
7103 	uint32_t		did = 0;
7104 	uint32_t		sid;
7105 
7106 	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
7107 		tnf_string, msg, "enter",
7108 		tnf_opaque, fptr, fptr));
7109 
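	/*
	 * Our own S_ID serves as the fallback D_ID if the topology turns
	 * out to be unknown (see the default case below).
	 */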
7110 	sid = fport->fcipp_sid.port_id;
7111 
7112 	switch (fport->fcipp_topology) {
7113 
7114 	case FC_TOP_PT_PT: {
7115 		fc_portmap_t	*port_map = NULL;
7116 		uint32_t	listlen = 0;
7117 
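		/*
		 * In point-to-point topology the port map is expected to
		 * contain exactly one entry (the remote port), whose D_ID
		 * is used as the broadcast destination.
		 */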
7118 		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
7119 		    &listlen, FC_ULP_PLOGI_DONTCARE) == FC_SUCCESS) {
7120 			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE,
7121 			    "fcip_gpmap: listlen :  0x%x", listlen));
7122 			if (listlen == 1) {
7123 				did = port_map->map_did.port_id;
7124 			}
7125 		}
7126 		if (port_map) {
7127 			kmem_free(port_map, listlen * sizeof (fc_portmap_t));
7128 		}
7129 		if (listlen != 1) {
7130 			/* Dummy return value */
7131 			return (0x00FFFFFF);
7132 		}
7133 		break;
7134 	}
7135 
7136 	case FC_TOP_NO_NS:
7137 	/* FALLTHROUGH */
7138 	case FC_TOP_FABRIC:
7139 		/*
7140 		 * The broadcast address is the same whether or not
7141 		 * the switch/fabric contains a Name service.
7142 		 */
7143 		did = 0x00FFFFFF;
7144 		break;
7145 
7146 	case FC_TOP_PUBLIC_LOOP:
7147 		/*
7148 		 * The open replicate primitive must not be used. The
7149 		 * broadcast sequence is simply sent to ALPA 0x00. The
7150 		 * fabric controller then propagates the broadcast to all
7151 		 * other ports. The fabric propagates the broadcast by
7152 		 * using the OPNfr primitive.
7153 		 */
7154 		did = 0x00;
7155 		break;
7156 
7157 	case FC_TOP_PRIVATE_LOOP:
7158 		/*
7159 		 * The source port for broadcast in private loop mode
7160 		 * must send an OPN(fr) signal forcing all ports in the
7161 		 * loop to replicate the frames that they receive.
7162 		 */
7163 		did = 0x00FFFFFF;
7164 		break;
7165 
7166 	case FC_TOP_UNKNOWN:
7167 	/* FALLTHROUGH */
7168 	default:
7169 		did = sid;
7170 		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
7171 		    "fcip(0x%x):unknown topology in fcip_get_broadcast_did",
7172 		    fptr->fcip_instance));
7173 		break;
7174 	}
7175 	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
7176 		tnf_string, msg, "return",
7177 		tnf_opaque, did, did));
7178 
7179 	return (did);
7180 }
7181 
7182 
7183 /*
7184  * fcip timeout performs 2 operations:
7185  * 1. Time out any packets sent to the FCA for which a callback hasn't
7186  *    happened yet. Even though all traffic in FCIP is unidirectional,
7187  *    and hence all exchanges are unidirectional, we still need the
7188  *    callback: we can only free up the resources after we know the FCA
7189  *    has DMA'ed out the data.
7190  *
7191  * 2. Retire any routing table entries we marked for retiring. This is
7192  *    to give the link a chance to recover instead of marking a port down
7193  *    when we have lost all communication with it after a link transition.
7194  */
7195 static void
7196 fcip_timeout(void *arg)
7197 {
7198 	struct fcip 			*fptr = (struct fcip *)arg;
7199 	int				i;
7200 	fcip_pkt_t			*fcip_pkt;
7201 	struct fcip_dest		*fdestp;
7202 	int 				index;
7203 	struct fcip_routing_table 	*frtp;
7204 	int				dispatch_rte_removal = 0;
7205 
7206 	mutex_enter(&fptr->fcip_mutex);
7207 
7208 	fptr->fcip_flags |= FCIP_IN_TIMEOUT;
7209 	fptr->fcip_timeout_ticks += fcip_tick_incr;
7210 
7211 	if (fptr->fcip_flags & (FCIP_DETACHED | FCIP_DETACHING |
7212 	    FCIP_SUSPENDED | FCIP_POWER_DOWN)) {
7213 		fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7214 		mutex_exit(&fptr->fcip_mutex);
7215 		return;
7216 	}
7217 
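	/*
	 * If the port has stayed offline beyond the fcip_mark_offline
	 * deadline, flag the link as down.
	 */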
7218 	if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
7219 		if (fptr->fcip_timeout_ticks > fptr->fcip_mark_offline) {
7220 			fptr->fcip_flags |= FCIP_LINK_DOWN;
7221 		}
7222 	}
7223 	if (!(fptr->fcip_flags & FCIP_RTE_REMOVING)) {
7224 		dispatch_rte_removal = 1;
7225 	}
7226 	mutex_exit(&fptr->fcip_mutex);
7227 
7228 	/*
7229 	 * Check if we have any Invalid routing table entries in our
7230 	 * hashtable that we have marked off for deferred removal. If any,
7231 	 * we can spawn a taskq thread to do the cleanup for us. We
7232 	 * need to avoid cleanup in the timeout thread since we may
7233 	 * have to wait for outstanding commands to complete before
7234 	 * we retire a routing table entry. Also dispatch the taskq
7235 	 * thread only if we do not already have a taskq thread
7236 	 * dispatched.
7237 	 */
7238 	if (dispatch_rte_removal) {
7239 		mutex_enter(&fptr->fcip_rt_mutex);
7240 		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
7241 			frtp = fptr->fcip_rtable[index];
7242 			while (frtp) {
7243 				if ((frtp->fcipr_state == FCIP_RT_INVALID) &&
7244 				    (fptr->fcip_timeout_ticks >
7245 				    frtp->fcipr_invalid_timeout)) {
7246 					/*
7247 					 * If we cannot schedule a task thread
7248 					 * let us attempt again on the next
7249 					 * tick rather than call
7250 					 * fcip_rte_remove_deferred() from here
7251 					 * directly since the routine can sleep.
7252 					 */
7253 					frtp->fcipr_state = FCIP_RT_RETIRED;
7254 
7255 					mutex_enter(&fptr->fcip_mutex);
7256 					fptr->fcip_flags |= FCIP_RTE_REMOVING;
7257 					mutex_exit(&fptr->fcip_mutex);
7258 
7259 					if (taskq_dispatch(fptr->fcip_tq,
7260 					    fcip_rte_remove_deferred, fptr,
7261 					    KM_NOSLEEP) == TASKQID_INVALID) {
7262 						/*
7263 						 * failed - so mark the entry
7264 						 * as invalid again.
7265 						 */
7266 						frtp->fcipr_state =
7267 						    FCIP_RT_INVALID;
7268 
7269 						mutex_enter(&fptr->fcip_mutex);
7270 						fptr->fcip_flags &=
7271 						    ~FCIP_RTE_REMOVING;
7272 						mutex_exit(&fptr->fcip_mutex);
7273 					}
7274 				}
7275 				frtp = frtp->fcipr_next;
7276 			}
7277 		}
7278 		mutex_exit(&fptr->fcip_rt_mutex);
7279 	}
7280 
7281 	mutex_enter(&fptr->fcip_dest_mutex);
7282 
7283 	/*
7284 	 * Now timeout any packets stuck with the transport/FCA for too long
7285 	 */
7286 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7287 		fdestp = fptr->fcip_dest[i];
7288 		while (fdestp != NULL) {
7289 			mutex_enter(&fdestp->fcipd_mutex);
7290 			for (fcip_pkt = fdestp->fcipd_head; fcip_pkt != NULL;
7291 			    fcip_pkt = fcip_pkt->fcip_pkt_next) {
7292 				if (fcip_pkt->fcip_pkt_flags &
7293 				    (FCIP_PKT_RETURNED | FCIP_PKT_IN_TIMEOUT |
7294 				    FCIP_PKT_IN_ABORT)) {
7295 					continue;
7296 				}
7297 				if (fptr->fcip_timeout_ticks >
7298 				    fcip_pkt->fcip_pkt_ttl) {
7299 					fcip_pkt->fcip_pkt_flags |=
7300 					    FCIP_PKT_IN_TIMEOUT;
7301 
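					/*
					 * Drop fcipd_mutex across the
					 * dispatch: on dispatch failure we
					 * call fcip_pkt_timeout() directly,
					 * and that routine acquires
					 * fcipd_mutex itself while
					 * dequeueing the packet.
					 */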
7302 					mutex_exit(&fdestp->fcipd_mutex);
7303 					if (taskq_dispatch(fptr->fcip_tq,
7304 					    fcip_pkt_timeout, fcip_pkt,
7305 					    KM_NOSLEEP) == TASKQID_INVALID) {
7306 						/*
7307 						 * timeout immediately
7308 						 */
7309 						fcip_pkt_timeout(fcip_pkt);
7310 					}
7311 					mutex_enter(&fdestp->fcipd_mutex);
7312 					/*
7313 					 * The linked list is altered because
7314 					 * of one of the following reasons:
7315 					 *	a. Timeout code dequeued a pkt
7316 					 *	b. Pkt completion happened
7317 					 *
7318 					 * So restart the scan from the
7319 					 * head again. This is a bit
7320 					 * excessive, but okay: since
7321 					 * fcip_timeout_ticks isn't incremented
7322 					 * during this scan, we will skip the
7323 					 * not-to-be-timed-out packets quickly.
7324 					 */
7325 					fcip_pkt = fdestp->fcipd_head;
7326 					if (fcip_pkt == NULL) {
7327 						break;
7328 					}
7329 				}
7330 			}
7331 			mutex_exit(&fdestp->fcipd_mutex);
7332 			fdestp = fdestp->fcipd_next;
7333 		}
7334 	}
7335 	mutex_exit(&fptr->fcip_dest_mutex);
7336 
7337 	/*
7338 	 * reschedule the timeout thread
7339 	 */
7340 	mutex_enter(&fptr->fcip_mutex);
7341 
7342 	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
7343 	    drv_usectohz(1000000));
7344 	fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7345 	mutex_exit(&fptr->fcip_mutex);
7346 }
7347 
7348 
7349 /*
7350  * This routine, called either from a taskq thread or directly from
7351  * fcip_timeout, does the actual job of aborting the packet.
7352  */
7353 static void
7354 fcip_pkt_timeout(void *arg)
7355 {
7356 	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)arg;
7357 	struct fcip_dest	*fdestp;
7358 	struct fcip		*fptr;
7359 	fc_packet_t		*fc_pkt;
7360 	fcip_port_info_t	*fport;
7361 	int			rval;
7362 
7363 	fdestp = fcip_pkt->fcip_pkt_dest;
7364 	fptr = fcip_pkt->fcip_pkt_fptr;
7365 	fport = fptr->fcip_port_info;
7366 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
7367 
7368 	/*
7369 	 * try to abort the pkt
7370 	 */
7371 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_ABORT;
7372 	rval = fc_ulp_abort(fport->fcipp_handle, fc_pkt, KM_NOSLEEP);
7373 
7374 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
7375 	    (CE_NOTE, "fc_ulp_abort returns: 0x%x", rval));
7376 
7377 	if (rval == FC_SUCCESS) {
7378 		ASSERT(fdestp != NULL);
7379 
7380 		/*
7381 		 * dequeue the pkt from the dest structure pkt list
7382 		 */
7383 		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
7384 		mutex_enter(&fdestp->fcipd_mutex);
7385 		rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
7386 		ASSERT(rval == 1);
7387 		mutex_exit(&fdestp->fcipd_mutex);
7388 
7389 		/*
7390 		 * Now cleanup the pkt and free the mblk
7391 		 */
7392 		fcip_pkt_free(fcip_pkt, 1);
7393 	} else {
7394 		/*
7395 		 * abort failed - just mark the pkt as done and
7396 		 * wait for it to complete in fcip_pkt_callback since
7397 		 * the pkt has already been xmitted by the FCA
7398 		 */
7399 		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_TIMEOUT;
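		/*
		 * If the completion callback has already returned the packet
		 * (FCIP_PKT_RETURNED is set), it is safe to dequeue and free
		 * it here; otherwise fcip_pkt_callback will finish the
		 * cleanup when it fires.
		 */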
7400 		if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_RETURNED) {
7401 			fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
7402 			mutex_enter(&fdestp->fcipd_mutex);
7403 			rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
7404 			ASSERT(rval == 1);
7405 			mutex_exit(&fdestp->fcipd_mutex);
7406 
7407 			fcip_pkt_free(fcip_pkt, 1);
7408 		}
7409 		return;
7410 	}
7411 }
7412 
7413 
7414 /*
7415  * Remove a routing table entry marked for deferred removal. This routine,
7416  * unlike fcip_pkt_timeout, is always called from a taskq context.
7417  */
7418 static void
7419 fcip_rte_remove_deferred(void *arg)
7420 {
7421 	struct fcip 			*fptr = (struct fcip *)arg;
7422 	int				hash_bucket;
7423 	struct fcip_dest 		*fdestp;
7424 	la_wwn_t			*pwwn;
7425 	int 				index;
7426 	struct fcip_routing_table 	*frtp, *frtp_next, *frtp_prev;
7427 
7428 
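	/*
	 * Walk every routing table hash bucket looking for entries in the
	 * FCIP_RT_RETIRED state. For each such entry, locate its destination
	 * entry (if any); if commands are still outstanding against it,
	 * leave the entry for the next pass, otherwise detach it from the
	 * destination and free it.
	 */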
7429 	mutex_enter(&fptr->fcip_rt_mutex);
7430 	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
7431 		frtp = fptr->fcip_rtable[index];
7432 		frtp_prev = NULL;
7433 		while (frtp) {
7434 			frtp_next = frtp->fcipr_next;
7435 
7436 			if (frtp->fcipr_state == FCIP_RT_RETIRED) {
7437 
7438 				pwwn = &frtp->fcipr_pwwn;
7439 				/*
7440 				 * Get hold of destination pointer
7441 				 */
7442 				mutex_enter(&fptr->fcip_dest_mutex);
7443 
7444 				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
7445 				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
7446 
7447 				fdestp = fptr->fcip_dest[hash_bucket];
7448 				while (fdestp != NULL) {
7449 					mutex_enter(&fdestp->fcipd_mutex);
7450 					if (fdestp->fcipd_rtable) {
7451 						if (fcip_wwn_compare(pwwn,
7452 						    &fdestp->fcipd_pwwn,
7453 						    FCIP_COMPARE_PWWN) == 0) {
7454 							mutex_exit(
7455 							&fdestp->fcipd_mutex);
7456 							break;
7457 						}
7458 					}
7459 					mutex_exit(&fdestp->fcipd_mutex);
7460 					fdestp = fdestp->fcipd_next;
7461 				}
7462 
7463 				mutex_exit(&fptr->fcip_dest_mutex);
7464 				if (fdestp == NULL) {
7465 					frtp_prev = frtp;
7466 					frtp = frtp_next;
7467 					continue;
7468 				}
7469 
7470 				mutex_enter(&fdestp->fcipd_mutex);
7471 				if (fdestp->fcipd_ncmds) {
7472 					/*
7473 					 * Instead of waiting to drain commands
7474 					 * let us revisit this RT entry in
7475 					 * the next pass.
7476 					 */
7477 					mutex_exit(&fdestp->fcipd_mutex);
7478 					frtp_prev = frtp;
7479 					frtp = frtp_next;
7480 					continue;
7481 				}
7482 
7483 				/*
7484 				 * We are clean, so remove the RTE
7485 				 */
7486 				fdestp->fcipd_rtable = NULL;
7487 				mutex_exit(&fdestp->fcipd_mutex);
7488 
7489 				FCIP_TNF_PROBE_2((fcip_rte_remove_deferred,
7490 					"fcip io", /* CSTYLED */,
7491 					tnf_string, msg,
7492 					"remove retired routing entry",
7493 					tnf_int, index, index));
7494 
7495 				if (frtp_prev == NULL) {
7496 					/* first element */
7497 					fptr->fcip_rtable[index] =
7498 					    frtp->fcipr_next;
7499 				} else {
7500 					frtp_prev->fcipr_next =
7501 					    frtp->fcipr_next;
7502 				}
7503 				kmem_free(frtp,
7504 				    sizeof (struct fcip_routing_table));
7505 
7506 				frtp = frtp_next;
7507 			} else {
7508 				frtp_prev = frtp;
7509 				frtp = frtp_next;
7510 			}
7511 		}
7512 	}
7513 	mutex_exit(&fptr->fcip_rt_mutex);
7514 	/*
7515 	 * Clear the RTE_REMOVING flag
7516 	 */
7517 	mutex_enter(&fptr->fcip_mutex);
7518 	fptr->fcip_flags &= ~FCIP_RTE_REMOVING;
7519 	mutex_exit(&fptr->fcip_mutex);
7520 }
7521 
7522 /*
7523  * Walk through all the dest hash table entries and count up the total
7524  * no. of packets outstanding against a given port
7525  */
7526 static int
7527 fcip_port_get_num_pkts(struct fcip *fptr)
7528 {
7529 	int 			num_cmds = 0;
7530 	int 			i;
7531 	struct fcip_dest	*fdestp;
7532 
7533 	ASSERT(mutex_owned(&fptr->fcip_dest_mutex));
7534 
7535 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7536 		fdestp = fptr->fcip_dest[i];
7537 		while (fdestp != NULL) {
7538 			mutex_enter(&fdestp->fcipd_mutex);
7539 
7540 			ASSERT(fdestp->fcipd_ncmds >= 0);
7541 
7542 			if (fdestp->fcipd_ncmds > 0) {
7543 				num_cmds += fdestp->fcipd_ncmds;
7544 			}
7545 			mutex_exit(&fdestp->fcipd_mutex);
7546 			fdestp = fdestp->fcipd_next;
7547 		}
7548 	}
7549 
7550 	return (num_cmds);
7551 }
7552 
7553 
7554 /*
7555  * Walk through the routing table for this state instance and see if there is a
7556  * PLOGI in progress for any of the entries. Return success if we find even one.
7557  */
7558 static int
7559 fcip_plogi_in_progress(struct fcip *fptr)
7560 {
7561 	int				i;
7562 	struct fcip_routing_table	*frp;
7563 
7564 	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
7565 
7566 	for (i = 0; i < FCIP_RT_HASH_ELEMS; i++) {
7567 		frp = fptr->fcip_rtable[i];
7568 		while (frp) {
7569 			if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS) {
7570 				/* Found an entry where PLOGI is in progress */
7571 				return (1);
7572 			}
7573 			frp = frp->fcipr_next;
7574 		}
7575 	}
7576 
7577 	return (0);
7578 }
7579 
7580 /*
7581  * Walk through the fcip port global list and check if the given port exists in
7582  * the list. Returns "0" if the port exists and "1" otherwise.
7583  */
7584 static int
7585 fcip_check_port_exists(struct fcip *fptr)
7586 {
7587 	fcip_port_info_t	*cur_fport;
7588 	fcip_port_info_t	*fport;
7589 
7590 	mutex_enter(&fcip_global_mutex);
7591 	fport = fptr->fcip_port_info;
7592 	cur_fport = fcip_port_head;
7593 	while (cur_fport != NULL) {
7594 		if (cur_fport == fport) {
7595 			/* Found */
7596 			mutex_exit(&fcip_global_mutex);
7597 			return (0);
7598 		} else {
7599 			cur_fport = cur_fport->fcipp_next;
7600 		}
7601 	}
7602 	mutex_exit(&fcip_global_mutex);
7603 
7604 	return (1);
7605 }
7606 
7607 /*
7608  * Constructor to initialize the sendup elements for callback into
7609  * modules upstream
7610  */
7611 
7612 /* ARGSUSED */
7613 static int
7614 fcip_sendup_constructor(void *buf, void *arg, int flags)
7615 {
7616 	struct fcip_sendup_elem	*msg_elem = (struct fcip_sendup_elem *)buf;
7617 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
7618 
7619 	ASSERT(fport != NULL);
7620 
7621 	msg_elem->fcipsu_mp = NULL;
7622 	msg_elem->fcipsu_func = NULL;
7623 	msg_elem->fcipsu_next = NULL;
7624 
7625 	return (FCIP_SUCCESS);
7626 }
7627