xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/ulp/fcip.c (revision 2570281cf351044b6936651ce26dbe1f801dcbd8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  */
26 
27 /*
28  * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module
29  * This is a pseudo driver module to handle encapsulation of IP and ARP
30  * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS
31  * driver module which interfaces with IP/ARP using DLPI. This module
32  * is a Style-2 DLS provider.
33  *
34  * The implementation of this module is based on RFC 2625 which gives
35  * details on the encapsulation of IP/ARP data over FibreChannel.
36  * The fcip module needs to resolve an IP address to a port address before
37  * sending data to a destination port. A FC device port has 2 addresses
38  * associated with it: A 8 byte World Wide unique Port Name and a 3 byte
39  * volatile Port number or Port_ID.
40  *
41  * The mapping between a IP address and the World Wide Port Name is handled
42  * by the ARP layer since the IP over FC draft requires the MAC address to
43  * be the least significant six bytes of the WorldWide Port Names. The
44  * fcip module however needs to identify the destination port uniquely when
45  * the destination FC device has multiple FC ports.
46  *
47  * The FC layer mapping between the World Wide Port Name and the Port_ID
48  * will be handled through the use of a fabric name server or through the
49  * use of the FARP ELS command as described in the draft. Since the Port_IDs
50  * are volatile, the mapping between the World Wide Port Name and Port_IDs
51  * must be maintained and validated before use each time a datagram
52  * needs to be sent to the destination ports. The FC transport module
53  * informs the fcip module of all changes to states of ports on the
54  * fabric through registered callbacks. This enables the fcip module
55  * to maintain the WW_PN to Port_ID mappings current.
56  *
57  * For details on how this module interfaces with the FibreChannel Transport
58  * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport
59  * Programming guide details the APIs between ULPs and the Transport.
60  *
61  * Now for some Caveats:
62  *
63  * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have
64  * the NAA bits set to '0001' indicating a IEEE 48bit address which
65  * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel
66  * adapters containing 2 or more ports, IEEE naming cannot identify the
67  * ports on an adapter uniquely so we will in the first implementation
68  * be operating only on Port 0 of each adapter.
69  */
70 
71 #include	<sys/types.h>
72 #include	<sys/errno.h>
73 #include	<sys/debug.h>
74 #include	<sys/time.h>
75 #include	<sys/sysmacros.h>
76 #include	<sys/systm.h>
77 #include	<sys/user.h>
78 #include	<sys/stropts.h>
79 #include	<sys/stream.h>
80 #include	<sys/strlog.h>
81 #include	<sys/strsubr.h>
82 #include	<sys/cmn_err.h>
83 #include	<sys/cpu.h>
84 #include	<sys/kmem.h>
85 #include	<sys/conf.h>
86 #include	<sys/ddi.h>
87 #include	<sys/sunddi.h>
88 #include	<sys/ksynch.h>
89 #include	<sys/stat.h>
90 #include	<sys/kstat.h>
91 #include	<sys/vtrace.h>
92 #include	<sys/strsun.h>
93 #include	<sys/varargs.h>
94 #include	<sys/modctl.h>
95 #include 	<sys/thread.h>
96 #include 	<sys/var.h>
97 #include 	<sys/proc.h>
98 #include	<inet/common.h>
99 #include	<netinet/ip6.h>
100 #include	<inet/ip.h>
101 #include	<inet/arp.h>
102 #include	<inet/mi.h>
103 #include	<inet/nd.h>
104 #include	<sys/dlpi.h>
105 #include	<sys/ethernet.h>
106 #include	<sys/file.h>
107 #include	<sys/syslog.h>
108 #include	<sys/disp.h>
109 #include	<sys/taskq.h>
110 
111 /*
112  * Leadville includes
113  */
114 
115 #include	<sys/fibre-channel/fc.h>
116 #include	<sys/fibre-channel/impl/fc_ulpif.h>
117 #include	<sys/fibre-channel/ulp/fcip.h>
118 
119 #define	FCIP_ESBALLOC
120 
121 /*
122  * Function prototypes
123  */
124 
125 /* standard loadable modules entry points */
126 static int	fcip_attach(dev_info_t *, ddi_attach_cmd_t);
127 static int 	fcip_detach(dev_info_t *, ddi_detach_cmd_t);
128 static void 	fcip_dodetach(struct fcipstr *slp);
129 static int fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
130     void *arg, void **result);
131 
132 
133 /* streams specific */
134 static void fcip_setipq(struct fcip *fptr);
135 static int fcip_wput(queue_t *, mblk_t *);
136 static int fcip_wsrv(queue_t *);
137 static void fcip_proto(queue_t *, mblk_t *);
138 static void fcip_ioctl(queue_t *, mblk_t *);
139 static int fcip_open(queue_t *wq, dev_t *devp, int flag,
140 		int sflag, cred_t *credp);
141 static int fcip_close(queue_t *rq, int flag, cred_t *credp);
142 static int fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
143     struct fcip_dest *fdestp, int flags);
144 static void fcip_sendup(struct fcip *fptr, mblk_t *mp,
145     struct fcipstr *(*acceptfunc)());
146 static struct fcipstr *fcip_accept(struct fcipstr *slp, struct fcip *fptr,
147     int type, la_wwn_t *dhostp);
148 static mblk_t *fcip_addudind(struct fcip *fptr, mblk_t *mp,
149     fcph_network_hdr_t *nhdr, int type);
150 static int fcip_setup_mac_addr(struct fcip *fptr);
151 static void fcip_kstat_init(struct fcip *fptr);
152 static int fcip_stat_update(kstat_t *, int);
153 
154 
155 /* dlpi specific */
156 static void fcip_spareq(queue_t *wq, mblk_t *mp);
157 static void fcip_pareq(queue_t *wq, mblk_t *mp);
158 static void fcip_ubreq(queue_t *wq, mblk_t *mp);
159 static void fcip_breq(queue_t *wq, mblk_t *mp);
160 static void fcip_dreq(queue_t *wq, mblk_t *mp);
161 static void fcip_areq(queue_t *wq, mblk_t *mp);
162 static void fcip_udreq(queue_t *wq, mblk_t *mp);
163 static void fcip_ireq(queue_t *wq, mblk_t *mp);
164 static void fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp);
165 
166 
167 /* solaris sundry, DR/CPR etc */
168 static int fcip_cache_constructor(void *buf, void *arg, int size);
169 static void fcip_cache_destructor(void *buf, void *size);
170 static int fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd);
171 static int fcip_handle_resume(fcip_port_info_t *fport,
172     fc_ulp_port_info_t *port_info, fc_attach_cmd_t cmd);
173 static fcip_port_info_t *fcip_softstate_free(fcip_port_info_t *fport);
174 static int fcip_port_attach_handler(struct fcip *fptr);
175 
176 
177 /*
178  * ulp - transport interface function prototypes
179  */
180 static int fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *,
181     fc_attach_cmd_t cmd, uint32_t sid);
182 static int fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *,
183     fc_detach_cmd_t cmd);
184 static int fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle,
185     dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
186     uint32_t claimed);
187 static void fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
188     uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
189     uint32_t listlen, uint32_t sid);
190 static int fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
191     fc_unsol_buf_t *buf, uint32_t claimed);
192 static int fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
193     fc_unsol_buf_t *payload, uint32_t claimed);
194 
195 
196 /* Routing table specific */
197 static void fcip_handle_topology(struct fcip *fptr);
198 static int fcip_init_port(struct fcip *fptr);
199 struct fcip_routing_table *fcip_lookup_rtable(struct fcip *fptr,
200     la_wwn_t *pwwn, int matchflag);
201 static void fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist,
202     uint32_t listlen);
203 static void fcip_rt_flush(struct fcip *fptr);
204 static void fcip_rte_remove_deferred(void *arg);
205 static int fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp);
206 
207 
208 /* dest table specific */
209 static struct fcip_dest *fcip_get_dest(struct fcip *fptr,
210     la_wwn_t *dlphys);
211 static struct fcip_dest *fcip_add_dest(struct fcip *fptr,
212     struct fcip_routing_table *frp);
213 static int fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag);
214 static uint32_t fcip_get_broadcast_did(struct fcip *fptr);
215 static void fcip_cleanup_dest(struct fcip *fptr);
216 
217 
218 /* helper functions */
219 static fcip_port_info_t *fcip_get_port(opaque_t phandle);
220 static int fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag);
221 static void fcip_ether_to_str(struct ether_addr *e, caddr_t s);
222 static int fcip_port_get_num_pkts(struct fcip *fptr);
223 static int fcip_check_port_busy(struct fcip *fptr);
224 static void fcip_check_remove_minor_node(void);
225 static int fcip_set_wwn(la_wwn_t *pwwn);
226 static int fcip_plogi_in_progress(struct fcip *fptr);
227 static int fcip_check_port_exists(struct fcip *fptr);
228 static int fcip_is_supported_fc_topology(int fc_topology);
229 
230 
231 /* pkt specific */
232 static fcip_pkt_t *fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp,
233     int flags, int datalen);
234 static void fcip_pkt_free(struct fcip_pkt *fcip_pkt, int flags);
235 static fcip_pkt_t *fcip_ipkt_alloc(struct fcip *fptr, int cmdlen,
236     int resplen, opaque_t pd, int flags);
237 static void fcip_ipkt_free(fcip_pkt_t *fcip_pkt);
238 static void fcip_ipkt_callback(fc_packet_t *fc_pkt);
239 static void fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt);
240 static void fcip_pkt_callback(fc_packet_t *fc_pkt);
241 static void fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid,
242     fc_portid_t did, void (*comp) ());
243 static int fcip_transport(fcip_pkt_t *fcip_pkt);
244 static void fcip_pkt_timeout(void *arg);
245 static void fcip_timeout(void *arg);
246 static void fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp,
247     fcip_pkt_t *fcip_pkt);
248 static int fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp,
249     fcip_pkt_t *fcip_pkt);
250 static int fcip_sendup_constructor(void *buf, void *arg, int flags);
251 static void fcip_sendup_thr(void *arg);
252 static int fcip_sendup_alloc_enque(struct fcip *ftpr, mblk_t *mp,
253     struct fcipstr *(*f)());
254 
255 /*
256  * zero copy inbound data handling
257  */
258 #ifdef FCIP_ESBALLOC
259 static void fcip_ubfree(char *arg);
260 #endif /* FCIP_ESBALLOC */
261 
262 #if !defined(FCIP_ESBALLOC)
263 static void *fcip_allocb(size_t size, uint_t pri);
264 #endif
265 
266 
267 /* FCIP FARP support functions */
268 static struct fcip_dest *fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn,
269     char *ip_addr, size_t ip_addr_len, int flags);
270 static void fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (),
271     int is_els);
272 static int fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd);
273 static int fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd);
274 static void fcip_cache_arp_broadcast(struct fcip *ftpr, fc_unsol_buf_t *buf);
275 static void fcip_port_ns(void *arg);
276 
277 #ifdef DEBUG
278 
279 #include <sys/debug.h>
280 
281 #define	FCIP_DEBUG_DEFAULT	0x1
282 #define	FCIP_DEBUG_ATTACH	0x2
283 #define	FCIP_DEBUG_INIT		0x4
284 #define	FCIP_DEBUG_DETACH	0x8
285 #define	FCIP_DEBUG_DLPI		0x10
286 #define	FCIP_DEBUG_ELS		0x20
287 #define	FCIP_DEBUG_DOWNSTREAM	0x40
288 #define	FCIP_DEBUG_UPSTREAM	0x80
289 #define	FCIP_DEBUG_MISC		0x100
290 
291 #define	FCIP_DEBUG_STARTUP	(FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT)
292 #define	FCIP_DEBUG_DATAOUT	(FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM)
293 #define	FCIP_DEBUG_DATAIN	(FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM)
294 
295 static int fcip_debug = FCIP_DEBUG_DEFAULT;
296 
297 #define	FCIP_DEBUG(level, args)	\
298 	if (fcip_debug & (level))	cmn_err args;
299 
300 #else	/* DEBUG */
301 
302 #define	FCIP_DEBUG(level, args)		/* do nothing */
303 
304 #endif	/* DEBUG */
305 
306 #define	KIOIP	KSTAT_INTR_PTR(fcip->fcip_intrstats)
307 
308 /*
309  * Endian independent ethernet to WWN copy
310  */
311 #define	ether_to_wwn(E, W)	\
312 	bzero((void *)(W), sizeof (la_wwn_t)); \
313 	bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \
314 	(W)->raw_wwn[0] |= 0x10
315 
316 /*
317  * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr.
318  * The args to the macro are pointers to WWN and ether_addr structures
319  */
320 #define	wwn_to_ether(W, E)	\
321 	bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL)
322 
323 /*
324  * The module_info structure contains identification and limit values.
325  * All queues associated with a certain driver share the same module_info
326  * structures. This structure defines the characteristics of that driver/
327  * module's queues. The module name must be unique. The max and min packet
328  * sizes limit the no. of characters in M_DATA messages. The Hi and Lo
329  * water marks are for flow control when a module has a service procedure.
330  */
331 static struct module_info	fcipminfo = {
332 	FCIPIDNUM,	/* mi_idnum : Module ID num */
333 	FCIPNAME, 	/* mi_idname: Module Name */
334 	FCIPMINPSZ,	/* mi_minpsz: Min packet size */
335 	FCIPMAXPSZ,	/* mi_maxpsz: Max packet size */
336 	FCIPHIWAT,	/* mi_hiwat : High water mark */
337 	FCIPLOWAT	/* mi_lowat : Low water mark */
338 };
339 
340 /*
 * The qinit structures contain the module put, service, open and close
 * procedure pointers. All modules and drivers with the same streamtab
 * file (i.e same fmodsw or cdevsw entry points) point to the same
344  * upstream (read) and downstream (write) qinit structs.
345  */
346 static struct qinit	fcip_rinit = {
347 	NULL,		/* qi_putp */
348 	NULL,		/* qi_srvp */
349 	fcip_open,	/* qi_qopen */
350 	fcip_close,	/* qi_qclose */
351 	NULL,		/* qi_qadmin */
352 	&fcipminfo,	/* qi_minfo */
353 	NULL		/* qi_mstat */
354 };
355 
356 static struct qinit	fcip_winit = {
357 	fcip_wput,	/* qi_putp */
358 	fcip_wsrv,	/* qi_srvp */
359 	NULL,		/* qi_qopen */
360 	NULL,		/* qi_qclose */
361 	NULL,		/* qi_qadmin */
362 	&fcipminfo,	/* qi_minfo */
363 	NULL		/* qi_mstat */
364 };
365 
366 /*
367  * streamtab contains pointers to the read and write qinit structures
368  */
369 
370 static struct streamtab fcip_info = {
371 	&fcip_rinit,	/* st_rdinit */
372 	&fcip_winit,	/* st_wrinit */
373 	NULL,		/* st_muxrinit */
374 	NULL,		/* st_muxwrinit */
375 };
376 
377 static struct cb_ops  fcip_cb_ops = {
378 	nodev,				/* open */
379 	nodev,				/* close */
380 	nodev,				/* strategy */
381 	nodev,				/* print */
382 	nodev,				/* dump */
383 	nodev,				/* read */
384 	nodev,				/* write */
385 	nodev,				/* ioctl */
386 	nodev,				/* devmap */
387 	nodev,				/* mmap */
388 	nodev,				/* segmap */
389 	nochpoll,			/* poll */
390 	ddi_prop_op,			/* cb_prop_op */
391 	&fcip_info,			/* streamtab  */
392 	D_MP | D_HOTPLUG,		/* Driver compatibility flag */
393 	CB_REV,				/* rev */
394 	nodev,				/* int (*cb_aread)() */
395 	nodev				/* int (*cb_awrite)() */
396 };
397 
398 /*
399  * autoconfiguration routines.
400  */
401 static struct dev_ops fcip_ops = {
402 	DEVO_REV,		/* devo_rev, */
403 	0,			/* refcnt  */
404 	fcip_getinfo,		/* info */
405 	nulldev,		/* identify */
406 	nulldev,		/* probe */
407 	fcip_attach,		/* attach */
408 	fcip_detach,		/* detach */
409 	nodev,			/* RESET */
410 	&fcip_cb_ops,		/* driver operations */
411 	NULL,			/* bus operations */
412 	ddi_power		/* power management */
413 };
414 
415 #define	FCIP_VERSION	"1.61"
416 #define	FCIP_NAME	"SunFC FCIP v" FCIP_VERSION
417 
418 #define	PORT_DRIVER	"fp"
419 
420 #define	GETSTRUCT(struct, number)	\
421 	((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
422 		KM_SLEEP))
423 
424 static struct modldrv modldrv = {
425 	&mod_driverops,			/* Type of module - driver */
426 	FCIP_NAME,			/* Name of module */
427 	&fcip_ops,			/* driver ops */
428 };
429 
430 static struct modlinkage modlinkage = {
431 	MODREV_1, (void *)&modldrv, NULL
432 };
433 
434 
435 /*
436  * Now for some global statics
437  */
438 static uint32_t	fcip_ub_nbufs = FCIP_UB_NBUFS;
439 static uint32_t fcip_ub_size = FCIP_UB_SIZE;
440 static int fcip_pkt_ttl_ticks = FCIP_PKT_TTL;
441 static int fcip_tick_incr = 1;
442 static int fcip_wait_cmds = FCIP_WAIT_CMDS;
443 static int fcip_num_attaching = 0;
444 static int fcip_port_attach_pending = 0;
445 static int fcip_create_nodes_on_demand = 1;	/* keep it similar to fcp */
446 static int fcip_cache_on_arp_broadcast = 0;
447 static int fcip_farp_supported = 0;
448 static int fcip_minor_node_created = 0;
449 
450 /*
451  * Supported FCAs
452  */
453 #define	QLC_PORT_1_ID_BITS		0x100
454 #define	QLC_PORT_2_ID_BITS		0x101
455 #define	QLC_PORT_NAA			0x2
456 #define	QLC_MODULE_NAME			"qlc"
457 #define	IS_QLC_PORT(port_dip)		\
458 			(strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
459 			QLC_MODULE_NAME) == 0)
460 
461 
462 /*
463  * fcip softstate structures head.
464  */
465 
466 static void *fcip_softp = NULL;
467 
468 /*
469  * linked list of active (inuse) driver streams
470  */
471 
472 static int fcip_num_instances = 0;
473 static dev_info_t *fcip_module_dip = (dev_info_t *)0;
474 
475 
476 /*
477  * Ethernet broadcast address: Broadcast addressing in IP over fibre
478  * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
479  *
480  * The broadcast addressing varies for differing topologies a node may be in:
481  *	- On a private loop the ARP broadcast is a class 3 sequence sent
482  *	  using OPNfr (Open Broadcast Replicate primitive) followed by
483  *	  the ARP frame to D_ID 0xFFFFFF
484  *
485  *	- On a public Loop the broadcast sequence is sent to AL_PA 0x00
486  *	  (no OPNfr primitive).
487  *
488  *	- For direct attach and point to point topologies we just send
489  *	  the frame to D_ID 0xFFFFFF
490  *
491  * For public loop the handling would probably be different - for now
492  * I'll just declare this struct - It can be deleted if not necessary.
493  *
494  */
495 
496 
497 /*
498  * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
499  * returned as a part of an  DL_INFO_ACK message which is a M_PCPROTO message
500  * returned in response to a DL_INFO_REQ message sent to us from a DLS user
501  * Let us fake an ether header as much as possible.
502  *
503  * dl_addr_length is the Provider's DLSAP addr which is SAP addr +
504  *                Physical addr of the provider. We set this to
505  *                ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
506  * dl_mac_type    Lets just use DL_ETHER - we can try using DL_IPFC, a new
507  *		  dlpi.h define later.
508  * dl_sap_length  -2 indicating the SAP address follows the Physical addr
509  *		  component in the DLSAP addr.
510  * dl_service_mode: DLCLDS - connectionless data link service.
511  *
512  */
513 
514 static dl_info_ack_t fcip_infoack = {
515 	DL_INFO_ACK,				/* dl_primitive */
516 	FCIPMTU,				/* dl_max_sdu */
517 	0,					/* dl_min_sdu */
518 	FCIPADDRL,				/* dl_addr_length */
519 	DL_ETHER,				/* dl_mac_type */
520 	0,					/* dl_reserved */
521 	0,					/* dl_current_state */
522 	-2,					/* dl_sap_length */
523 	DL_CLDLS,				/* dl_service_mode */
524 	0,					/* dl_qos_length */
525 	0,					/* dl_qos_offset */
526 	0,					/* dl_range_length */
527 	0,					/* dl_range_offset */
528 	DL_STYLE2,				/* dl_provider_style */
529 	sizeof (dl_info_ack_t),			/* dl_addr_offset */
530 	DL_VERSION_2,				/* dl_version */
531 	ETHERADDRL,				/* dl_brdcst_addr_length */
532 	sizeof (dl_info_ack_t) + FCIPADDRL,	/* dl_brdcst_addr_offset */
533 	0					/* dl_growth */
534 };
535 
536 /*
537  * FCIP broadcast address definition.
538  */
539 static	struct ether_addr	fcipnhbroadcastaddr = {
540 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
541 };
542 
543 /*
544  * RFC2625 requires the broadcast ARP address in the ARP data payload to
545  * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
546  */
547 static	struct ether_addr	fcip_arpbroadcast_addr = {
548 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
549 };
550 
551 
552 #define	ether_bcopy(src, dest)	bcopy((src), (dest), ETHERADDRL);
553 
554 /*
555  * global kernel locks
556  */
557 static kcondvar_t	fcip_global_cv;
558 static kmutex_t		fcip_global_mutex;
559 
560 /*
561  * fctl external defines
562  */
563 extern int fc_ulp_add(fc_ulp_modinfo_t *);
564 
565 /*
566  * fctl data structures
567  */
568 
569 #define	FCIP_REV	0x07
570 
571 /* linked list of port info structures */
572 static fcip_port_info_t *fcip_port_head = NULL;
573 
574 /* linked list of fcip structures */
575 static struct fcipstr	*fcipstrup = NULL;
576 static krwlock_t	fcipstruplock;
577 
578 
579 /*
580  * Module information structure. This structure gives the FC Transport modules
581  * information about an ULP that registers with it.
582  */
583 static fc_ulp_modinfo_t	fcip_modinfo = {
584 	0,			/* for xref checks? */
585 	FCTL_ULP_MODREV_4,	/* FCIP revision */
586 	FC_TYPE_IS8802_SNAP,	/* type 5 for SNAP encapsulated datagrams */
587 	FCIP_NAME,		/* module name as in the modldrv struct */
588 	0x0,			/* get all statec callbacks for now */
589 	fcip_port_attach,	/* port attach callback */
590 	fcip_port_detach,	/* port detach callback */
591 	fcip_port_ioctl,	/* port ioctl callback */
592 	fcip_els_cb,		/* els callback */
593 	fcip_data_cb,		/* data callback */
594 	fcip_statec_cb		/* state change callback */
595 };
596 
597 
598 /*
599  * Solaris 9 and up, the /kernel/drv/fp.conf file will have the following entry
600  *
601  * ddi-forceattach=1;
602  *
603  * This will ensure that fp is loaded at bootup. No additional checks are needed
604  */
605 int
_init(void)606 _init(void)
607 {
608 	int	rval;
609 
610 	/*
611 	 * Initialize the mutexs used by port attach and other callbacks.
612 	 * The transport can call back into our port_attach_callback
613 	 * routine even before _init() completes and bad things can happen.
614 	 */
615 	mutex_init(&fcip_global_mutex, NULL, MUTEX_DRIVER, NULL);
616 	cv_init(&fcip_global_cv, NULL, CV_DRIVER, NULL);
617 	rw_init(&fcipstruplock, NULL, RW_DRIVER, NULL);
618 
619 	mutex_enter(&fcip_global_mutex);
620 	fcip_port_attach_pending = 1;
621 	mutex_exit(&fcip_global_mutex);
622 
623 	/*
624 	 * Now attempt to register fcip with the transport.
625 	 * If fc_ulp_add fails, fcip module will not be loaded.
626 	 */
627 	rval = fc_ulp_add(&fcip_modinfo);
628 	if (rval != FC_SUCCESS) {
629 		mutex_destroy(&fcip_global_mutex);
630 		cv_destroy(&fcip_global_cv);
631 		rw_destroy(&fcipstruplock);
632 		switch (rval) {
633 		case FC_ULP_SAMEMODULE:
634 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
635 			    "!fcip: module is already registered with"
636 			    " transport"));
637 			rval = EEXIST;
638 			break;
639 		case FC_ULP_SAMETYPE:
640 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
641 			    "!fcip: Another module of the same ULP type 0x%x"
642 			    " is already registered with the transport",
643 			    fcip_modinfo.ulp_type));
644 			rval = EEXIST;
645 			break;
646 		case FC_BADULP:
647 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
648 			    "!fcip: Current fcip version 0x%x does not match"
649 			    " fctl version",
650 			    fcip_modinfo.ulp_rev));
651 			rval = ENODEV;
652 			break;
653 		default:
654 			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
655 			    "!fcip: fc_ulp_add failed with status 0x%x", rval));
656 			rval = ENODEV;
657 			break;
658 		}
659 		return (rval);
660 	}
661 
662 	if ((rval = ddi_soft_state_init(&fcip_softp, sizeof (struct fcip),
663 			FCIP_NUM_INSTANCES)) != 0) {
664 		mutex_destroy(&fcip_global_mutex);
665 		cv_destroy(&fcip_global_cv);
666 		rw_destroy(&fcipstruplock);
667 		(void) fc_ulp_remove(&fcip_modinfo);
668 		return (rval);
669 	}
670 
671 	if ((rval = mod_install(&modlinkage)) != 0) {
672 		(void) fc_ulp_remove(&fcip_modinfo);
673 		mutex_destroy(&fcip_global_mutex);
674 		cv_destroy(&fcip_global_cv);
675 		rw_destroy(&fcipstruplock);
676 		ddi_soft_state_fini(&fcip_softp);
677 	}
678 	return (rval);
679 }
680 
681 /*
682  * Unload the port driver if this was the only ULP loaded and then
683  * deregister with the transport.
684  */
685 int
_fini(void)686 _fini(void)
687 {
688 	int	rval;
689 	int	rval1;
690 
691 	/*
692 	 * Do not permit the module to be unloaded before a port
693 	 * attach callback has happened.
694 	 */
695 	mutex_enter(&fcip_global_mutex);
696 	if (fcip_num_attaching || fcip_port_attach_pending) {
697 		mutex_exit(&fcip_global_mutex);
698 		return (EBUSY);
699 	}
700 	mutex_exit(&fcip_global_mutex);
701 
702 	if ((rval = mod_remove(&modlinkage)) != 0) {
703 		return (rval);
704 	}
705 
706 	/*
707 	 * unregister with the transport layer
708 	 */
709 	rval1 = fc_ulp_remove(&fcip_modinfo);
710 
711 	/*
712 	 * If the ULP was not registered with the transport, init should
713 	 * have failed. If transport has no knowledge of our existence
714 	 * we should simply bail out and succeed
715 	 */
716 #ifdef DEBUG
717 	if (rval1 == FC_BADULP) {
718 		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
719 		"fcip: ULP was never registered with the transport"));
720 		rval = ENODEV;
721 	} else if (rval1 == FC_BADTYPE) {
722 		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
723 			"fcip: No ULP of this type 0x%x was registered with "
724 			"transport", fcip_modinfo.ulp_type));
725 		rval = ENODEV;
726 	}
727 #endif /* DEBUG */
728 
729 	mutex_destroy(&fcip_global_mutex);
730 	rw_destroy(&fcipstruplock);
731 	cv_destroy(&fcip_global_cv);
732 	ddi_soft_state_fini(&fcip_softp);
733 
734 	return (rval);
735 }
736 
737 /*
738  * Info about this loadable module
739  */
740 int
_info(struct modinfo * modinfop)741 _info(struct modinfo *modinfop)
742 {
743 	return (mod_info(&modlinkage, modinfop));
744 }
745 
746 /*
747  * The port attach callback is invoked by the port driver when a FCA
748  * port comes online and binds with the transport layer. The transport
749  * then callsback into all ULP modules registered with it. The Port attach
750  * call back will also provide the ULP module with the Port's WWN and S_ID
751  */
752 /* ARGSUSED */
753 static int
fcip_port_attach(opaque_t ulp_handle,fc_ulp_port_info_t * port_info,fc_attach_cmd_t cmd,uint32_t sid)754 fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
755     fc_attach_cmd_t cmd, uint32_t sid)
756 {
757 	int 			rval = FC_FAILURE;
758 	int 			instance;
759 	struct fcip		*fptr;
760 	fcip_port_info_t	*fport = NULL;
761 	fcip_port_info_t	*cur_fport;
762 	fc_portid_t		src_id;
763 
764 	switch (cmd) {
765 	case FC_CMD_ATTACH: {
766 		la_wwn_t	*ww_pn = NULL;
767 		/*
768 		 * It was determined that, as per spec, the lower 48 bits of
769 		 * the port-WWN will always be unique. This will make the MAC
770 		 * address (i.e the lower 48 bits of the WWN), that IP/ARP
771 		 * depend on, unique too. Hence we should be able to remove the
772 		 * restriction of attaching to only one of the ports of
773 		 * multi port FCAs.
774 		 *
775 		 * Earlier, fcip used to attach only to qlc module and fail
776 		 * silently for attach failures resulting from unknown FCAs or
777 		 * unsupported FCA ports. Now, we'll do no such checks.
778 		 */
779 		ww_pn = &port_info->port_pwwn;
780 
781 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
782 		    "port id bits: 0x%x", ww_pn->w.nport_id));
783 		/*
784 		 * A port has come online
785 		 */
786 		mutex_enter(&fcip_global_mutex);
787 		fcip_num_instances++;
788 		fcip_num_attaching++;
789 
790 		if (fcip_port_head == NULL) {
791 			/* OK to sleep here ? */
792 			fport = kmem_zalloc(sizeof (fcip_port_info_t),
793 						KM_NOSLEEP);
794 			if (fport == NULL) {
795 				fcip_num_instances--;
796 				fcip_num_attaching--;
797 				ASSERT(fcip_num_attaching >= 0);
798 				mutex_exit(&fcip_global_mutex);
799 				rval = FC_FAILURE;
800 				cmn_err(CE_WARN, "!fcip(%d): port attach "
801 				    "failed: alloc failed",
802 				    ddi_get_instance(port_info->port_dip));
803 				goto done;
804 			}
805 			fcip_port_head = fport;
806 		} else {
807 			/*
808 			 * traverse the port list and also check for
809 			 * duplicate port attaches - Nothing wrong in being
810 			 * paranoid Heh Heh.
811 			 */
812 			cur_fport = fcip_port_head;
813 			while (cur_fport != NULL) {
814 				if (cur_fport->fcipp_handle ==
815 				    port_info->port_handle) {
816 					fcip_num_instances--;
817 					fcip_num_attaching--;
818 					ASSERT(fcip_num_attaching >= 0);
819 					mutex_exit(&fcip_global_mutex);
820 					FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
821 					    "!fcip(%d): port already "
822 					    "attached!!", ddi_get_instance(
823 					    port_info->port_dip)));
824 					rval = FC_FAILURE;
825 					goto done;
826 				}
827 				cur_fport = cur_fport->fcipp_next;
828 			}
829 			fport = kmem_zalloc(sizeof (fcip_port_info_t),
830 						KM_NOSLEEP);
831 			if (fport == NULL) {
832 				rval = FC_FAILURE;
833 				fcip_num_instances--;
834 				fcip_num_attaching--;
835 				ASSERT(fcip_num_attaching >= 0);
836 				mutex_exit(&fcip_global_mutex);
837 				cmn_err(CE_WARN, "!fcip(%d): port attach "
838 				    "failed: alloc failed",
839 				    ddi_get_instance(port_info->port_dip));
840 				goto done;
841 			}
842 			fport->fcipp_next = fcip_port_head;
843 			fcip_port_head = fport;
844 		}
845 
846 		mutex_exit(&fcip_global_mutex);
847 
848 		/*
849 		 * now fill in the details about the port itself
850 		 */
851 		fport->fcipp_linkage = *port_info->port_linkage;
852 		fport->fcipp_handle = port_info->port_handle;
853 		fport->fcipp_dip = port_info->port_dip;
854 		fport->fcipp_topology = port_info->port_flags;
855 		fport->fcipp_pstate = port_info->port_state;
856 		fport->fcipp_naa = port_info->port_pwwn.w.naa_id;
857 		bcopy(&port_info->port_pwwn, &fport->fcipp_pwwn,
858 		    sizeof (la_wwn_t));
859 		bcopy(&port_info->port_nwwn, &fport->fcipp_nwwn,
860 		    sizeof (la_wwn_t));
861 		fport->fcipp_fca_pkt_size = port_info->port_fca_pkt_size;
862 		fport->fcipp_cmd_dma_attr = *port_info->port_cmd_dma_attr;
863 		fport->fcipp_resp_dma_attr = *port_info->port_resp_dma_attr;
864 		fport->fcipp_fca_acc_attr = *port_info->port_acc_attr;
865 		src_id.port_id = sid;
866 		src_id.priv_lilp_posit = 0;
867 		fport->fcipp_sid = src_id;
868 
869 		/*
870 		 * allocate soft state for this instance
871 		 */
872 		instance = ddi_get_instance(fport->fcipp_dip);
873 		if (ddi_soft_state_zalloc(fcip_softp,
874 		    instance) != DDI_SUCCESS) {
875 			rval = FC_FAILURE;
876 			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
877 			    "soft state alloc failed", instance);
878 			goto failure;
879 		}
880 
881 		fptr = ddi_get_soft_state(fcip_softp, instance);
882 
883 		if (fptr == NULL) {
884 			rval = FC_FAILURE;
885 			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
886 			    "failure to get soft state", instance);
887 			goto failure;
888 		}
889 
890 		/*
891 		 * initialize all mutexes and locks required for this module
892 		 */
893 		mutex_init(&fptr->fcip_mutex, NULL, MUTEX_DRIVER, NULL);
894 		mutex_init(&fptr->fcip_ub_mutex, NULL, MUTEX_DRIVER, NULL);
895 		mutex_init(&fptr->fcip_rt_mutex, NULL, MUTEX_DRIVER, NULL);
896 		mutex_init(&fptr->fcip_dest_mutex, NULL, MUTEX_DRIVER, NULL);
897 		mutex_init(&fptr->fcip_sendup_mutex, NULL, MUTEX_DRIVER, NULL);
898 		cv_init(&fptr->fcip_farp_cv, NULL, CV_DRIVER, NULL);
899 		cv_init(&fptr->fcip_sendup_cv, NULL, CV_DRIVER, NULL);
900 		cv_init(&fptr->fcip_ub_cv, NULL, CV_DRIVER, NULL);
901 
902 		mutex_enter(&fptr->fcip_mutex);
903 
904 		fptr->fcip_dip = fport->fcipp_dip;	/* parent's dip */
905 		fptr->fcip_instance = instance;
906 		fptr->fcip_ub_upstream = 0;
907 
908 		if (FC_PORT_STATE_MASK(port_info->port_state) ==
909 		    FC_STATE_ONLINE) {
910 			fptr->fcip_port_state = FCIP_PORT_ONLINE;
911 			if (fptr->fcip_flags & FCIP_LINK_DOWN) {
912 				fptr->fcip_flags &= ~FCIP_LINK_DOWN;
913 			}
914 		} else {
915 			fptr->fcip_port_state = FCIP_PORT_OFFLINE;
916 		}
917 
918 		fptr->fcip_flags |= FCIP_ATTACHING;
919 		fptr->fcip_port_info = fport;
920 
921 		/*
922 		 * Extract our MAC addr from our port's WWN. The lower 48
923 		 * bits will be our MAC address
924 		 */
925 		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
926 
927 		fport->fcipp_fcip = fptr;
928 
929 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
930 		    (CE_NOTE, "fcipdest : 0x%lx, rtable : 0x%lx",
931 		    (long)(sizeof (fptr->fcip_dest)),
932 		    (long)(sizeof (fptr->fcip_rtable))));
933 
934 		bzero(fptr->fcip_dest, sizeof (fptr->fcip_dest));
935 		bzero(fptr->fcip_rtable, sizeof (fptr->fcip_rtable));
936 
937 		/*
938 		 * create a taskq to handle sundry jobs for the driver
939 		 * This way we can have jobs run in parallel
940 		 */
941 		fptr->fcip_tq = taskq_create("fcip_tasks",
942 		    FCIP_NUM_THREADS, MINCLSYSPRI, FCIP_MIN_TASKS,
943 		    FCIP_MAX_TASKS, TASKQ_PREPOPULATE);
944 
945 		mutex_exit(&fptr->fcip_mutex);
946 
947 		/*
948 		 * create a separate thread to handle all unsolicited
949 		 * callback handling. This is because unsolicited_callback
950 		 * can happen from an interrupt context and the upstream
951 		 * modules can put new messages right back in the same
952 		 * thread context. This usually works fine, but sometimes
953 		 * we may have to block to obtain the dest struct entries
954 		 * for some remote ports.
955 		 */
956 		mutex_enter(&fptr->fcip_sendup_mutex);
957 		if (thread_create(NULL, DEFAULTSTKSZ,
958 		    (void (*)())fcip_sendup_thr, (caddr_t)fptr, 0, &p0,
959 		    TS_RUN, minclsyspri) == NULL) {
960 			mutex_exit(&fptr->fcip_sendup_mutex);
961 			cmn_err(CE_WARN,
962 			    "!unable to create fcip sendup thread for "
963 			    " instance: 0x%x", instance);
964 			rval = FC_FAILURE;
965 			goto done;
966 		}
967 		fptr->fcip_sendup_thr_initted = 1;
968 		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
969 		mutex_exit(&fptr->fcip_sendup_mutex);
970 
971 
972 		/* Let the attach handler do the rest */
973 		if (fcip_port_attach_handler(fptr) != FC_SUCCESS) {
974 			/*
975 			 * We have already cleaned up so return
976 			 */
977 			rval = FC_FAILURE;
978 			cmn_err(CE_WARN, "!fcip(%d): port attach failed",
979 			    instance);
980 			goto done;
981 		}
982 
983 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_CONT,
984 		    "!fcip attach for port instance (0x%x) successful",
985 		    instance));
986 
987 		rval = FC_SUCCESS;
988 		goto done;
989 	}
990 	case FC_CMD_POWER_UP:
991 	/* FALLTHROUGH */
992 	case FC_CMD_RESUME:
993 		mutex_enter(&fcip_global_mutex);
994 		fport = fcip_port_head;
995 		while (fport != NULL) {
996 			if (fport->fcipp_handle == port_info->port_handle) {
997 				break;
998 			}
999 			fport = fport->fcipp_next;
1000 		}
1001 		if (fport == NULL) {
1002 			rval = FC_SUCCESS;
1003 			mutex_exit(&fcip_global_mutex);
1004 			goto done;
1005 		}
1006 		rval = fcip_handle_resume(fport, port_info, cmd);
1007 		mutex_exit(&fcip_global_mutex);
1008 		goto done;
1009 
1010 	default:
1011 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1012 		    "unknown cmd type 0x%x in port_attach", cmd));
1013 		rval = FC_FAILURE;
1014 		goto done;
1015 	}
1016 
1017 failure:
1018 	if (fport) {
1019 		mutex_enter(&fcip_global_mutex);
1020 		fcip_num_attaching--;
1021 		ASSERT(fcip_num_attaching >= 0);
1022 		(void) fcip_softstate_free(fport);
1023 		fcip_port_attach_pending = 0;
1024 		mutex_exit(&fcip_global_mutex);
1025 	}
1026 	return (rval);
1027 
1028 done:
1029 	mutex_enter(&fcip_global_mutex);
1030 	fcip_port_attach_pending = 0;
1031 	mutex_exit(&fcip_global_mutex);
1032 	return (rval);
1033 }
1034 
/*
 * fcip_port_attach_handler : Completes the port attach operation after
 * the ulp_port_attach routine has completed its ground work. The job
 * of this function among other things is to obtain and handle topology
 * specifics, initialize a port, setup broadcast address entries in
 * the fcip tables etc. This routine cleans up behind itself on failures.
 * Returns FC_SUCCESS or FC_FAILURE.
 */
static int
fcip_port_attach_handler(struct fcip *fptr)
{
	fcip_port_info_t		*fport = fptr->fcip_port_info;
	int				rval = FC_FAILURE;

	ASSERT(fport != NULL);

	mutex_enter(&fcip_global_mutex);

	FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
	    "fcip module dip: %p instance: %d",
	    (void *)fcip_module_dip, ddi_get_instance(fptr->fcip_dip)));

	if (fcip_module_dip == NULL) {
		clock_t		fcip_lbolt;

		fcip_lbolt = ddi_get_lbolt();
		/*
		 * we need to use the fcip devinfo for creating
		 * the clone device node, but the fcip attach
		 * (from its conf file entry claiming to be a
		 * child of pseudo) may not have happened yet.
		 * wait here for 10 seconds and fail port attach
		 * if the fcip devinfo is not attached yet
		 */
		fcip_lbolt += drv_usectohz(FCIP_INIT_DELAY);

		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
		    (CE_WARN, "cv_timedwait lbolt %lx", fcip_lbolt));

		/*
		 * fcip_attach() signals fcip_global_cv once the fcip
		 * devinfo node is in place; wait for that signal (or
		 * give up when fcip_lbolt passes).
		 */
		(void) cv_timedwait(&fcip_global_cv, &fcip_global_mutex,
		    fcip_lbolt);

		if (fcip_module_dip == NULL) {
			mutex_exit(&fcip_global_mutex);

			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
				"fcip attach did not happen"));
			goto port_attach_cleanup;
		}
	}

	if ((!fcip_minor_node_created) &&
	    fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		/*
		 * Checking for same topologies which are considered valid
		 * by fcip_handle_topology(). Dont create a minor node if
		 * nothing is hanging off the FC port.
		 */
		if (ddi_create_minor_node(fcip_module_dip, "fcip", S_IFCHR,
		    ddi_get_instance(fptr->fcip_dip), DDI_PSEUDO,
		    CLONE_DEV) == DDI_FAILURE) {
			mutex_exit(&fcip_global_mutex);
			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
			    "failed to create minor node for fcip(%d)",
			    ddi_get_instance(fptr->fcip_dip)));
			goto port_attach_cleanup;
		}
		fcip_minor_node_created++;
	}
	mutex_exit(&fcip_global_mutex);

	/*
	 * initialize port for traffic
	 */
	if (fcip_init_port(fptr) != FC_SUCCESS) {
		/* fcip_init_port has already cleaned up its stuff */

		mutex_enter(&fcip_global_mutex);

		if ((fcip_num_instances == 1) &&
		    (fcip_minor_node_created == 1)) {
			/* Remove minor node iff this is the last instance */
			ddi_remove_minor_node(fcip_module_dip, NULL);
		}

		mutex_exit(&fcip_global_mutex);

		goto port_attach_cleanup;
	}

	/* mark the instance fully initialized and open for business */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_ATTACHING;
	fptr->fcip_flags |= FCIP_INITED;
	fptr->fcip_timeout_ticks = 0;

	/*
	 * start the timeout threads
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));

	mutex_exit(&fptr->fcip_mutex);
	mutex_enter(&fcip_global_mutex);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_SUCCESS;
	return (rval);

port_attach_cleanup:
	/* failure path: release soft state and the attach reference */
	mutex_enter(&fcip_global_mutex);
	(void) fcip_softstate_free(fport);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_FAILURE;
	return (rval);
}
1153 
1154 
/*
 * Handler for DDI_RESUME operations. Port must be ready to restart IP
 * traffic on resume
 */
static int
fcip_handle_resume(fcip_port_info_t *fport, fc_ulp_port_info_t *port_info,
    fc_attach_cmd_t cmd)
{
	int		rval = FC_SUCCESS;
	struct fcip	*fptr = fport->fcipp_fcip;
	struct fcipstr	*tslp;
	int		index;


	ASSERT(fptr != NULL);

	mutex_enter(&fptr->fcip_mutex);

	if (cmd == FC_CMD_POWER_UP) {
		fptr->fcip_flags &= ~(FCIP_POWER_DOWN);
		/*
		 * If the port is still suspended, defer the rest of
		 * the restart work to the later FC_CMD_RESUME.
		 */
		if (fptr->fcip_flags & FCIP_SUSPENDED) {
			mutex_exit(&fptr->fcip_mutex);
			return (FC_SUCCESS);
		}
	} else if (cmd == FC_CMD_RESUME) {
		fptr->fcip_flags &= ~(FCIP_SUSPENDED);
	} else {
		/* neither POWER_UP nor RESUME - reject */
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	/*
	 * set the current port state and topology
	 */
	fport->fcipp_topology = port_info->port_flags;
	fport->fcipp_pstate = port_info->port_state;

	/*
	 * See whether any stream is plumbed on this instance.
	 */
	rw_enter(&fcipstruplock, RW_READER);
	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	rw_exit(&fcipstruplock);

	/*
	 * No active streams on this port
	 */
	if (tslp == NULL) {
		rval = FC_SUCCESS;
		goto done;
	}

	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		struct fcip_routing_table	*frp;

		frp = fptr->fcip_rtable[index];
		while (frp) {
			uint32_t		did;
			/*
			 * Mark the broadcast RTE available again. It
			 * was marked SUSPENDED during SUSPEND.
			 */
			did = fcip_get_broadcast_did(fptr);
			if (frp->fcipr_d_id.port_id == did) {
				frp->fcipr_state = 0;
				/*
				 * Found the broadcast entry; bump index
				 * past the last bucket so the outer for
				 * loop terminates as well.
				 */
				index = FCIP_RT_HASH_ELEMS;
				break;
			}
			frp = frp->fcipr_next;
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);

	/*
	 * fcip_handle_topology will update the port entries in the
	 * routing table.
	 * fcip_handle_topology also takes care of resetting the
	 * fcipr_state field in the routing table structure. The entries
	 * were set to RT_INVALID during suspend.
	 */
	fcip_handle_topology(fptr);

done:
	/*
	 * Restart the timeout thread
	 * (fcip_mutex is still held here from the top of the routine.)
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
1248 
1249 
1250 /*
1251  * Insert a destination port entry into the routing table for
1252  * this port
1253  */
1254 static void
fcip_rt_update(struct fcip * fptr,fc_portmap_t * devlist,uint32_t listlen)1255 fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, uint32_t listlen)
1256 {
1257 	struct fcip_routing_table	*frp;
1258 	fcip_port_info_t		*fport = fptr->fcip_port_info;
1259 	int				hash_bucket, i;
1260 	fc_portmap_t			*pmap;
1261 	char				wwn_buf[20];
1262 
1263 	ASSERT(!mutex_owned(&fptr->fcip_mutex));
1264 	mutex_enter(&fptr->fcip_rt_mutex);
1265 
1266 	for (i = 0; i < listlen; i++) {
1267 		pmap = &(devlist[i]);
1268 
1269 		frp = fcip_lookup_rtable(fptr, &(pmap->map_pwwn),
1270 		    FCIP_COMPARE_PWWN);
1271 		/*
1272 		 * If an entry for a port in the devlist exists in the
1273 		 * in the per port routing table, make sure the data
1274 		 * is current. We need to do this irrespective of the
1275 		 * underlying port topology.
1276 		 */
1277 		switch (pmap->map_type) {
1278 		/* FALLTHROUGH */
1279 		case PORT_DEVICE_NOCHANGE:
1280 		/* FALLTHROUGH */
1281 		case PORT_DEVICE_USER_LOGIN:
1282 		/* FALLTHROUGH */
1283 		case PORT_DEVICE_CHANGED:
1284 		/* FALLTHROUGH */
1285 		case PORT_DEVICE_NEW:
1286 			if (frp == NULL) {
1287 				goto add_new_entry;
1288 			} else if (frp) {
1289 				goto update_entry;
1290 			} else {
1291 				continue;
1292 			}
1293 
1294 		case PORT_DEVICE_OLD:
1295 		/* FALLTHROUGH */
1296 		case PORT_DEVICE_USER_LOGOUT:
1297 			/*
1298 			 * Mark entry for removal from Routing Table if
1299 			 * one exists. Let the timeout thread actually
1300 			 * remove the entry after we've given up hopes
1301 			 * of the port ever showing up.
1302 			 */
1303 			if (frp) {
1304 				uint32_t		did;
1305 
1306 				/*
1307 				 * Mark the routing table as invalid to bail
1308 				 * the packets early that are in transit
1309 				 */
1310 				did = fptr->fcip_broadcast_did;
1311 				if (frp->fcipr_d_id.port_id != did) {
1312 					frp->fcipr_pd = NULL;
1313 					frp->fcipr_state = FCIP_RT_INVALID;
1314 					frp->fcipr_invalid_timeout =
1315 					    fptr->fcip_timeout_ticks +
1316 					    FCIP_RTE_TIMEOUT;
1317 				}
1318 			}
1319 			continue;
1320 
1321 		default:
1322 			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
1323 			    "unknown map flags in rt_update"));
1324 			continue;
1325 		}
1326 add_new_entry:
1327 		ASSERT(frp == NULL);
1328 		hash_bucket = FCIP_RT_HASH(pmap->map_pwwn.raw_wwn);
1329 
1330 		ASSERT(hash_bucket < FCIP_RT_HASH_ELEMS);
1331 
1332 		frp = (struct fcip_routing_table *)
1333 		    kmem_zalloc(sizeof (struct fcip_routing_table), KM_SLEEP);
1334 		/* insert at beginning of hash bucket */
1335 		frp->fcipr_next = fptr->fcip_rtable[hash_bucket];
1336 		fptr->fcip_rtable[hash_bucket] = frp;
1337 		fc_wwn_to_str(&pmap->map_pwwn, wwn_buf);
1338 		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
1339 		    "added entry for pwwn %s and d_id 0x%x",
1340 		    wwn_buf, pmap->map_did.port_id));
1341 update_entry:
1342 		bcopy((void *)&pmap->map_pwwn,
1343 		    (void *)&frp->fcipr_pwwn, sizeof (la_wwn_t));
1344 		bcopy((void *)&pmap->map_nwwn, (void *)&frp->fcipr_nwwn,
1345 		    sizeof (la_wwn_t));
1346 		frp->fcipr_d_id = pmap->map_did;
1347 		frp->fcipr_state = pmap->map_state;
1348 		frp->fcipr_pd = pmap->map_pd;
1349 
1350 		/*
1351 		 * If there is no pd for a destination port that is not
1352 		 * a broadcast entry, the port is pretty much unusable - so
1353 		 * mark the port for removal so we can try adding back the
1354 		 * entry again.
1355 		 */
1356 		if ((frp->fcipr_pd == NULL) &&
1357 		    (frp->fcipr_d_id.port_id != fptr->fcip_broadcast_did)) {
1358 			frp->fcipr_state = PORT_DEVICE_INVALID;
1359 			frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
1360 			    (FCIP_RTE_TIMEOUT / 2);
1361 		}
1362 		frp->fcipr_fca_dev =
1363 		    fc_ulp_get_fca_device(fport->fcipp_handle, pmap->map_did);
1364 
1365 		/*
1366 		 * login to the remote port. Don't worry about
1367 		 * plogi failures for now
1368 		 */
1369 		if (pmap->map_pd != NULL) {
1370 			(void) fcip_do_plogi(fptr, frp);
1371 		} else if (FC_TOP_EXTERNAL(fport->fcipp_topology)) {
1372 			fc_wwn_to_str(&frp->fcipr_pwwn, wwn_buf);
1373 			FCIP_DEBUG(FCIP_DEBUG_MISC, (CE_NOTE,
1374 			    "logging into pwwn %s, d_id 0x%x",
1375 			    wwn_buf, frp->fcipr_d_id.port_id));
1376 			(void) fcip_do_plogi(fptr, frp);
1377 		}
1378 
1379 	}
1380 	mutex_exit(&fptr->fcip_rt_mutex);
1381 }
1382 
1383 
1384 /*
1385  * return a matching routing table entry for a given fcip instance
1386  */
1387 struct fcip_routing_table *
fcip_lookup_rtable(struct fcip * fptr,la_wwn_t * wwn,int matchflag)1388 fcip_lookup_rtable(struct fcip *fptr, la_wwn_t *wwn, int matchflag)
1389 {
1390 	struct fcip_routing_table	*frp = NULL;
1391 	int				hash_bucket;
1392 
1393 	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
1394 
1395 	hash_bucket = FCIP_RT_HASH(wwn->raw_wwn);
1396 	frp = fptr->fcip_rtable[hash_bucket];
1397 	while (frp != NULL) {
1398 
1399 		if (fcip_wwn_compare(&frp->fcipr_pwwn, wwn, matchflag) == 0) {
1400 			break;
1401 		}
1402 
1403 		frp = frp->fcipr_next;
1404 	}
1405 	return (frp);
1406 }
1407 
1408 /*
1409  * Attach of fcip under pseudo. The actual setup of the interface
1410  * actually happens in fcip_port_attach on a callback from the
1411  * transport. The port_attach callback however can proceed only
1412  * after the devinfo for fcip has been created under pseudo
1413  */
1414 static int
fcip_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)1415 fcip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1416 {
1417 	switch ((int)cmd) {
1418 
1419 	case DDI_ATTACH: {
1420 		ASSERT(fcip_module_dip == NULL);
1421 		fcip_module_dip = dip;
1422 
1423 		/*
1424 		 * this call originates as a result of fcip's conf
1425 		 * file entry and will result in a fcip instance being
1426 		 * a child of pseudo. We should ensure here that the port
1427 		 * driver (fp) has been loaded and initted since we would
1428 		 * never get a port attach callback without fp being loaded.
1429 		 * If we are unable to succesfully load and initalize fp -
1430 		 * just fail this attach.
1431 		 */
1432 		mutex_enter(&fcip_global_mutex);
1433 
1434 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1435 		    (CE_WARN, "global cv - signaling"));
1436 
1437 		cv_signal(&fcip_global_cv);
1438 
1439 		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1440 		    (CE_WARN, "global cv - signaled"));
1441 		mutex_exit(&fcip_global_mutex);
1442 		return (DDI_SUCCESS);
1443 	}
1444 	case DDI_RESUME:
1445 		/*
1446 		 * Resume appears trickier
1447 		 */
1448 		return (DDI_SUCCESS);
1449 	default:
1450 		return (DDI_FAILURE);
1451 	}
1452 }
1453 
1454 
1455 /*
1456  * The detach entry point to permit unloading fcip. We make sure
1457  * there are no active streams before we proceed with the detach
1458  */
1459 /* ARGSUSED */
1460 static int
fcip_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)1461 fcip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1462 {
1463 	struct fcip		*fptr;
1464 	fcip_port_info_t	*fport;
1465 	int			detached;
1466 
1467 	switch (cmd) {
1468 	case DDI_DETACH: {
1469 		/*
1470 		 * If we got here, any active streams should have been
1471 		 * unplumbed but check anyway
1472 		 */
1473 		mutex_enter(&fcip_global_mutex);
1474 		if (fcipstrup != NULL) {
1475 			mutex_exit(&fcip_global_mutex);
1476 			return (DDI_FAILURE);
1477 		}
1478 
1479 		if (fcip_port_head != NULL) {
1480 			/*
1481 			 * Check to see if we have unattached/unbound
1482 			 * ports. If all the ports are unattached/unbound go
1483 			 * ahead and unregister with the transport
1484 			 */
1485 			fport = fcip_port_head;
1486 			while (fport != NULL) {
1487 				fptr = fport->fcipp_fcip;
1488 				if (fptr == NULL) {
1489 					continue;
1490 				}
1491 				mutex_enter(&fptr->fcip_mutex);
1492 				fptr->fcip_flags |= FCIP_DETACHING;
1493 				if (fptr->fcip_ipq ||
1494 				    fptr->fcip_flags & (FCIP_IN_TIMEOUT |
1495 				    FCIP_IN_CALLBACK | FCIP_ATTACHING |
1496 				    FCIP_SUSPENDED | FCIP_POWER_DOWN |
1497 				    FCIP_REG_INPROGRESS)) {
1498 					mutex_exit(&fptr->fcip_mutex);
1499 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1500 					    "fcip instance busy"));
1501 					break;
1502 				}
1503 				/*
1504 				 * Check for any outstanding pkts. If yes
1505 				 * fail the detach
1506 				 */
1507 				mutex_enter(&fptr->fcip_dest_mutex);
1508 				if (fcip_port_get_num_pkts(fptr) > 0) {
1509 					mutex_exit(&fptr->fcip_dest_mutex);
1510 					mutex_exit(&fptr->fcip_mutex);
1511 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1512 					    "fcip instance busy - pkts "
1513 					    "pending"));
1514 					break;
1515 				}
1516 				mutex_exit(&fptr->fcip_dest_mutex);
1517 
1518 				mutex_enter(&fptr->fcip_rt_mutex);
1519 				if (fcip_plogi_in_progress(fptr)) {
1520 					mutex_exit(&fptr->fcip_rt_mutex);
1521 					mutex_exit(&fptr->fcip_mutex);
1522 					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1523 					    "fcip instance busy - plogi in "
1524 					    "progress"));
1525 					break;
1526 				}
1527 				mutex_exit(&fptr->fcip_rt_mutex);
1528 
1529 				mutex_exit(&fptr->fcip_mutex);
1530 				fport = fport->fcipp_next;
1531 			}
1532 			/*
1533 			 * if fport is non NULL - we have active ports
1534 			 */
1535 			if (fport != NULL) {
1536 				/*
1537 				 * Remove the DETACHING flags on the ports
1538 				 */
1539 				fport = fcip_port_head;
1540 				while (fport != NULL) {
1541 					fptr = fport->fcipp_fcip;
1542 					mutex_enter(&fptr->fcip_mutex);
1543 					fptr->fcip_flags &= ~(FCIP_DETACHING);
1544 					mutex_exit(&fptr->fcip_mutex);
1545 					fport = fport->fcipp_next;
1546 				}
1547 				mutex_exit(&fcip_global_mutex);
1548 				return (DDI_FAILURE);
1549 			}
1550 		}
1551 
1552 		/*
1553 		 * free up all softstate structures
1554 		 */
1555 		fport = fcip_port_head;
1556 		while (fport != NULL) {
1557 			detached = 1;
1558 
1559 			fptr = fport->fcipp_fcip;
1560 			if (fptr) {
1561 				mutex_enter(&fptr->fcip_mutex);
1562 				/*
1563 				 * Check to see if somebody beat us to the
1564 				 * punch
1565 				 */
1566 				detached = fptr->fcip_flags & FCIP_DETACHED;
1567 				fptr->fcip_flags &= ~(FCIP_DETACHING);
1568 				fptr->fcip_flags |= FCIP_DETACHED;
1569 				mutex_exit(&fptr->fcip_mutex);
1570 			}
1571 
1572 			if (!detached) {
1573 				fport = fcip_softstate_free(fport);
1574 			} else {
1575 				/*
1576 				 * If the port was marked as detached
1577 				 * but it was still in the list, that
1578 				 * means another thread has marked it
1579 				 * but we got in while it released the
1580 				 * fcip_global_mutex in softstate_free.
1581 				 * Given that, we're still safe to use
1582 				 * fport->fcipp_next to find out what
1583 				 * the next port on the list is.
1584 				 */
1585 				fport = fport->fcipp_next;
1586 			}
1587 
1588 			FCIP_DEBUG(FCIP_DEBUG_DETACH,
1589 			    (CE_NOTE, "detaching port"));
1590 		}
1591 
1592 		/*
1593 		 * If we haven't removed all the port structures, we
1594 		 * aren't yet ready to be detached.
1595 		 */
1596 		if (fcip_port_head != NULL) {
1597 			mutex_exit(&fcip_global_mutex);
1598 			return (DDI_FAILURE);
1599 		}
1600 
1601 		fcip_num_instances = 0;
1602 		mutex_exit(&fcip_global_mutex);
1603 		fcip_module_dip = NULL;
1604 		return (DDI_SUCCESS);
1605 	}
1606 	case DDI_SUSPEND:
1607 		return (DDI_SUCCESS);
1608 	default:
1609 		return (DDI_FAILURE);
1610 	}
1611 }
1612 
/*
 * The port_detach callback is called from the transport when a
 * FC port is being removed from the transport's control. This routine
 * provides fcip with an opportunity to cleanup all activities and
 * structures on the port marked for removal.
 */
/* ARGSUSED */
static int
fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
    fc_detach_cmd_t cmd)
{
	int			rval = FC_FAILURE;
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	struct fcipstr		*strp;

	switch (cmd) {
	case FC_CMD_DETACH: {
		mutex_enter(&fcip_global_mutex);

		if (fcip_port_head == NULL) {
			/*
			 * we are all done but our fini has not been
			 * called yet!! Let's hope we have no active
			 * fcip instances here. - strange scenario but
			 * no harm in having this return a success.
			 */
			fcip_check_remove_minor_node();

			mutex_exit(&fcip_global_mutex);
			return (FC_SUCCESS);
		} else {
			/*
			 * traverse the port list
			 */
			fport = fcip_port_head;
			while (fport != NULL) {
				if (fport->fcipp_handle ==
				    port_info->port_handle) {
					fptr = fport->fcipp_fcip;

					/*
					 * Fail the port detach if there is
					 * still an attached, bound stream on
					 * this interface.
					 */

					rw_enter(&fcipstruplock, RW_READER);

					for (strp = fcipstrup; strp != NULL;
					    strp = strp->sl_nextp) {
						if (strp->sl_fcip == fptr) {
							rw_exit(&fcipstruplock);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}
					}

					rw_exit(&fcipstruplock);

					/*
					 * fail port detach if we are in
					 * the middle of a deferred port attach
					 * or if the port has outstanding pkts
					 */
					if (fptr != NULL) {
						mutex_enter(&fptr->fcip_mutex);
						if (fcip_check_port_busy
						    (fptr) ||
						    (fptr->fcip_flags &
						    FCIP_DETACHED)) {
							mutex_exit(
							    &fptr->fcip_mutex);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}

						fptr->fcip_flags |=
						    FCIP_DETACHED;
						mutex_exit(&fptr->fcip_mutex);
					}
					(void) fcip_softstate_free(fport);

					/*
					 * Drop the shared minor node when
					 * the last instance goes away.
					 */
					fcip_check_remove_minor_node();
					mutex_exit(&fcip_global_mutex);
					return (FC_SUCCESS);
				}
				fport = fport->fcipp_next;
			}
			ASSERT(fport == NULL);
		}
		mutex_exit(&fcip_global_mutex);
		break;
	}
	case FC_CMD_POWER_DOWN:
	/* FALLTHROUGH */
	case FC_CMD_SUSPEND:
		/*
		 * Locate the port for this handle and quiesce its I/O
		 * via fcip_handle_suspend().
		 */
		mutex_enter(&fcip_global_mutex);
		fport = fcip_port_head;
		while (fport != NULL) {
			if (fport->fcipp_handle == port_info->port_handle) {
				break;
			}
			fport = fport->fcipp_next;
		}
		if (fport == NULL) {
			/* unknown port handle - fall through to FC_FAILURE */
			mutex_exit(&fcip_global_mutex);
			break;
		}
		rval = fcip_handle_suspend(fport, cmd);
		mutex_exit(&fcip_global_mutex);
		break;
	default:
		FCIP_DEBUG(FCIP_DEBUG_DETACH,
		    (CE_WARN, "unknown port detach command!!"));
		break;
	}
	return (rval);
}
1734 
1735 
1736 /*
1737  * Returns 0 if the port is not busy, else returns non zero.
1738  */
1739 static int
fcip_check_port_busy(struct fcip * fptr)1740 fcip_check_port_busy(struct fcip *fptr)
1741 {
1742 	int rval = 0, num_pkts = 0;
1743 
1744 	ASSERT(fptr != NULL);
1745 	ASSERT(MUTEX_HELD(&fptr->fcip_mutex));
1746 
1747 	mutex_enter(&fptr->fcip_dest_mutex);
1748 
1749 	if (fptr->fcip_flags & FCIP_PORT_BUSY ||
1750 	    ((num_pkts = fcip_port_get_num_pkts(fptr)) > 0) ||
1751 	    fptr->fcip_num_ipkts_pending) {
1752 		rval = 1;
1753 		FCIP_DEBUG(FCIP_DEBUG_DETACH,
1754 		    (CE_NOTE, "!fcip_check_port_busy: port is busy "
1755 		    "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!",
1756 		    fptr->fcip_flags, num_pkts, fptr->fcip_num_ipkts_pending));
1757 	}
1758 
1759 	mutex_exit(&fptr->fcip_dest_mutex);
1760 	return (rval);
1761 }
1762 
1763 /*
1764  * Helper routine to remove fcip's minor node
1765  * There is one minor node per system and it should be removed if there are no
1766  * other fcip instances (which has a 1:1 mapping for fp instances) present
1767  */
1768 static void
fcip_check_remove_minor_node(void)1769 fcip_check_remove_minor_node(void)
1770 {
1771 	ASSERT(MUTEX_HELD(&fcip_global_mutex));
1772 
1773 	/*
1774 	 * If there are no more fcip (fp) instances, remove the
1775 	 * minor node for fcip.
1776 	 * Reset fcip_minor_node_created to invalidate it.
1777 	 */
1778 	if (fcip_num_instances == 0 && (fcip_module_dip != NULL)) {
1779 		ddi_remove_minor_node(fcip_module_dip, NULL);
1780 		fcip_minor_node_created = 0;
1781 	}
1782 }
1783 
/*
 * This routine permits the suspend operation during a CPR/System
 * power management operation. The routine basically quiesces I/Os
 * on all active interfaces
 */
static int
fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd)
{
	struct fcip	*fptr = fport->fcipp_fcip;
	timeout_id_t	tid;
	int		index;
	int		tryagain = 0;
	int		count;
	struct fcipstr	*tslp;


	ASSERT(fptr != NULL);
	mutex_enter(&fptr->fcip_mutex);

	/*
	 * Fail if we are in the middle of a callback. Don't use delay during
	 * suspend since clock intrs are not available so busy wait
	 */
	count = 0;
	while (count++ < 15 &&
	    ((fptr->fcip_flags & FCIP_IN_CALLBACK) ||
	    (fptr->fcip_flags & FCIP_IN_TIMEOUT))) {
		mutex_exit(&fptr->fcip_mutex);
		drv_usecwait(1000000);
		mutex_enter(&fptr->fcip_mutex);
	}

	/* still in a callback/timeout after ~15s of busy waiting - give up */
	if (fptr->fcip_flags & FCIP_IN_CALLBACK ||
	    fptr->fcip_flags & FCIP_IN_TIMEOUT) {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	if (cmd == FC_CMD_POWER_DOWN) {
		/*
		 * If already SUSPENDED, the I/O quiesce below was done
		 * then; just record the power-down and finish up.
		 */
		if (fptr->fcip_flags & FCIP_SUSPENDED) {
			fptr->fcip_flags |= FCIP_POWER_DOWN;
			mutex_exit(&fptr->fcip_mutex);
			goto success;
		} else {
			fptr->fcip_flags |= FCIP_POWER_DOWN;
		}
	} else if (cmd == FC_CMD_SUSPEND) {
		fptr->fcip_flags |= FCIP_SUSPENDED;
	} else {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	mutex_exit(&fptr->fcip_mutex);
	/*
	 * If no streams are plumbed - its the easiest case - Just
	 * bail out without having to do much
	 */

	rw_enter(&fcipstruplock, RW_READER);
	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	rw_exit(&fcipstruplock);

	/*
	 * No active streams on this port
	 */
	if (tslp == NULL) {
		goto success;
	}

	/*
	 * Walk through each Routing table structure and check if
	 * the destination table has any outstanding commands. If yes
	 * wait for the commands to drain. Since we go through each
	 * routing table entry in succession, it may be wise to wait
	 * only a few seconds for each entry.
	 *
	 * NOTE(review): the "while (!tryagain)" condition exits the
	 * loop as soon as tryagain is set, so the "second pass"
	 * mentioned further below never actually runs - confirm
	 * whether a retry loop was intended here.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	while (!tryagain) {

		tryagain = 0;
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			struct fcip_routing_table	*frp;
			struct fcip_dest		*fdestp;
			la_wwn_t			*pwwn;
			int				hash_bucket;

			frp = fptr->fcip_rtable[index];
			while (frp) {
				/*
				 * Mark the routing table as SUSPENDED. Even
				 * mark the broadcast entry SUSPENDED to
				 * prevent any ARP or other broadcasts. We
				 * can reset the state of the broadcast
				 * RTE when we resume.
				 */
				frp->fcipr_state = FCIP_RT_SUSPENDED;
				pwwn = &frp->fcipr_pwwn;

				/*
				 * Get hold of destination pointer
				 */
				mutex_enter(&fptr->fcip_dest_mutex);

				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

				fdestp = fptr->fcip_dest[hash_bucket];
				while (fdestp != NULL) {
					mutex_enter(&fdestp->fcipd_mutex);
					if (fdestp->fcipd_rtable) {
						if (fcip_wwn_compare(pwwn,
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							&fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				if (fdestp == NULL) {
					/* no dest entry - nothing to drain */
					frp = frp->fcipr_next;
					continue;
				}

				/*
				 * Wait for fcip_wait_cmds seconds for
				 * the commands to drain. The rt_mutex is
				 * dropped around each 1s busy wait so other
				 * threads can make progress.
				 */
				count = 0;
				mutex_enter(&fdestp->fcipd_mutex);
				while (fdestp->fcipd_ncmds &&
				    count < fcip_wait_cmds) {
					mutex_exit(&fdestp->fcipd_mutex);
					mutex_exit(&fptr->fcip_rt_mutex);
					drv_usecwait(1000000);
					mutex_enter(&fptr->fcip_rt_mutex);
					mutex_enter(&fdestp->fcipd_mutex);
					count++;
				}
				/*
				 * Check if we were able to drain all cmds
				 * successfully. Else continue with other
				 * ports and try during the second pass
				 */
				if (fdestp->fcipd_ncmds) {
					tryagain++;
				}
				mutex_exit(&fdestp->fcipd_mutex);

				frp = frp->fcipr_next;
			}
		}
		if (tryagain == 0) {
			break;
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);

	if (tryagain) {
		/*
		 * Could not drain all outstanding commands; undo the
		 * suspend/power-down flags and fail the request.
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_flags &= ~(FCIP_SUSPENDED | FCIP_POWER_DOWN);
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

success:
	/* stop the per-second watchdog; resume restarts it */
	mutex_enter(&fptr->fcip_mutex);
	tid = fptr->fcip_timeout_id;
	fptr->fcip_timeout_id = NULL;
	mutex_exit(&fptr->fcip_mutex);

	(void) untimeout(tid);

	return (FC_SUCCESS);
}
1968 
1969 /*
1970  * the getinfo(9E) entry point
1971  */
1972 /* ARGSUSED */
1973 static int
fcip_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** result)1974 fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
1975 {
1976 	int rval = DDI_FAILURE;
1977 
1978 	switch (cmd) {
1979 	case DDI_INFO_DEVT2DEVINFO:
1980 		*result = fcip_module_dip;
1981 		if (*result)
1982 			rval = DDI_SUCCESS;
1983 		break;
1984 
1985 	case DDI_INFO_DEVT2INSTANCE:
1986 		*result = (void *)0;
1987 		rval = DDI_SUCCESS;
1988 		break;
1989 	default:
1990 		break;
1991 	}
1992 
1993 	return (rval);
1994 }
1995 
1996 /*
1997  * called from fcip_attach to initialize kstats for the link
1998  */
1999 /* ARGSUSED */
2000 static void
fcip_kstat_init(struct fcip * fptr)2001 fcip_kstat_init(struct fcip *fptr)
2002 {
2003 	int instance;
2004 	char buf[16];
2005 	struct fcipstat	*fcipstatp;
2006 
2007 	ASSERT(mutex_owned(&fptr->fcip_mutex));
2008 
2009 	instance = ddi_get_instance(fptr->fcip_dip);
2010 	(void) sprintf(buf, "fcip%d", instance);
2011 
2012 #ifdef	kstat
2013 	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2014 	    KSTAT_TYPE_NAMED,
2015 	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)),
2016 	    KSTAT_FLAG_PERSISTENT);
2017 #else
2018 	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2019 	    KSTAT_TYPE_NAMED,
2020 	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)), 0);
2021 #endif
2022 	if (fptr->fcip_kstatp == NULL) {
2023 		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "kstat created failed"));
2024 		return;
2025 	}
2026 
2027 	fcipstatp = (struct  fcipstat *)fptr->fcip_kstatp->ks_data;
2028 	kstat_named_init(&fcipstatp->fcips_ipackets,	"ipackets",
2029 		KSTAT_DATA_ULONG);
2030 	kstat_named_init(&fcipstatp->fcips_ierrors,	"ierrors",
2031 		KSTAT_DATA_ULONG);
2032 	kstat_named_init(&fcipstatp->fcips_opackets,	"opackets",
2033 		KSTAT_DATA_ULONG);
2034 	kstat_named_init(&fcipstatp->fcips_oerrors,	"oerrors",
2035 		KSTAT_DATA_ULONG);
2036 	kstat_named_init(&fcipstatp->fcips_collisions,	"collisions",
2037 		KSTAT_DATA_ULONG);
2038 	kstat_named_init(&fcipstatp->fcips_nocanput,	"nocanput",
2039 		KSTAT_DATA_ULONG);
2040 	kstat_named_init(&fcipstatp->fcips_allocbfail,	"allocbfail",
2041 		KSTAT_DATA_ULONG);
2042 
2043 	kstat_named_init(&fcipstatp->fcips_defer, "defer",
2044 		KSTAT_DATA_ULONG);
2045 	kstat_named_init(&fcipstatp->fcips_fram, "fram",
2046 		KSTAT_DATA_ULONG);
2047 	kstat_named_init(&fcipstatp->fcips_crc, "crc",
2048 		KSTAT_DATA_ULONG);
2049 	kstat_named_init(&fcipstatp->fcips_oflo, "oflo",
2050 		KSTAT_DATA_ULONG);
2051 	kstat_named_init(&fcipstatp->fcips_uflo, "uflo",
2052 		KSTAT_DATA_ULONG);
2053 	kstat_named_init(&fcipstatp->fcips_missed, "missed",
2054 		KSTAT_DATA_ULONG);
2055 	kstat_named_init(&fcipstatp->fcips_tlcol, "tlcol",
2056 		KSTAT_DATA_ULONG);
2057 	kstat_named_init(&fcipstatp->fcips_trtry, "trtry",
2058 		KSTAT_DATA_ULONG);
2059 	kstat_named_init(&fcipstatp->fcips_tnocar, "tnocar",
2060 		KSTAT_DATA_ULONG);
2061 	kstat_named_init(&fcipstatp->fcips_inits, "inits",
2062 		KSTAT_DATA_ULONG);
2063 	kstat_named_init(&fcipstatp->fcips_notbufs, "notbufs",
2064 		KSTAT_DATA_ULONG);
2065 	kstat_named_init(&fcipstatp->fcips_norbufs, "norbufs",
2066 		KSTAT_DATA_ULONG);
2067 	kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail",
2068 		KSTAT_DATA_ULONG);
2069 
2070 	/*
2071 	 * required by kstat for MIB II objects(RFC 1213)
2072 	 */
2073 	kstat_named_init(&fcipstatp->fcips_rcvbytes, "fcips_rcvbytes",
2074 		KSTAT_DATA_ULONG);	/* # octets received */
2075 					/* MIB - ifInOctets */
2076 	kstat_named_init(&fcipstatp->fcips_xmtbytes, "fcips_xmtbytes",
2077 		KSTAT_DATA_ULONG);	/* # octets xmitted */
2078 					/* MIB - ifOutOctets */
2079 	kstat_named_init(&fcipstatp->fcips_multircv,	"fcips_multircv",
2080 		KSTAT_DATA_ULONG);	/* # multicast packets */
2081 					/* delivered to upper layer */
2082 					/* MIB - ifInNUcastPkts */
2083 	kstat_named_init(&fcipstatp->fcips_multixmt,	"fcips_multixmt",
2084 		KSTAT_DATA_ULONG);	/* # multicast packets */
2085 					/* requested to be sent */
2086 					/* MIB - ifOutNUcastPkts */
2087 	kstat_named_init(&fcipstatp->fcips_brdcstrcv, "fcips_brdcstrcv",
2088 		KSTAT_DATA_ULONG); /* # broadcast packets */
2089 					/* delivered to upper layer */
2090 					/* MIB - ifInNUcastPkts */
2091 	kstat_named_init(&fcipstatp->fcips_brdcstxmt, "fcips_brdcstxmt",
2092 		KSTAT_DATA_ULONG);	/* # broadcast packets */
2093 					/* requested to be sent */
2094 					/* MIB - ifOutNUcastPkts */
2095 	kstat_named_init(&fcipstatp->fcips_norcvbuf,	"fcips_norcvbuf",
2096 		KSTAT_DATA_ULONG);	/* # rcv packets discarded */
2097 					/* MIB - ifInDiscards */
2098 	kstat_named_init(&fcipstatp->fcips_noxmtbuf,	"fcips_noxmtbuf",
2099 		KSTAT_DATA_ULONG);	/* # xmt packets discarded */
2100 
2101 	fptr->fcip_kstatp->ks_update = fcip_stat_update;
2102 	fptr->fcip_kstatp->ks_private = (void *) fptr;
2103 	kstat_install(fptr->fcip_kstatp);
2104 }
2105 
2106 /*
2107  * Update the defined kstats for netstat et al to use
2108  */
2109 /* ARGSUSED */
2110 static int
fcip_stat_update(kstat_t * fcip_statp,int val)2111 fcip_stat_update(kstat_t *fcip_statp, int val)
2112 {
2113 	struct fcipstat	*fcipstatp;
2114 	struct fcip	*fptr;
2115 
2116 	fptr = (struct fcip *)fcip_statp->ks_private;
2117 	fcipstatp = (struct fcipstat *)fcip_statp->ks_data;
2118 
2119 	if (val == KSTAT_WRITE) {
2120 		fptr->fcip_ipackets	= fcipstatp->fcips_ipackets.value.ul;
2121 		fptr->fcip_ierrors	= fcipstatp->fcips_ierrors.value.ul;
2122 		fptr->fcip_opackets	= fcipstatp->fcips_opackets.value.ul;
2123 		fptr->fcip_oerrors	= fcipstatp->fcips_oerrors.value.ul;
2124 		fptr->fcip_collisions	= fcipstatp->fcips_collisions.value.ul;
2125 		fptr->fcip_defer	= fcipstatp->fcips_defer.value.ul;
2126 		fptr->fcip_fram	= fcipstatp->fcips_fram.value.ul;
2127 		fptr->fcip_crc	= fcipstatp->fcips_crc.value.ul;
2128 		fptr->fcip_oflo	= fcipstatp->fcips_oflo.value.ul;
2129 		fptr->fcip_uflo	= fcipstatp->fcips_uflo.value.ul;
2130 		fptr->fcip_missed	= fcipstatp->fcips_missed.value.ul;
2131 		fptr->fcip_tlcol	= fcipstatp->fcips_tlcol.value.ul;
2132 		fptr->fcip_trtry	= fcipstatp->fcips_trtry.value.ul;
2133 		fptr->fcip_tnocar	= fcipstatp->fcips_tnocar.value.ul;
2134 		fptr->fcip_inits	= fcipstatp->fcips_inits.value.ul;
2135 		fptr->fcip_notbufs	= fcipstatp->fcips_notbufs.value.ul;
2136 		fptr->fcip_norbufs	= fcipstatp->fcips_norbufs.value.ul;
2137 		fptr->fcip_nocanput	= fcipstatp->fcips_nocanput.value.ul;
2138 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2139 		fptr->fcip_rcvbytes	= fcipstatp->fcips_rcvbytes.value.ul;
2140 		fptr->fcip_xmtbytes	= fcipstatp->fcips_xmtbytes.value.ul;
2141 		fptr->fcip_multircv	= fcipstatp->fcips_multircv.value.ul;
2142 		fptr->fcip_multixmt	= fcipstatp->fcips_multixmt.value.ul;
2143 		fptr->fcip_brdcstrcv	= fcipstatp->fcips_brdcstrcv.value.ul;
2144 		fptr->fcip_norcvbuf	= fcipstatp->fcips_norcvbuf.value.ul;
2145 		fptr->fcip_noxmtbuf	= fcipstatp->fcips_noxmtbuf.value.ul;
2146 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2147 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2148 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2149 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2150 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2151 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2152 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2153 		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2154 
2155 	} else {
2156 		fcipstatp->fcips_ipackets.value.ul	= fptr->fcip_ipackets;
2157 		fcipstatp->fcips_ierrors.value.ul	= fptr->fcip_ierrors;
2158 		fcipstatp->fcips_opackets.value.ul	= fptr->fcip_opackets;
2159 		fcipstatp->fcips_oerrors.value.ul	= fptr->fcip_oerrors;
2160 		fcipstatp->fcips_collisions.value.ul	= fptr->fcip_collisions;
2161 		fcipstatp->fcips_nocanput.value.ul	= fptr->fcip_nocanput;
2162 		fcipstatp->fcips_allocbfail.value.ul	= fptr->fcip_allocbfail;
2163 		fcipstatp->fcips_defer.value.ul	= fptr->fcip_defer;
2164 		fcipstatp->fcips_fram.value.ul	= fptr->fcip_fram;
2165 		fcipstatp->fcips_crc.value.ul	= fptr->fcip_crc;
2166 		fcipstatp->fcips_oflo.value.ul	= fptr->fcip_oflo;
2167 		fcipstatp->fcips_uflo.value.ul	= fptr->fcip_uflo;
2168 		fcipstatp->fcips_missed.value.ul	= fptr->fcip_missed;
2169 		fcipstatp->fcips_tlcol.value.ul	= fptr->fcip_tlcol;
2170 		fcipstatp->fcips_trtry.value.ul	= fptr->fcip_trtry;
2171 		fcipstatp->fcips_tnocar.value.ul	= fptr->fcip_tnocar;
2172 		fcipstatp->fcips_inits.value.ul	= fptr->fcip_inits;
2173 		fcipstatp->fcips_norbufs.value.ul	= fptr->fcip_norbufs;
2174 		fcipstatp->fcips_notbufs.value.ul	= fptr->fcip_notbufs;
2175 		fcipstatp->fcips_rcvbytes.value.ul	= fptr->fcip_rcvbytes;
2176 		fcipstatp->fcips_xmtbytes.value.ul	= fptr->fcip_xmtbytes;
2177 		fcipstatp->fcips_multircv.value.ul	= fptr->fcip_multircv;
2178 		fcipstatp->fcips_multixmt.value.ul	= fptr->fcip_multixmt;
2179 		fcipstatp->fcips_brdcstrcv.value.ul	= fptr->fcip_brdcstrcv;
2180 		fcipstatp->fcips_brdcstxmt.value.ul	= fptr->fcip_brdcstxmt;
2181 		fcipstatp->fcips_norcvbuf.value.ul	= fptr->fcip_norcvbuf;
2182 		fcipstatp->fcips_noxmtbuf.value.ul	= fptr->fcip_noxmtbuf;
2183 
2184 	}
2185 	return (0);
2186 }
2187 
2188 
2189 /*
2190  * fcip_statec_cb: handles all required state change callback notifications
2191  * it receives from the transport
2192  */
2193 /* ARGSUSED */
static void
fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
    uint32_t listlen, uint32_t sid)
{
	fcip_port_info_t	*fport;
	struct fcip 		*fptr;
	struct fcipstr		*slp;
	queue_t			*wrq;
	int			instance;
	int 			index;
	struct fcip_routing_table 	*frtp;

	/* Map the transport's opaque port handle back to our per-port state */
	fport = fcip_get_port(phandle);

	if (fport == NULL) {
		return;
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	if (fptr == NULL) {
		return;
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, state change callback: state:0x%x, "
	    "S_ID:0x%x, count:0x%x", instance, port_state, sid, listlen));

	mutex_enter(&fptr->fcip_mutex);

	/* Ignore callbacks while detaching, suspended or powered down */
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * state change callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_SC_CB;

	fport->fcipp_pstate = port_state;

	/*
	 * Check if topology changed. If Yes - Modify the broadcast
	 * RTE entries to understand the new broadcast D_IDs
	 */
	if (fport->fcipp_topology != port_top &&
	    (port_top != FC_TOP_UNKNOWN)) {
		/* REMOVE later */
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
		    "topology changed: Old topology: 0x%x New topology 0x%x",
		    fport->fcipp_topology, port_top));
		/*
		 * If topology changed - attempt a rediscovery of
		 * devices. Helps specially in Fabric/Public loops
		 * and if on_demand_node_creation is disabled
		 */
		fport->fcipp_topology = port_top;
		/* called with fcip_mutex held */
		fcip_handle_topology(fptr);
	}

	mutex_exit(&fptr->fcip_mutex);

	/* Dispatch on the masked port state from the transport */
	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_ONLINE:
	/* FALLTHROUGH */
	case FC_STATE_LIP:
	/* FALLTHROUGH */
	case FC_STATE_LIP_LBIT_SET:

		/*
		 * nothing to do here actually other than if we
		 * were actually logged onto a port in the devlist
		 * (which indicates active communication between
		 * the host port and the port in the changelist).
		 * If however we are in a private loop or point to
		 * point mode, we need to check for any IP capable
		 * ports and update our routing table.
		 */
		switch (port_top) {
		case FC_TOP_FABRIC:
			/*
			 * This indicates a fabric port with a NameServer.
			 * Check the devlist to see if we are in active
			 * communication with a port on the devlist.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: fabric topology"));
			fcip_rt_update(fptr, changelist, listlen);
			break;
		case FC_TOP_NO_NS:
			/*
			 * No nameserver - so treat it like a Private loop
			 * or point to point topology and get a map of
			 * devices on the link and get IP capable ports to
			 * to update the routing table.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: NO_NS topology"));
		/* FALLTHROUGH */
		case FC_TOP_PRIVATE_LOOP:
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: Pvt_Loop topology"));
		/* FALLTHROUGH */
		case FC_TOP_PT_PT:
			/*
			 * call get_port_map() and update routing table
			 */
			fcip_rt_update(fptr, changelist, listlen);
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: Unknown topology"));
		}

		/*
		 * We should now enable the Queues and permit I/Os
		 * to flow through downstream. The update of routing
		 * table should have flushed out any port entries that
		 * don't exist or are not available after the state change
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		if (fptr->fcip_flags & FCIP_LINK_DOWN) {
			fptr->fcip_flags &= ~FCIP_LINK_DOWN;
		}
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Enable write queues
		 */
		rw_enter(&fcipstruplock, RW_READER);
		for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
			if (slp && slp->sl_fcip == fptr) {
				wrq = WR(slp->sl_rq);
				/* kick queues that backed up while offline */
				if (wrq->q_flag & QFULL) {
					qenable(wrq);
				}
			}
		}
		rw_exit(&fcipstruplock);
		break;
	case FC_STATE_OFFLINE:
		/*
		 * mark the port_state OFFLINE and wait for it to
		 * become online. Any new messages in this state will
		 * simply be queued back up. If the port does not
		 * come online in a short while, we can begin failing
		 * messages and flush the routing table
		 */
		mutex_enter(&fptr->fcip_mutex);
		/* deadline (in timeout ticks) for declaring the link dead */
		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Mark all Routing table entries as invalid to prevent
		 * any commands from trickling through to ports that
		 * have disappeared from under us
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			frtp = fptr->fcip_rtable[index];
			while (frtp) {
				frtp->fcipr_state = PORT_DEVICE_INVALID;
				frtp = frtp->fcipr_next;
			}
		}
		mutex_exit(&fptr->fcip_rt_mutex);

		break;

	case FC_STATE_RESET_REQUESTED:
		/*
		 * Release all Unsolicited buffers back to transport/FCA.
		 * This also means the port state is marked offline - so
		 * we may have to do what OFFLINE state requires us to do.
		 * Care must be taken to wait for any active unsolicited
		 * buffer with the other Streams modules - so wait for
		 * a freeb if the unsolicited buffer is passed back all
		 * the way upstream.
		 */
		mutex_enter(&fptr->fcip_mutex);

#ifdef FCIP_ESBALLOC
		/* block until all esballoc'ed ubufs are freed upstream */
		while (fptr->fcip_ub_upstream) {
			cv_wait(&fptr->fcip_ub_cv, &fptr->fcip_mutex);
		}
#endif	/* FCIP_ESBALLOC */

		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/* only the changelist entries need refreshing */
		if (listlen) {
			fcip_rt_update(fptr, changelist, listlen);
		}
		break;
	case FC_STATE_RESET:
		/*
		 * Not much to do I guess - wait for port to become
		 * ONLINE. If the port doesn't become online in a short
		 * while, the upper layers abort any request themselves.
		 * We can just putback the messages in the streams queues
		 * if the link is offline
		 */
		break;
	}
	/* done - allow any pending SUSPEND/POWER DOWN to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_SC_CB);
	mutex_exit(&fptr->fcip_mutex);
}
2417 
2418 /*
2419  * Given a port handle, return the fcip_port_info structure corresponding
2420  * to that port handle. The transport allocates and communicates with
2421  * ULPs using port handles
2422  */
2423 static fcip_port_info_t *
fcip_get_port(opaque_t phandle)2424 fcip_get_port(opaque_t phandle)
2425 {
2426 	fcip_port_info_t *fport;
2427 
2428 	ASSERT(phandle != NULL);
2429 
2430 	mutex_enter(&fcip_global_mutex);
2431 	fport = fcip_port_head;
2432 
2433 	while (fport != NULL) {
2434 		if (fport->fcipp_handle == phandle) {
2435 			/* found */
2436 			break;
2437 		}
2438 		fport = fport->fcipp_next;
2439 	}
2440 
2441 	mutex_exit(&fcip_global_mutex);
2442 
2443 	return (fport);
2444 }
2445 
2446 /*
2447  * Handle inbound ELS requests received by the transport. We are only
2448  * intereseted in FARP/InARP mostly.
2449  */
2450 /* ARGSUSED */
static int
fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
    fc_unsol_buf_t *buf, uint32_t claimed)
{
	fcip_port_info_t	*fport;
	struct fcip 		*fptr;
	int			instance;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	la_els_farp_t		farp_cmd;
	la_els_farp_t		*fcmd;
	int			rval = FC_UNCLAIMED;

	/* Map the transport's opaque port handle back to our per-port state */
	fport = fcip_get_port(phandle);
	if (fport == NULL) {
		return (FC_UNCLAIMED);
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);
	if (fptr == NULL) {
		return (FC_UNCLAIMED);
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	/* Decline the ELS if we are detaching, suspended or powered down */
	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_UNCLAIMED);
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * ELS callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_ELS_CB;
	mutex_exit(&fptr->fcip_mutex);

	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, ELS callback , ", instance));

	/* Only Extended Link Service frames are of interest (FARP) */
	r_ctl = buf->ub_frame.r_ctl;
	switch (r_ctl & R_CTL_ROUTING) {
	case R_CTL_EXTENDED_SVC:
		if (r_ctl == R_CTL_ELS_REQ) {
			/* first payload byte carries the ELS command code */
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REQ) {
				/*
				 * Inbound FARP broadcast request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/*
				 * NOTE(review): fcmd is aliased to the
				 * unsol buffer header here rather than
				 * buf->ub_buffer - confirm the FARP
				 * payload layout this relies on.
				 */
				fcmd = (la_els_farp_t *)buf;
				/* request must be targeted at our NWWN */
				if (fcip_wwn_compare(&fcmd->resp_nwwn,
				    &fport->fcipp_nwwn,
				    FCIP_COMPARE_NWWN) != 0) {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/*
				 * copy the FARP request and release the
				 * unsolicited buffer
				 */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);

				if (fcip_farp_supported &&
				    fcip_handle_farp_request(fptr, fcmd) ==
				    FC_SUCCESS) {
					/*
					 * We successfully sent out a FARP
					 * reply to the requesting port
					 */
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		} else if (r_ctl == R_CTL_ELS_RSP) {
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REPLY) {
				/*
				 * We received a REPLY to our FARP request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/* copy out and release the unsol buffer */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);
				if (fcip_farp_supported &&
				    fcip_handle_farp_response(fptr, fcmd) ==
				    FC_SUCCESS) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
					    "Successfully recevied a FARP "
					    "response"));
					/*
					 * wake the thread waiting in
					 * fcip_farp_cv for this response
					 */
					mutex_enter(&fptr->fcip_mutex);
					fptr->fcip_farp_rsp_flag = 1;
					cv_signal(&fptr->fcip_farp_cv);
					mutex_exit(&fptr->fcip_mutex);
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Unable to handle a FARP response "
					    "receive"));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		}
		break;
	default:
		break;
	}
els_cb_done:
	/* done - allow any pending SUSPEND/POWER DOWN to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_ELS_CB);
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
2595 
2596 
2597 /*
2598  * Handle inbound FARP requests
2599  */
static int
fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd)
{
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	int			rval = FC_FAILURE;
	opaque_t		fca_dev;
	fc_portmap_t 		map;
	struct fcip_routing_table *frp;
	struct fcip_dest *fdestp;

	/*
	 * Add an entry for the remote port into our routing and destination
	 * tables.
	 */
	map.map_did = fcmd->req_id;
	map.map_hard_addr.hard_addr = fcmd->req_id.port_id;
	map.map_state = PORT_DEVICE_VALID;
	map.map_type = PORT_DEVICE_NEW;
	map.map_flags = 0;
	map.map_pd = NULL;
	bcopy((void *)&fcmd->req_pwwn, (void *)&map.map_pwwn,
	    sizeof (la_wwn_t));
	bcopy((void *)&fcmd->req_nwwn, (void *)&map.map_nwwn,
	    sizeof (la_wwn_t));
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	/*
	 * NOTE(review): frp may be NULL if the lookup fails; it is
	 * dereferenced below (frp->fcipr_state) without a NULL check -
	 * confirm fcip_rt_update() above guarantees the entry exists.
	 */
	frp = fcip_lookup_rtable(fptr, &fcmd->req_pwwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);

	fdestp = fcip_add_dest(fptr, frp);

	/* allocate an internal packet to carry the FARP reply */
	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
	    sizeof (la_els_farp_t), NULL, KM_SLEEP);
	if (fcip_pkt == NULL) {
		rval = FC_FAILURE;
		goto farp_done;
	}
	/*
	 * Fill in our port's PWWN and NWWN
	 */
	fcmd->resp_pwwn = fport->fcipp_pwwn;
	fcmd->resp_nwwn = fport->fcipp_nwwn;

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fcmd->req_id, NULL);

	fca_dev =
	    fc_ulp_get_fca_device(fport->fcipp_handle, fcmd->req_id);
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	/* the reply travels as an ELS response frame */
	fc_pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_RSP;
	fc_pkt->pkt_fca_device = fca_dev;
	fcip_pkt->fcip_pkt_dest = fdestp;

	/*
	 * Attempt a PLOGI again
	 */
	if (fcmd->resp_flags & FARP_INIT_P_LOGI) {
		if (fcip_do_plogi(fptr, frp) != FC_SUCCESS) {
			/*
			 * Login to the remote port failed. There is no
			 * point continuing with the FARP request further
			 * so bail out here.
			 */
			frp->fcipr_state = PORT_DEVICE_INVALID;
			rval = FC_FAILURE;
			/*
			 * NOTE(review): fcip_pkt does not appear to be
			 * freed on this failure path - verify against
			 * fcip_ipkt_free()/completion handling.
			 */
			goto farp_done;
		}
	}

	/* copy the reply payload into the DMA-mapped command buffer */
	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
	    sizeof (la_els_farp_t));

	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
		    "fcip_transport of farp reply failed 0x%x", rval));
	}

farp_done:
	return (rval);
}
2683 
2684 
2685 /*
2686  * Handle FARP responses to our FARP requests. When we receive a FARP
2687  * reply, we need to add the entry for the Port that replied into our
2688  * routing and destination hash tables. It is possible that the remote
2689  * port did not login into us (FARP responses can be received without
2690  * a PLOGI)
2691  */
2692 static int
fcip_handle_farp_response(struct fcip * fptr,la_els_farp_t * fcmd)2693 fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd)
2694 {
2695 	int			rval = FC_FAILURE;
2696 	fc_portmap_t 		map;
2697 	struct fcip_routing_table *frp;
2698 	struct fcip_dest *fdestp;
2699 
2700 	/*
2701 	 * Add an entry for the remote port into our routing and destination
2702 	 * tables.
2703 	 */
2704 	map.map_did = fcmd->dest_id;
2705 	map.map_hard_addr.hard_addr = fcmd->dest_id.port_id;
2706 	map.map_state = PORT_DEVICE_VALID;
2707 	map.map_type = PORT_DEVICE_NEW;
2708 	map.map_flags = 0;
2709 	map.map_pd = NULL;
2710 	bcopy((void *)&fcmd->resp_pwwn, (void *)&map.map_pwwn,
2711 	    sizeof (la_wwn_t));
2712 	bcopy((void *)&fcmd->resp_nwwn, (void *)&map.map_nwwn,
2713 	    sizeof (la_wwn_t));
2714 	fcip_rt_update(fptr, &map, 1);
2715 	mutex_enter(&fptr->fcip_rt_mutex);
2716 	frp = fcip_lookup_rtable(fptr, &fcmd->resp_pwwn, FCIP_COMPARE_NWWN);
2717 	mutex_exit(&fptr->fcip_rt_mutex);
2718 
2719 	fdestp = fcip_add_dest(fptr, frp);
2720 
2721 	if (fdestp != NULL) {
2722 		rval = FC_SUCCESS;
2723 	}
2724 	return (rval);
2725 }
2726 
2727 
/*
 * Combined length of the FC network, LLC/SNAP and IP headers.
 * The expansion is fully parenthesized so that uses such as
 * "len - FCIP_HDRS_LENGTH" associate correctly.
 */
#define	FCIP_HDRS_LENGTH	\
	(sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t) + \
	sizeof (ipha_t))
2730 
2731 /*
2732  * fcip_data_cb is the heart of most IP operations. This routine is called
2733  * by the transport when any unsolicited IP data arrives at a port (which
2734  * is almost all IP data). This routine then strips off the Network header
2735  * from the payload (after authenticating the received payload ofcourse),
2736  * creates a message blk and sends the data upstream. You will see ugly
2737  * #defines because of problems with using esballoc() as opposed to
2738  * allocb to prevent an extra copy of data. We should probably move to
2739  * esballoc entirely when the MTU eventually will be larger than 1500 bytes
2740  * since copies will get more expensive then. At 1500 byte MTUs, there is
2741  * no noticable difference between using allocb and esballoc. The other
2742  * caveat is that the qlc firmware still cannot tell us accurately the
2743  * no. of valid bytes in the unsol buffer it DMA'ed so we have to resort
2744  * to looking into the IP header and hoping that the no. of bytes speficified
2745  * in the header was actually received.
2746  */
2747 /* ARGSUSED */
2748 static int
fcip_data_cb(opaque_t ulp_handle,opaque_t phandle,fc_unsol_buf_t * buf,uint32_t claimed)2749 fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
2750     fc_unsol_buf_t *buf, uint32_t claimed)
2751 {
2752 	fcip_port_info_t		*fport;
2753 	struct fcip 			*fptr;
2754 	fcph_network_hdr_t		*nhdr;
2755 	llc_snap_hdr_t			*snaphdr;
2756 	mblk_t				*bp;
2757 	uint32_t 			len;
2758 	uint32_t			hdrlen;
2759 	ushort_t			type;
2760 	ipha_t				*iphdr;
2761 	int				rval;
2762 
2763 #ifdef FCIP_ESBALLOC
2764 	frtn_t				*free_ubuf;
2765 	struct fcip_esballoc_arg	*fesb_argp;
2766 #endif /* FCIP_ESBALLOC */
2767 
2768 	fport = fcip_get_port(phandle);
2769 	if (fport == NULL) {
2770 		return (FC_UNCLAIMED);
2771 	}
2772 
2773 	fptr = fport->fcipp_fcip;
2774 	ASSERT(fptr != NULL);
2775 
2776 	if (fptr == NULL) {
2777 		return (FC_UNCLAIMED);
2778 	}
2779 
2780 	mutex_enter(&fptr->fcip_mutex);
2781 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
2782 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
2783 		mutex_exit(&fptr->fcip_mutex);
2784 		rval = FC_UNCLAIMED;
2785 		goto data_cb_done;
2786 	}
2787 
2788 	/*
2789 	 * set fcip flags to indicate we are in the middle of a
2790 	 * data callback so we can wait till the statechange
2791 	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2792 	 */
2793 	fptr->fcip_flags |= FCIP_IN_DATA_CB;
2794 	mutex_exit(&fptr->fcip_mutex);
2795 
2796 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2797 	    (CE_NOTE, "fcip%d, data callback",
2798 	    ddi_get_instance(fport->fcipp_dip)));
2799 
2800 	/*
2801 	 * get to the network and snap headers in the payload
2802 	 */
2803 	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
2804 	snaphdr = (llc_snap_hdr_t *)(buf->ub_buffer +
2805 	    sizeof (fcph_network_hdr_t));
2806 
2807 	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
2808 
2809 	/*
2810 	 * get the IP header to obtain the no. of bytes we need to read
2811 	 * off from the unsol buffer. This obviously is because not all
2812 	 * data fills up the unsol buffer completely and the firmware
2813 	 * doesn't tell us how many valid bytes are in there as well
2814 	 */
2815 	iphdr = (ipha_t *)(buf->ub_buffer + hdrlen);
2816 	snaphdr->pid = BE_16(snaphdr->pid);
2817 	type = snaphdr->pid;
2818 
2819 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2820 	    (CE_CONT, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
2821 	    snaphdr->dsap, snaphdr->ssap, snaphdr->ctrl));
2822 
2823 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2824 	    (CE_CONT, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
2825 	    snaphdr->oui[0], snaphdr->oui[1], snaphdr->oui[2], snaphdr->pid));
2826 
2827 	/* Authneticate, Authenticate */
2828 	if (type == ETHERTYPE_IP) {
2829 		len = hdrlen + BE_16(iphdr->ipha_length);
2830 	} else if (type == ETHERTYPE_ARP) {
2831 		len = hdrlen + 28;
2832 	} else {
2833 		len = buf->ub_bufsize;
2834 	}
2835 
2836 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2837 	    (CE_CONT, "effective packet length is %d bytes.\n", len));
2838 
2839 	if (len < hdrlen || len > FCIP_UB_SIZE) {
2840 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2841 		    (CE_NOTE, "Incorrect buffer size %d bytes", len));
2842 		rval = FC_UNCLAIMED;
2843 		goto data_cb_done;
2844 	}
2845 
2846 	if (buf->ub_frame.type != FC_TYPE_IS8802_SNAP) {
2847 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Not IP/ARP data"));
2848 		rval = FC_UNCLAIMED;
2849 		goto data_cb_done;
2850 	}
2851 
2852 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "checking wwn"));
2853 
2854 	if ((fcip_wwn_compare(&nhdr->net_dest_addr, &fport->fcipp_pwwn,
2855 	    FCIP_COMPARE_NWWN) != 0) &&
2856 	    (!IS_BROADCAST_ADDR(&nhdr->net_dest_addr))) {
2857 		rval = FC_UNCLAIMED;
2858 		goto data_cb_done;
2859 	} else if (fcip_cache_on_arp_broadcast &&
2860 	    IS_BROADCAST_ADDR(&nhdr->net_dest_addr)) {
2861 		fcip_cache_arp_broadcast(fptr, buf);
2862 	}
2863 
2864 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Allocate streams block"));
2865 
2866 	/*
2867 	 * Using esballoc instead of allocb should be faster, atleast at
2868 	 * larger MTUs than 1500 bytes. Someday we'll get there :)
2869 	 */
2870 #if defined(FCIP_ESBALLOC)
2871 	/*
2872 	 * allocate memory for the frtn function arg. The Function
2873 	 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
2874 	 * which contains pointers to the unsol buffer and the
2875 	 * opaque port handle for releasing the unsol buffer back to
2876 	 * the FCA for reuse
2877 	 */
2878 	fesb_argp = (struct fcip_esballoc_arg *)
2879 	    kmem_zalloc(sizeof (struct fcip_esballoc_arg), KM_NOSLEEP);
2880 
2881 	if (fesb_argp == NULL) {
2882 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2883 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2884 		rval = FC_UNCLAIMED;
2885 		goto data_cb_done;
2886 	}
2887 	/*
2888 	 * Check with KM_NOSLEEP
2889 	 */
2890 	free_ubuf = (frtn_t *)kmem_zalloc(sizeof (frtn_t), KM_NOSLEEP);
2891 	if (free_ubuf == NULL) {
2892 		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2893 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2894 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2895 		rval = FC_UNCLAIMED;
2896 		goto data_cb_done;
2897 	}
2898 
2899 	fesb_argp->frtnp = free_ubuf;
2900 	fesb_argp->buf = buf;
2901 	fesb_argp->phandle = phandle;
2902 	free_ubuf->free_func = fcip_ubfree;
2903 	free_ubuf->free_arg = (char *)fesb_argp;
2904 	if ((bp = (mblk_t *)esballoc((unsigned char *)buf->ub_buffer,
2905 	    len, BPRI_MED, free_ubuf)) == NULL) {
2906 		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2907 		kmem_free(free_ubuf, sizeof (frtn_t));
2908 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2909 		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2910 		rval = FC_UNCLAIMED;
2911 		goto data_cb_done;
2912 	}
2913 #elif !defined(FCIP_ESBALLOC)
2914 	/*
2915 	 * allocate streams mblk and copy the contents of the
2916 	 * unsolicited buffer into this newly alloc'ed mblk
2917 	 */
2918 	if ((bp = (mblk_t *)fcip_allocb((size_t)len, BPRI_LO)) == NULL) {
2919 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2920 		    (CE_WARN, "alloc of mblk failed in data_cb"));
2921 		rval = FC_UNCLAIMED;
2922 		goto data_cb_done;
2923 	}
2924 
2925 	/*
2926 	 * Unsolicited buffers handed up to us from the FCA must be
2927 	 * endian clean so just bcopy the data into our mblk. Else
2928 	 * we may have to either copy the data byte by byte or
2929 	 * use the ddi_rep_get* routines to do the copy for us.
2930 	 */
2931 	bcopy(buf->ub_buffer, bp->b_rptr, len);
2932 
2933 	/*
2934 	 * for esballoc'ed mblks - free the UB in the frtn function
2935 	 * along with the memory allocated for the function arg.
2936 	 * for allocb'ed mblk - release the unsolicited buffer here
2937 	 */
2938 	(void) fc_ulp_ubrelease(phandle, 1, &buf->ub_token);
2939 
2940 #endif	/* FCIP_ESBALLOC */
2941 
2942 	bp->b_wptr = bp->b_rptr + len;
2943 	fptr->fcip_ipackets++;
2944 
2945 	if (type == ETHERTYPE_IP) {
2946 		mutex_enter(&fptr->fcip_mutex);
2947 		fptr->fcip_ub_upstream++;
2948 		mutex_exit(&fptr->fcip_mutex);
2949 		bp->b_rptr += hdrlen;
2950 
2951 		/*
2952 		 * Check if ipq is valid in the sendup thread
2953 		 */
2954 		if (fcip_sendup_alloc_enque(fptr, bp, NULL) != FC_SUCCESS) {
2955 			freemsg(bp);
2956 		}
2957 	} else {
2958 		/*
2959 		 * We won't get ethernet 802.3 packets in FCIP but we may get
2960 		 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
2961 		 * fcip_sendup() do the matching.
2962 		 */
2963 		mutex_enter(&fptr->fcip_mutex);
2964 		fptr->fcip_ub_upstream++;
2965 		mutex_exit(&fptr->fcip_mutex);
2966 		if (fcip_sendup_alloc_enque(fptr, bp,
2967 		    fcip_accept) != FC_SUCCESS) {
2968 			freemsg(bp);
2969 		}
2970 	}
2971 
2972 	rval = FC_SUCCESS;
2973 
2974 	/*
2975 	 * Unset fcip_flags to indicate we are out of callback and return
2976 	 */
2977 data_cb_done:
2978 	mutex_enter(&fptr->fcip_mutex);
2979 	fptr->fcip_flags &= ~(FCIP_IN_DATA_CB);
2980 	mutex_exit(&fptr->fcip_mutex);
2981 	return (rval);
2982 }
2983 
2984 #if !defined(FCIP_ESBALLOC)
2985 /*
2986  * Allocate a message block for the inbound data to be sent upstream.
2987  */
2988 static void *
fcip_allocb(size_t size,uint_t pri)2989 fcip_allocb(size_t size, uint_t pri)
2990 {
2991 	mblk_t	*mp;
2992 
2993 	if ((mp = allocb(size, pri)) == NULL) {
2994 		return (NULL);
2995 	}
2996 	return (mp);
2997 }
2998 
2999 #endif
3000 
3001 /*
3002  * This helper routine kmem cache alloc's a sendup element for enquing
3003  * into the sendup list for callbacks upstream from the dedicated sendup
3004  * thread. We enque the msg buf into the sendup list and cv_signal the
3005  * sendup thread to finish the callback for us.
3006  */
3007 static int
fcip_sendup_alloc_enque(struct fcip * fptr,mblk_t * mp,struct fcipstr * (* f)())3008 fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*f)())
3009 {
3010 	struct fcip_sendup_elem 	*msg_elem;
3011 	int				rval = FC_FAILURE;
3012 
3013 	msg_elem = kmem_cache_alloc(fptr->fcip_sendup_cache, KM_NOSLEEP);
3014 	if (msg_elem == NULL) {
3015 		/* drop pkt to floor - update stats */
3016 		rval = FC_FAILURE;
3017 		goto sendup_alloc_done;
3018 	}
3019 	msg_elem->fcipsu_mp = mp;
3020 	msg_elem->fcipsu_func = f;
3021 
3022 	mutex_enter(&fptr->fcip_sendup_mutex);
3023 	if (fptr->fcip_sendup_head == NULL) {
3024 		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = msg_elem;
3025 	} else {
3026 		fptr->fcip_sendup_tail->fcipsu_next = msg_elem;
3027 		fptr->fcip_sendup_tail = msg_elem;
3028 	}
3029 	fptr->fcip_sendup_cnt++;
3030 	cv_signal(&fptr->fcip_sendup_cv);
3031 	mutex_exit(&fptr->fcip_sendup_mutex);
3032 	rval = FC_SUCCESS;
3033 
3034 sendup_alloc_done:
3035 	return (rval);
3036 }
3037 
3038 /*
3039  * One of the ways of performing the WWN to D_ID mapping required for
3040  * IPFC data is to cache the unsolicited ARP broadcast messages received
3041  * and update the routing table to add entry for the destination port
3042  * if we are the intended recipient of the ARP broadcast message. This is
3043  * one of the methods recommended in the rfc to obtain the WWN to D_ID mapping
3044  * but is not typically used unless enabled. The driver prefers to use the
3045  * nameserver/lilp map to obtain this mapping.
3046  */
3047 static void
fcip_cache_arp_broadcast(struct fcip * fptr,fc_unsol_buf_t * buf)3048 fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf)
3049 {
3050 	fcip_port_info_t		*fport;
3051 	fcph_network_hdr_t		*nhdr;
3052 	struct fcip_routing_table	*frp;
3053 	fc_portmap_t			map;
3054 
3055 	fport = fptr->fcip_port_info;
3056 	if (fport == NULL) {
3057 		return;
3058 	}
3059 	ASSERT(fport != NULL);
3060 
3061 	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
3062 
3063 	mutex_enter(&fptr->fcip_rt_mutex);
3064 	frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, FCIP_COMPARE_NWWN);
3065 	mutex_exit(&fptr->fcip_rt_mutex);
3066 	if (frp == NULL) {
3067 		map.map_did.port_id = buf->ub_frame.s_id;
3068 		map.map_hard_addr.hard_addr = buf->ub_frame.s_id;
3069 		map.map_state = PORT_DEVICE_VALID;
3070 		map.map_type = PORT_DEVICE_NEW;
3071 		map.map_flags = 0;
3072 		map.map_pd = NULL;
3073 		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_pwwn,
3074 		    sizeof (la_wwn_t));
3075 		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_nwwn,
3076 		    sizeof (la_wwn_t));
3077 		fcip_rt_update(fptr, &map, 1);
3078 		mutex_enter(&fptr->fcip_rt_mutex);
3079 		frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr,
3080 		    FCIP_COMPARE_NWWN);
3081 		mutex_exit(&fptr->fcip_rt_mutex);
3082 
3083 		(void) fcip_add_dest(fptr, frp);
3084 	}
3085 
3086 }
3087 
/*
 * This is a dedicated thread to do callbacks from fcip's data callback
 * routines into the modules upstream. The reason for this thread is
 * the data callback function can be called from an interrupt context and
 * the upstream modules *can* make calls downstream in the same thread
 * context. If the call is to a fabric port which is not yet in our
 * routing tables, we may have to query the nameserver/fabric for the
 * MAC addr to Port_ID mapping which may be blocking calls.
 * The thread drains the list filled in by fcip_sendup_alloc_enque()
 * and exits when fcip_sendup_thr_initted is cleared (detach).
 */
static void
fcip_sendup_thr(void *arg)
{
	struct fcip		*fptr = (struct fcip *)arg;
	struct fcip_sendup_elem	*msg_elem;
	queue_t			*ip4q = NULL;

	/* register with CPR so suspend/resume can checkpoint this thread */
	CALLB_CPR_INIT(&fptr->fcip_cpr_info, &fptr->fcip_sendup_mutex,
	    callb_generic_cpr, "fcip_sendup_thr");

	mutex_enter(&fptr->fcip_sendup_mutex);
	for (;;) {

		/* sleep until work arrives or the thread is being torn down */
		while (fptr->fcip_sendup_thr_initted &&
		    fptr->fcip_sendup_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&fptr->fcip_cpr_info);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
			CALLB_CPR_SAFE_END(&fptr->fcip_cpr_info,
			    &fptr->fcip_sendup_mutex);
		}

		/* shutdown requested (e.g. on detach) - leave the loop */
		if (fptr->fcip_sendup_thr_initted == 0) {
			break;
		}

		/* dequeue the head element; drop the lock for the putnext */
		msg_elem = fptr->fcip_sendup_head;
		fptr->fcip_sendup_head = msg_elem->fcipsu_next;
		msg_elem->fcipsu_next = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		if (msg_elem->fcipsu_func == NULL) {
			/*
			 * Message for ipq. Check to see if the ipq is
			 * is still valid. Since the thread is asynchronous,
			 * there could have been a close on the stream
			 */
			mutex_enter(&fptr->fcip_mutex);
			if (fptr->fcip_ipq && canputnext(fptr->fcip_ipq)) {
				ip4q = fptr->fcip_ipq;
				mutex_exit(&fptr->fcip_mutex);
				putnext(ip4q, msg_elem->fcipsu_mp);
			} else {
				/* stream closed or flow-controlled - drop */
				mutex_exit(&fptr->fcip_mutex);
				freemsg(msg_elem->fcipsu_mp);
			}
		} else {
			/* non-IP traffic: let the match function fan it out */
			fcip_sendup(fptr, msg_elem->fcipsu_mp,
			    msg_elem->fcipsu_func);
		}

#if !defined(FCIP_ESBALLOC)
		/*
		 * for allocb'ed mblk - decrement upstream count here
		 * (esballoc'ed mblks decrement in fcip_ubfree instead)
		 */
		mutex_enter(&fptr->fcip_mutex);
		ASSERT(fptr->fcip_ub_upstream > 0);
		fptr->fcip_ub_upstream--;
		mutex_exit(&fptr->fcip_mutex);
#endif /* FCIP_ESBALLOC */

		kmem_cache_free(fptr->fcip_sendup_cache, (void *)msg_elem);
		mutex_enter(&fptr->fcip_sendup_mutex);
		fptr->fcip_sendup_cnt--;
	}


#ifndef	__lock_lint
	CALLB_CPR_EXIT(&fptr->fcip_cpr_info);
#else
	mutex_exit(&fptr->fcip_sendup_mutex);
#endif /* __lock_lint */

	/* Wake up fcip detach thread by the end */
	cv_signal(&fptr->fcip_sendup_cv);

	thread_exit();
}
3175 
3176 #ifdef FCIP_ESBALLOC
3177 
3178 /*
3179  * called from the stream head when it is done using an unsolicited buffer.
3180  * We release this buffer then to the FCA for reuse.
3181  */
3182 static void
fcip_ubfree(char * arg)3183 fcip_ubfree(char *arg)
3184 {
3185 	struct fcip_esballoc_arg *fesb_argp = (struct fcip_esballoc_arg *)arg;
3186 	fc_unsol_buf_t	*ubuf;
3187 	frtn_t		*frtnp;
3188 	fcip_port_info_t		*fport;
3189 	struct fcip 			*fptr;
3190 
3191 
3192 	fport = fcip_get_port(fesb_argp->phandle);
3193 	fptr = fport->fcipp_fcip;
3194 
3195 	ASSERT(fesb_argp != NULL);
3196 	ubuf = fesb_argp->buf;
3197 	frtnp = fesb_argp->frtnp;
3198 
3199 
3200 	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
3201 	    (CE_WARN, "freeing ubuf after esballoc in fcip_ubfree"));
3202 	(void) fc_ulp_ubrelease(fesb_argp->phandle, 1, &ubuf->ub_token);
3203 
3204 	mutex_enter(&fptr->fcip_mutex);
3205 	ASSERT(fptr->fcip_ub_upstream > 0);
3206 	fptr->fcip_ub_upstream--;
3207 	cv_signal(&fptr->fcip_ub_cv);
3208 	mutex_exit(&fptr->fcip_mutex);
3209 
3210 	kmem_free(frtnp, sizeof (frtn_t));
3211 	kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
3212 }
3213 
3214 #endif /* FCIP_ESBALLOC */
3215 
/*
 * handle data other than that of type ETHERTYPE_IP and send it on its
 * way upstream to the right streams module to handle.  The message is
 * duplicated for every matching open stream except the last, which
 * consumes the original mblk; streams that fail flow control are
 * skipped (the data for them is dropped).
 */
static void
fcip_sendup(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*acceptfunc)())
{
	struct fcipstr	*slp, *nslp;
	la_wwn_t	*dhostp;
	mblk_t		*nmp;
	uint32_t 	isgroupaddr;
	int 		type;
	uint32_t	hdrlen;
	fcph_network_hdr_t	*nhdr;
	llc_snap_hdr_t		*snaphdr;

	/* pick apart the network and SNAP headers at the front of the msg */
	nhdr = (fcph_network_hdr_t *)mp->b_rptr;
	snaphdr =
	    (llc_snap_hdr_t *)(mp->b_rptr + sizeof (fcph_network_hdr_t));
	dhostp = &nhdr->net_dest_addr;
	type = snaphdr->pid;
	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);

	/* No group address with fibre channel */
	isgroupaddr = 0;

	/*
	 * While holding a reader lock on the linked list of streams structures,
	 * attempt to match the address criteria for each stream
	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
	 */

	rw_enter(&fcipstruplock, RW_READER);

	/* no stream matches at all - drop the message */
	if ((slp = (*acceptfunc)(fcipstrup, fptr, type, dhostp)) == NULL) {
		rw_exit(&fcipstruplock);
		freemsg(mp);
		return;
	}

	/*
	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
	 * Each stream but the last receives a dupmsg() copy of mp.
	 */
	for (; nslp = (*acceptfunc)(slp->sl_nextp, fptr, type, dhostp);
	    slp = nslp) {
		if (canputnext(slp->sl_rq)) {
			if (nmp = dupmsg(mp)) {
				if ((slp->sl_flags & FCIP_SLFAST) &&
							!isgroupaddr) {
					/* fastpath: strip the headers */
					nmp->b_rptr += hdrlen;
					putnext(slp->sl_rq, nmp);
				} else if (slp->sl_flags & FCIP_SLRAW) {
					/* No headers when FCIP_SLRAW is set */
					putnext(slp->sl_rq, nmp);
				} else if ((nmp = fcip_addudind(fptr, nmp,
				    nhdr, type))) {
					putnext(slp->sl_rq, nmp);
				}
			}
		}
	}

	/*
	 * Do the last one.
	 */
	if (canputnext(slp->sl_rq)) {
		if (slp->sl_flags & FCIP_SLFAST) {
			mp->b_rptr += hdrlen;
			putnext(slp->sl_rq, mp);
		} else if (slp->sl_flags & FCIP_SLRAW) {
			putnext(slp->sl_rq, mp);
		} else if ((mp = fcip_addudind(fptr, mp, nhdr, type))) {
			putnext(slp->sl_rq, mp);
		}
	} else {
		/* last stream is flow-controlled - drop the original */
		freemsg(mp);
	}

	rw_exit(&fcipstruplock);
}
3296 
3297 /*
3298  * Match the stream based on type and wwn if necessary.
3299  * Destination wwn dhostp is passed to this routine is reserved
3300  * for future usage. We don't need to use it right now since port
3301  * to fcip instance mapping is unique and wwn is already validated when
3302  * packet comes to fcip.
3303  */
3304 /* ARGSUSED */
3305 static struct fcipstr *
fcip_accept(struct fcipstr * slp,struct fcip * fptr,int type,la_wwn_t * dhostp)3306 fcip_accept(struct fcipstr *slp, struct fcip *fptr, int type, la_wwn_t *dhostp)
3307 {
3308 	t_uscalar_t 	sap;
3309 
3310 	for (; slp; slp = slp->sl_nextp) {
3311 		sap = slp->sl_sap;
3312 		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_CONT,
3313 		    "fcip_accept: checking next sap = %x, type = %x",
3314 		    sap, type));
3315 
3316 		if ((slp->sl_fcip == fptr) && (type == sap)) {
3317 			return (slp);
3318 		}
3319 	}
3320 	return (NULL);
3321 }
3322 
/*
 * Handle DL_UNITDATA_IND messages: strip the network and SNAP headers
 * from the data mblk, allocate an M_PROTO mblk carrying a
 * DL_UNITDATA_IND primitive (with destination and source addresses in
 * fcipdladdr form), and link the two together.  Returns the new message
 * chain, or NULL (after freeing mp) if allocation fails.
 */
static mblk_t *
fcip_addudind(struct fcip *fptr, mblk_t *mp, fcph_network_hdr_t *nhdr,
    int type)
{
	dl_unitdata_ind_t	*dludindp;
	struct	fcipdladdr	*dlap;
	mblk_t	*nmp;
	int	size;
	uint32_t hdrlen;
	struct ether_addr	src_addr;
	struct ether_addr	dest_addr;


	/* advance past the FC network header and the LLC/SNAP header */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	mp->b_rptr += hdrlen;

	/*
	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
	 */
	size = sizeof (dl_unitdata_ind_t) + FCIPADDRL + FCIPADDRL;
	if ((nmp = allocb(size, BPRI_LO)) == NULL) {
		fptr->fcip_allocbfail++;
		freemsg(mp);
		return (NULL);
	}
	DB_TYPE(nmp) = M_PROTO;
	nmp->b_wptr = nmp->b_datap->db_lim;
	nmp->b_rptr = nmp->b_wptr - size;

	/*
	 * Construct a DL_UNITDATA_IND primitive.
	 */
	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
	dludindp->dl_primitive = DL_UNITDATA_IND;
	dludindp->dl_dest_addr_length = FCIPADDRL;
	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
	dludindp->dl_src_addr_length = FCIPADDRL;
	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + FCIPADDRL;
	dludindp->dl_group_address = 0;		/* not DL_MULTI */

	/* destination address immediately follows the primitive */
	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
	wwn_to_ether(&nhdr->net_dest_addr, &dest_addr);
	ether_bcopy(&dest_addr, &dlap->dl_phys);
	dlap->dl_sap = (uint16_t)type;

	/* source address follows the destination address */
	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
		+ FCIPADDRL);
	wwn_to_ether(&nhdr->net_src_addr, &src_addr);
	ether_bcopy(&src_addr, &dlap->dl_phys);
	dlap->dl_sap = (uint16_t)type;

	/*
	 * Link the M_PROTO and M_DATA together.
	 */
	nmp->b_cont = mp;
	return (nmp);
}
3383 
3384 
/*
 * The open routine. For clone opens, we return the next available minor
 * no. for the stream to use; otherwise the minor encoded in *devp is
 * used.  A new fcipstr is allocated, linked into the global list of
 * active streams (sorted by minor number) and stashed in q_ptr.
 */
/* ARGSUSED */
static int
fcip_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	struct fcipstr	*slp;
	struct fcipstr	**prevslp;
	minor_t	minor;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_open"));
	/*
	 * We need to ensure that the port driver is loaded before
	 * we proceed
	 */
	if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER)) == NULL) {
		/* no port driver instances found */
		FCIP_DEBUG(FCIP_DEBUG_STARTUP, (CE_WARN,
		    "!ddi_hold_installed_driver of fp failed\n"));
		return (ENXIO);
	}
	/* serialize opens */
	rw_enter(&fcipstruplock, RW_WRITER);

	prevslp = &fcipstrup;
	if (sflag == CLONEOPEN) {
		/*
		 * scan the (minor-sorted) stream list for the first
		 * unused minor number
		 */
		minor = 0;
		for (; (slp = *prevslp) != NULL; prevslp = &slp->sl_nextp) {
			if (minor < slp->sl_minor) {
				break;
			}
			minor ++;
		}
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "getmajor returns 0x%x", getmajor(*devp)));
		*devp = makedevice(getmajor(*devp), minor);
	} else {
		minor = getminor(*devp);
	}

	/*
	 * check if our qp's private area is already initialized. If yes
	 * the stream is already open - just return
	 */
	if (rq->q_ptr) {
		goto done;
	}

	slp = GETSTRUCT(struct fcipstr, 1);
	slp->sl_minor = minor;
	slp->sl_rq = rq;
	slp->sl_sap = 0;
	slp->sl_flags = 0;
	slp->sl_state = DL_UNATTACHED;
	slp->sl_fcip = NULL;

	mutex_init(&slp->sl_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * link this new stream entry into list of active streams
	 * (prevslp points at the sorted insertion position)
	 */
	slp->sl_nextp = *prevslp;
	*prevslp = slp;

	rq->q_ptr = WR(rq)->q_ptr = (char *)slp;

	/*
	 * Disable automatic enabling of our write service procedures
	 * we need to control this explicitly. This will prevent
	 * anyone scheduling of our write service procedures.
	 */
	noenable(WR(rq));

done:
	rw_exit(&fcipstruplock);
	/*
	 * enable our put and service routines on the read side
	 */
	qprocson(rq);

	/*
	 * There is only one instance of fcip (instance = 0)
	 * for multiple instances of hardware
	 */
	(void) qassociate(rq, 0);	/* don't allow drcompat to be pushed */
	return (0);
}
3474 
/*
 * close an opened stream. The minor no. will then be available for
 * future opens.  Undoes everything fcip_open set up: releases the hold
 * on the port driver, detaches the stream from its interface if still
 * attached, unlinks it from the active stream list and frees it.
 */
/* ARGSUSED */
static int
fcip_close(queue_t *rq, int flag, cred_t *credp)
{
	struct fcipstr *slp;
	struct fcipstr **prevslp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_close"));
	ASSERT(rq);
	/* we should also have the active stream pointer in q_ptr */
	ASSERT(rq->q_ptr);

	/* drop the hold on the port driver taken in fcip_open */
	ddi_rele_driver(ddi_name_to_major(PORT_DRIVER));
	/*
	 * disable our put and service procedures. We had enabled them
	 * on open
	 */
	qprocsoff(rq);
	slp = (struct fcipstr *)rq->q_ptr;

	/*
	 * Implicitly detach stream  a stream from an interface.
	 */
	if (slp->sl_fcip) {
		fcip_dodetach(slp);
	}

	(void) qassociate(rq, -1);	/* undo association in open */

	rw_enter(&fcipstruplock, RW_WRITER);

	/*
	 * unlink this stream from the active stream list and free it
	 */
	for (prevslp = &fcipstrup; (slp = *prevslp) != NULL;
	    prevslp = &slp->sl_nextp) {
		if (slp == (struct fcipstr *)rq->q_ptr) {
			break;
		}
	}

	/* we should have found slp */
	ASSERT(slp);

	*prevslp = slp->sl_nextp;
	mutex_destroy(&slp->sl_lock);
	kmem_free(slp, sizeof (struct fcipstr));
	rq->q_ptr = WR(rq)->q_ptr = NULL;

	rw_exit(&fcipstruplock);
	return (0);
}
3531 
3532 /*
3533  * This is not an extension of the DDI_DETACH request. This routine
3534  * only detaches a stream from an interface
3535  */
3536 static void
fcip_dodetach(struct fcipstr * slp)3537 fcip_dodetach(struct fcipstr *slp)
3538 {
3539 	struct fcipstr	*tslp;
3540 	struct fcip	*fptr;
3541 
3542 	FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
3543 	ASSERT(slp->sl_fcip != NULL);
3544 
3545 	fptr = slp->sl_fcip;
3546 	slp->sl_fcip = NULL;
3547 
3548 	/*
3549 	 * we don't support promiscuous mode currently but check
3550 	 * for and disable any promiscuous mode operation
3551 	 */
3552 	if (slp->sl_flags & SLALLPHYS) {
3553 		slp->sl_flags &= ~SLALLPHYS;
3554 	}
3555 
3556 	/*
3557 	 * disable ALLMULTI mode if all mulitcast addr are ON
3558 	 */
3559 	if (slp->sl_flags & SLALLMULTI) {
3560 		slp->sl_flags &= ~SLALLMULTI;
3561 	}
3562 
3563 	/*
3564 	 * we are most likely going to perform multicast by
3565 	 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
3566 	 * ALPA 0x00 in case of public loops
3567 	 */
3568 
3569 
3570 	/*
3571 	 * detach unit from device structure.
3572 	 */
3573 	for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
3574 		if (tslp->sl_fcip == fptr) {
3575 			break;
3576 		}
3577 	}
3578 	if (tslp == NULL) {
3579 		FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
3580 		"fcip_dodeatch - active stream struct not found"));
3581 
3582 		/* unregister with Fabric nameserver?? */
3583 	}
3584 	slp->sl_state = DL_UNATTACHED;
3585 
3586 	fcip_setipq(fptr);
3587 }
3588 
3589 
3590 /*
3591  * Set or clear device ipq pointer.
3592  * Walk thru all the streams on this device, if a ETHERTYPE_IP
3593  * stream is found, assign device ipq to its sl_rq.
3594  */
3595 static void
fcip_setipq(struct fcip * fptr)3596 fcip_setipq(struct fcip *fptr)
3597 {
3598 	struct fcipstr	*slp;
3599 	int		ok = 1;
3600 	queue_t		*ipq = NULL;
3601 
3602 	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));
3603 
3604 	rw_enter(&fcipstruplock, RW_READER);
3605 
3606 	for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
3607 		if (slp->sl_fcip == fptr) {
3608 			if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
3609 				ok = 0;
3610 			}
3611 			if (slp->sl_sap == ETHERTYPE_IP) {
3612 				if (ipq == NULL) {
3613 					ipq = slp->sl_rq;
3614 				} else {
3615 					ok = 0;
3616 				}
3617 			}
3618 		}
3619 	}
3620 
3621 	rw_exit(&fcipstruplock);
3622 
3623 	if (fcip_check_port_exists(fptr)) {
3624 		/* fptr passed to us is stale */
3625 		return;
3626 	}
3627 
3628 	mutex_enter(&fptr->fcip_mutex);
3629 	if (ok) {
3630 		fptr->fcip_ipq = ipq;
3631 	} else {
3632 		fptr->fcip_ipq = NULL;
3633 	}
3634 	mutex_exit(&fptr->fcip_mutex);
3635 }
3636 
3637 
3638 /* ARGSUSED */
3639 static void
fcip_ioctl(queue_t * wq,mblk_t * mp)3640 fcip_ioctl(queue_t *wq, mblk_t *mp)
3641 {
3642 	struct iocblk		*iocp = (struct iocblk *)mp->b_rptr;
3643 	struct fcipstr		*slp = (struct fcipstr *)wq->q_ptr;
3644 
3645 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3646 	    (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));
3647 
3648 	switch (iocp->ioc_cmd) {
3649 	case DLIOCRAW:
3650 		slp->sl_flags |= FCIP_SLRAW;
3651 		miocack(wq, mp, 0, 0);
3652 		break;
3653 
3654 	case DL_IOC_HDR_INFO:
3655 		fcip_dl_ioc_hdr_info(wq, mp);
3656 		break;
3657 
3658 	default:
3659 		miocnak(wq, mp, 0, EINVAL);
3660 		break;
3661 	}
3662 }
3663 
/*
 * The streams 'Put' routine.  M_DATA is transmitted directly when the
 * stream is in fastpath/raw mode and nothing is queued, otherwise it is
 * deferred to the service procedure; M_PROTO/M_PCPROTO are always
 * deferred (see comment below); M_IOCTL and M_FLUSH are handled inline.
 */
/* ARGSUSED */
static int
fcip_wput(queue_t *wq, mblk_t *mp)
{
	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
	struct fcip *fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));

	switch (DB_TYPE(mp)) {
	case M_DATA: {

		fptr = slp->sl_fcip;

		/* only fastpath/raw, idle-bound, attached streams may xmit */
		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
		    (slp->sl_state != DL_IDLE) ||
		    (fptr == NULL)) {
			/*
			 * set error in the message block and send a reply
			 * back upstream. Sun's merror routine does this
			 * for us more cleanly.
			 */
			merror(wq, mp, EPROTO);
			break;
		}

		/*
		 * if any messages are already enqueued or if the interface
		 * is in promiscuous mode, causing the packets to loop back
		 * up, then enqueue the message. Otherwise just transmit
		 * the message. putq() puts the message on fcip's
		 * write queue and qenable() puts the queue (wq) on
		 * the list of queues to be called by the streams scheduler.
		 */
		if (wq->q_first) {
			(void) putq(wq, mp);
			fptr->fcip_wantw = 1;
			qenable(wq);
		} else if (fptr->fcip_flags & FCIP_PROMISC) {
			/*
			 * Promiscuous mode not supported but add this code in
			 * case it will be supported in future.
			 */
			(void) putq(wq, mp);
			qenable(wq);
		} else {
			/* fast path: map the destination WWN and transmit */
			headerp = (fcph_network_hdr_t *)mp->b_rptr;
			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);

			if (fdestp == NULL) {
				merror(wq, mp, EPROTO);
				break;
			}

			ASSERT(fdestp != NULL);

			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
		}
		break;
	}
	case M_PROTO:
	case M_PCPROTO:
		/*
		 * to prevent recursive calls into fcip_proto
		 * (PROTO and PCPROTO messages are handled by fcip_proto)
		 * let the service procedure handle these messages by
		 * calling putq here.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		break;

	case M_IOCTL:
		fcip_ioctl(wq, mp);
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		/*
		 * we have both FLUSHW and FLUSHR set with FLUSHRW
		 */
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * send msg back upstream. qreply() takes care
			 * of using the RD(wq) queue on its reply
			 */
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
		freemsg(mp);
		break;
	}
	return (0);
}
3774 
3775 
/*
 * Handle M_PROTO and M_PCPROTO messages: decode the DLPI primitive and
 * dispatch to the matching request handler.  The per-stream lock
 * (sl_lock) is held across the entire dispatch.  Multicast and
 * promiscuous primitives are not supported and are nak'ed with
 * DL_UNSUPPORTED, as is any unrecognized primitive.
 */
/* ARGSUSED */
static void
fcip_proto(queue_t *wq, mblk_t *mp)
{
	union DL_primitives	*dlp;
	struct fcipstr		*slp;
	t_uscalar_t		prim;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;
	prim = dlp->dl_primitive;		/* the DLPI command */

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitve : %x", prim));

	mutex_enter(&slp->sl_lock);

	switch (prim) {
	case DL_UNITDATA_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
		fcip_udreq(wq, mp);
		break;

	case DL_ATTACH_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
		fcip_areq(wq, mp);
		break;

	case DL_DETACH_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
		fcip_dreq(wq, mp);
		break;

	case DL_BIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
		fcip_breq(wq, mp);
		break;

	case DL_UNBIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
		fcip_ubreq(wq, mp);
		break;

	case DL_INFO_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
		fcip_ireq(wq, mp);
		break;

	case DL_SET_PHYS_ADDR_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "set phy addr request"));
		fcip_spareq(wq, mp);
		break;

	case DL_PHYS_ADDR_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
		fcip_pareq(wq, mp);
		break;

	case DL_ENABMULTI_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Enable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_DISABMULTI_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Disable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCON_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode ON request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCOFF_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode OFF request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	default:
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;
	}
	mutex_exit(&slp->sl_lock);
}
3867 
3868 /*
3869  * Always enqueue M_PROTO and M_PCPROTO messages pn the wq and M_DATA
3870  * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
3871  * require us to hold fcip's internal locks across (upstream) putnext
3872  * calls. Specifically fcip_intr could hold fcip_intrlock and fcipstruplock
3873  * when it calls putnext(). That thread could loop back around to call
3874  * fcip_wput and eventually fcip_init() to cause a recursive mutex panic
3875  *
3876  * M_DATA messages are enqueued only if we are out of xmit resources. Once
3877  * the transmit resources are available the service procedure is enabled
3878  * and an attempt is made to xmit all messages on the wq.
3879  */
3880 /* ARGSUSED */
3881 static int
fcip_wsrv(queue_t * wq)3882 fcip_wsrv(queue_t *wq)
3883 {
3884 	mblk_t		*mp;
3885 	struct fcipstr	*slp;
3886 	struct fcip	*fptr;
3887 	struct fcip_dest *fdestp;
3888 	fcph_network_hdr_t *headerp;
3889 
3890 	slp = (struct fcipstr *)wq->q_ptr;
3891 	fptr = slp->sl_fcip;
3892 
3893 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv"));
3894 
3895 	while (mp = getq(wq)) {
3896 		switch (DB_TYPE(mp)) {
3897 		case M_DATA:
3898 			if (fptr && mp) {
3899 				headerp = (fcph_network_hdr_t *)mp->b_rptr;
3900 				fdestp = fcip_get_dest(fptr,
3901 				    &headerp->net_dest_addr);
3902 				if (fdestp == NULL) {
3903 					freemsg(mp);
3904 					goto done;
3905 				}
3906 				if (fcip_start(wq, mp, fptr, fdestp,
3907 				    KM_SLEEP)) {
3908 					goto done;
3909 				}
3910 			} else {
3911 				freemsg(mp);
3912 			}
3913 			break;
3914 
3915 		case M_PROTO:
3916 		case M_PCPROTO:
3917 			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3918 			    (CE_NOTE, "PROT msg in wsrv"));
3919 			fcip_proto(wq, mp);
3920 			break;
3921 		default:
3922 			break;
3923 		}
3924 	}
3925 done:
3926 	return (0);
3927 }
3928 
3929 
3930 /*
3931  * This routine is called from fcip_wsrv to send a message downstream
3932  * on the fibre towards its destination. This routine performs the
3933  * actual WWN to D_ID mapping by looking up the routing and destination
3934  * tables.
3935  */
/* ARGSUSED */
static int
fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
    struct fcip_dest *fdestp, int flags)
{
	int			rval;
	int			free;
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			datalen;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));

	ASSERT(fdestp != NULL);

	/*
	 * Only return if port has gone offline and not come back online
	 * in a while
	 */
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		freemsg(mp);
		return (0);
	}

	/*
	 * The message block coming in here already has the network and
	 * llc_snap hdr stuffed in
	 */
	/*
	 * Traditionally ethernet drivers at sun handle 3 cases here -
	 * 1. messages with one mblk
	 * 2. messages with 2 mblks
	 * 3. messages with >2 mblks
	 * For now lets handle all the 3 cases in a single case where we
	 * put them together in one mblk that has all the data
	 */

	if (mp->b_cont != NULL) {
		if (!pullupmsg(mp, -1)) {
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_WARN, "failed to concat message"));
			/* message is consumed here; caller must not requeue */
			freemsg(mp);
			return (1);
		}
	}

	datalen = msgsize(mp);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
	    datalen));

	/*
	 * We cannot have requests larger than FCIPMTU+Headers
	 */
	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
		sizeof (fcph_network_hdr_t))) {
		freemsg(mp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "fcip_pkt_alloc: datalen is larger than "
		    "max possible size."));
		return (1);
	}

	/*
	 * Allocation failure is a transient condition: put the message
	 * back on the queue so the service procedure retries it later.
	 */
	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
	if (fcip_pkt == NULL) {
		(void) putbq(wq, mp);
		return (1);
	}

	fcip_pkt->fcip_pkt_mp = mp;
	fcip_pkt->fcip_pkt_wq = wq;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	mutex_enter(&fdestp->fcipd_mutex);
	/*
	 * If the device dynamically disappeared, just fail the request.
	 */
	if (fdestp->fcipd_rtable == NULL) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
	 */

	fc_pkt->pkt_pd = fdestp->fcipd_pd;

	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Track the packet on the destination's outstanding list (for
	 * fcip_pkt_timeout) before handing it to the transport.
	 */
	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
	fcip_pkt->fcip_pkt_dest = fdestp;
	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fdestp->fcipd_did, fcip_pkt_callback);

	fdestp->fcipd_ncmds++;

	mutex_exit(&fdestp->fcipd_mutex);
	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
		fptr->fcip_opackets++;
		return (0);
	}

	/*
	 * Transport failed. Busy/offline-type errors are retryable: the
	 * packet is dequeued but the mblk is requeued (free == 0); any
	 * other error frees the message for good (free == 1).
	 */
	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
	    rval == FC_TRAN_BUSY) ? 0 : 1;

	mutex_enter(&fdestp->fcipd_mutex);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);

	if (!rval) {
		/*
		 * Somebody else (callback/timeout) already unlinked the
		 * packet and owns it now - don't free it here.
		 */
		fcip_pkt = NULL;
	} else {
		fdestp->fcipd_ncmds--;
	}
	mutex_exit(&fdestp->fcipd_mutex);

	if (fcip_pkt != NULL) {
		fcip_pkt_free(fcip_pkt, free);
	}

	if (!free) {
		/* retryable failure - give the mblk back to the queue */
		(void) putbq(wq, mp);
	}

	return (1);
}
4076 
4077 
4078 /*
4079  * This routine enqueus a packet marked to be issued to the
4080  * transport in the dest structure. This enables us to timeout any
4081  * request stuck with the FCA/transport for long periods of time
4082  * without a response. fcip_pkt_timeout will attempt to clean up
4083  * any packets hung in this state of limbo.
4084  */
4085 static void
fcip_fdestp_enqueue_pkt(struct fcip_dest * fdestp,fcip_pkt_t * fcip_pkt)4086 fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
4087 {
4088 	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
4089 
4090 	/*
4091 	 * Just hang it off the head of packet list
4092 	 */
4093 	fcip_pkt->fcip_pkt_next = fdestp->fcipd_head;
4094 	fcip_pkt->fcip_pkt_prev = NULL;
4095 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
4096 
4097 	if (fdestp->fcipd_head != NULL) {
4098 		ASSERT(fdestp->fcipd_head->fcip_pkt_prev == NULL);
4099 		fdestp->fcipd_head->fcip_pkt_prev = fcip_pkt;
4100 	}
4101 
4102 	fdestp->fcipd_head = fcip_pkt;
4103 }
4104 
4105 
/*
 * dequeues any packets after the transport/FCA tells us it has
 * been successfully sent on its way. Of course it doesn't mean that
 * the packet will actually reach its destination but it's at least
 * a step closer in that direction
 */
static int
fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
{
	fcip_pkt_t	*fcipd_pkt;

	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/*
		 * The timeout thread may be working on this packet: walk
		 * the list and only unlink the packet if it is actually
		 * still present, rather than trusting its own links.
		 */
		fcipd_pkt = fdestp->fcipd_head;
		while (fcipd_pkt) {
			if (fcipd_pkt == fcip_pkt) {
				fcip_pkt_t	*pptr = NULL;

				if (fcipd_pkt == fdestp->fcipd_head) {
					ASSERT(fcipd_pkt->fcip_pkt_prev ==
					    NULL);
					fdestp->fcipd_head =
					    fcipd_pkt->fcip_pkt_next;
				} else {
					pptr = fcipd_pkt->fcip_pkt_prev;
					ASSERT(pptr != NULL);
					pptr->fcip_pkt_next =
					    fcipd_pkt->fcip_pkt_next;
				}
				if (fcipd_pkt->fcip_pkt_next) {
					pptr = fcipd_pkt->fcip_pkt_next;
					pptr->fcip_pkt_prev =
					    fcipd_pkt->fcip_pkt_prev;
				}
				fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
				break;
			}
			fcipd_pkt = fcipd_pkt->fcip_pkt_next;
		}
	} else {
		/*
		 * Normal case: the packet's own prev/next pointers are
		 * trustworthy, so unlink it directly.
		 */
		if (fcip_pkt->fcip_pkt_prev == NULL) {
			ASSERT(fdestp->fcipd_head == fcip_pkt);
			fdestp->fcipd_head = fcip_pkt->fcip_pkt_next;
		} else {
			fcip_pkt->fcip_pkt_prev->fcip_pkt_next =
			    fcip_pkt->fcip_pkt_next;
		}

		if (fcip_pkt->fcip_pkt_next) {
			fcip_pkt->fcip_pkt_next->fcip_pkt_prev =
			    fcip_pkt->fcip_pkt_prev;
		}

		fcipd_pkt = fcip_pkt;
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
	}

	/*
	 * Non-zero iff the packet was found and unlinked by this call;
	 * zero means another thread already removed it from the list.
	 */
	return (fcipd_pkt == fcip_pkt);
}
4165 
/*
 * The transport routine - this is the routine that actually calls
 * into the FCA driver (through the transport of course) to transmit a
 * datagram on the fibre. The dest struct associated with the port to
 * which the data is intended is already bound to the packet, this routine
 * only takes care of marking the packet a broadcast packet if it is
 * intended to be a broadcast request. This permits the transport to send
 * the packet down on the wire even if it doesn't have an entry for the
 * D_ID in its d_id hash tables.
 */
4176 static int
fcip_transport(fcip_pkt_t * fcip_pkt)4177 fcip_transport(fcip_pkt_t *fcip_pkt)
4178 {
4179 	struct fcip		*fptr;
4180 	fc_packet_t		*fc_pkt;
4181 	fcip_port_info_t	*fport;
4182 	struct fcip_dest	*fdestp;
4183 	uint32_t		did;
4184 	int			rval = FC_FAILURE;
4185 	struct fcip_routing_table *frp = NULL;
4186 
4187 	fptr = fcip_pkt->fcip_pkt_fptr;
4188 	fport = fptr->fcip_port_info;
4189 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
4190 	fdestp = fcip_pkt->fcip_pkt_dest;
4191 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called"));
4192 
4193 	did = fptr->fcip_broadcast_did;
4194 	if (fc_pkt->pkt_cmd_fhdr.d_id == did &&
4195 	    fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) {
4196 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4197 		    (CE_NOTE, "trantype set to BROADCAST"));
4198 		fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
4199 	}
4200 
4201 	mutex_enter(&fptr->fcip_mutex);
4202 	if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) &&
4203 	    (fc_pkt->pkt_pd == NULL)) {
4204 		mutex_exit(&fptr->fcip_mutex);
4205 		return (rval);
4206 	} else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
4207 		mutex_exit(&fptr->fcip_mutex);
4208 		return (FC_TRAN_BUSY);
4209 	}
4210 	mutex_exit(&fptr->fcip_mutex);
4211 
4212 	if (fdestp) {
4213 		struct fcip_routing_table 	*frp;
4214 
4215 		frp = fdestp->fcipd_rtable;
4216 		mutex_enter(&fptr->fcip_rt_mutex);
4217 		mutex_enter(&fdestp->fcipd_mutex);
4218 		if (fc_pkt->pkt_pd != NULL) {
4219 			if ((frp == NULL) ||
4220 			    (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) {
4221 				mutex_exit(&fdestp->fcipd_mutex);
4222 				mutex_exit(&fptr->fcip_rt_mutex);
4223 				if (frp &&
4224 				    (frp->fcipr_state == FCIP_RT_INVALID)) {
4225 					return (FC_TRAN_BUSY);
4226 				} else {
4227 					return (rval);
4228 				}
4229 			}
4230 		}
4231 		mutex_exit(&fdestp->fcipd_mutex);
4232 		mutex_exit(&fptr->fcip_rt_mutex);
4233 		ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
4234 	}
4235 
4236 	/* Explicitly invalidate this field till fcip decides to use it */
4237 	fc_pkt->pkt_ulp_rscn_infop = NULL;
4238 
4239 	rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt);
4240 	if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
4241 		/*
4242 		 * Need to queue up the command for retry
4243 		 */
4244 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4245 		    (CE_WARN, "ulp_transport failed: 0x%x", rval));
4246 	} else if (rval == FC_LOGINREQ && (frp != NULL)) {
4247 		(void) fcip_do_plogi(fptr, frp);
4248 	} else if (rval == FC_BADPACKET && (frp != NULL)) {
4249 		/*
4250 		 * There is a distinct possiblity in our scheme of things
4251 		 * that we have a routing table entry with a NULL pd struct.
4252 		 * Mark the routing table entry for removal if it is not a
4253 		 * broadcast entry
4254 		 */
4255 		if ((frp->fcipr_d_id.port_id != 0x0) &&
4256 		    (frp->fcipr_d_id.port_id != 0xffffff)) {
4257 			mutex_enter(&fptr->fcip_rt_mutex);
4258 			frp->fcipr_pd = NULL;
4259 			frp->fcipr_state = PORT_DEVICE_INVALID;
4260 			mutex_exit(&fptr->fcip_rt_mutex);
4261 		}
4262 	}
4263 
4264 	return (rval);
4265 }
4266 
4267 /*
4268  * Call back routine. Called by the FCA/transport when the messages
4269  * has been put onto the wire towards its intended destination. We can
4270  * now free the fc_packet associated with the message
4271  */
static void
fcip_pkt_callback(fc_packet_t *fc_pkt)
{
	int			rval;
	fcip_pkt_t		*fcip_pkt;
	struct fcip_dest	*fdestp;

	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	fdestp = fcip_pkt->fcip_pkt_dest;

	/*
	 * take the lock early so that we don't have a race condition
	 * with fcip_timeout
	 *
	 * fdestp->fcipd_mutex isn't really intended to lock per
	 * packet struct - see bug 5105592 for permanent solution
	 */
	mutex_enter(&fdestp->fcipd_mutex);

	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED;
	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/*
		 * The timeout thread has claimed this packet; leave the
		 * dequeue and free to it instead of doing it here.
		 */
		mutex_exit(&fdestp->fcipd_mutex);
		return;
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback"));

	ASSERT(fdestp->fcipd_rtable != NULL);
	ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
	/* unlink from the dest's outstanding list and drop the cmd count */
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
	fdestp->fcipd_ncmds--;
	mutex_exit(&fdestp->fcipd_mutex);

	if (rval) {
		/* we unlinked it, so we own it - free the packet */
		fcip_pkt_free(fcip_pkt, 1);
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done"));
}
4312 
4313 /*
4314  * Return 1 if the topology is supported, else return 0.
4315  * Topology support is consistent with what the whole
4316  * stack supports together.
4317  */
4318 static int
fcip_is_supported_fc_topology(int fc_topology)4319 fcip_is_supported_fc_topology(int fc_topology)
4320 {
4321 	switch (fc_topology) {
4322 
4323 	case FC_TOP_PRIVATE_LOOP :
4324 	case FC_TOP_PUBLIC_LOOP :
4325 	case FC_TOP_FABRIC :
4326 	case FC_TOP_NO_NS :
4327 		return (1);
4328 	default :
4329 		return (0);
4330 	}
4331 }
4332 
4333 /*
4334  * handle any topology specific initializations here
4335  * this routine must be called while holding fcip_mutex
4336  */
/* ARGSUSED */
static void
fcip_handle_topology(struct fcip *fptr)
{

	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(mutex_owned(&fptr->fcip_mutex));

	/*
	 * Since we know the port's topology - handle topology
	 * specific details here. In Point to Point and Private Loop
	 * topologies - we would probably not have a name server
	 */

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x",
		fport->fcipp_pstate, fport->fcipp_topology));

	/*
	 * Drop fcip_mutex around the broadcast entry setup since
	 * fcip_dest_add_broadcast_entry acquires other locks.
	 */
	fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr);
	mutex_exit(&fptr->fcip_mutex);
	(void) fcip_dest_add_broadcast_entry(fptr, 0);
	mutex_enter(&fptr->fcip_mutex);

	if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)",
		    fptr->fcip_instance, fport->fcipp_topology));
		return;
	}

	switch (fport->fcipp_topology) {
	case FC_TOP_PRIVATE_LOOP: {

		fc_portmap_t		*port_map;
		uint32_t		listlen, alloclen;
		/*
		 * we may have to maintain routing. Get a list of
		 * all devices on this port that the transport layer is
		 * aware of. Check if any of them is a IS8802 type port,
		 * if yes get its WWN and DID mapping and cache it in
		 * the purport routing table. Since there is no
		 * State Change notification for private loop/point_point
		 * topologies - this table may not be accurate. The static
		 * routing table is updated on a state change callback.
		 */
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!"));
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		listlen = alloclen = FCIP_MAX_PORTS;
		port_map = (fc_portmap_t *)
		    kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)),
		    KM_SLEEP);
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
			/* rt_update takes its own locks - drop ours */
			mutex_exit(&fptr->fcip_mutex);
			fcip_rt_update(fptr, port_map, listlen);
			mutex_enter(&fptr->fcip_mutex);
		}
		/*
		 * listlen can be grown by fc_ulp_getportmap; free the
		 * (possibly reallocated) map with the actual size.
		 */
		if (listlen > alloclen) {
			alloclen = listlen;
		}
		kmem_free(port_map, (alloclen * sizeof (fc_portmap_t)));
		/*
		 * Now fall through and register with the transport
		 * that this port is IP capable
		 */
	}
	/* FALLTHROUGH */
	case FC_TOP_NO_NS:
		/*
		 * If we don't have a nameserver, lets wait until we
		 * have to send out a packet to a remote port and then
		 * try and discover the port using ARP/FARP.
		 */
	/* FALLTHROUGH */
	case FC_TOP_PUBLIC_LOOP:
	case FC_TOP_FABRIC: {
		fc_portmap_t	*port_map;
		uint32_t	listlen, alloclen;

		/* FC_TYPE of 0x05 goes to word 0, LSB */
		fptr->fcip_port_state = FCIP_PORT_ONLINE;

		/*
		 * Register our FC-4 type with the fabric name server on
		 * a taskq thread; FCIP_REG_INPROGRESS guards against a
		 * second dispatch while one is still pending.
		 */
		if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) {
			fptr->fcip_flags |= FCIP_REG_INPROGRESS;
			if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns,
			    fptr, KM_NOSLEEP) == TASKQID_INVALID) {
				fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
			}
		}

		/*
		 * If fcip_create_nodes_on_demand is overridden to force
		 * discovery of all nodes in Fabric/Public loop topologies
		 * we need to query for and obtain all nodes and log into
		 * them as with private loop devices
		 */
		if (!fcip_create_nodes_on_demand) {
			fptr->fcip_port_state = FCIP_PORT_ONLINE;
			listlen = alloclen = FCIP_MAX_PORTS;
			port_map = (fc_portmap_t *)
			    kmem_zalloc((FCIP_MAX_PORTS *
			    sizeof (fc_portmap_t)), KM_SLEEP);
			if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
			    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
				mutex_exit(&fptr->fcip_mutex);
				fcip_rt_update(fptr, port_map, listlen);
				mutex_enter(&fptr->fcip_mutex);
			}
			if (listlen > alloclen) {
				alloclen = listlen;
			}
			kmem_free(port_map,
			    (alloclen * sizeof (fc_portmap_t)));
		}
		break;
	}

	default:
		break;
	}
}
4458 
4459 static void
fcip_port_ns(void * arg)4460 fcip_port_ns(void *arg)
4461 {
4462 	struct	fcip		*fptr = (struct fcip *)arg;
4463 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4464 	fc_ns_cmd_t		ns_cmd;
4465 	uint32_t		types[8];
4466 	ns_rfc_type_t		rfc;
4467 
4468 	mutex_enter(&fptr->fcip_mutex);
4469 	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
4470 	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
4471 		fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4472 		mutex_exit(&fptr->fcip_mutex);
4473 		return;
4474 	}
4475 	mutex_exit(&fptr->fcip_mutex);
4476 
4477 	/*
4478 	 * Prepare the Name server structure to
4479 	 * register with the transport in case of
4480 	 * Fabric configuration.
4481 	 */
4482 	bzero(&rfc, sizeof (rfc));
4483 	bzero(types, sizeof (types));
4484 
4485 	types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 <<
4486 	    FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP));
4487 
4488 	rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id;
4489 	bcopy(types, rfc.rfc_types, sizeof (types));
4490 
4491 	ns_cmd.ns_flags = 0;
4492 	ns_cmd.ns_cmd = NS_RFT_ID;
4493 	ns_cmd.ns_req_len = sizeof (rfc);
4494 	ns_cmd.ns_req_payload = (caddr_t)&rfc;
4495 	ns_cmd.ns_resp_len = 0;
4496 	ns_cmd.ns_resp_payload = NULL;
4497 
4498 	/*
4499 	 * Perform the Name Server Registration for FC IS8802_SNAP Type.
4500 	 * We don't expect a reply for registering port type
4501 	 */
4502 	(void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
4503 		(opaque_t)0, &ns_cmd);
4504 
4505 	mutex_enter(&fptr->fcip_mutex);
4506 	fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4507 	mutex_exit(&fptr->fcip_mutex);
4508 }
4509 
/*
 * setup this instance of fcip. This routine inits kstats, allocates
 * unsolicited buffers, determines this port's siblings and handles
 * topology specific details which includes registering with the name
 * server and also setting up the routing table for this port for
 * private loops and point to point topologies
 */
static int
fcip_init_port(struct fcip *fptr)
{
	int rval = FC_SUCCESS;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	static char buf[64];
	size_t	tok_buf_size = 0;

	ASSERT(fport != NULL);

	mutex_enter(&fptr->fcip_mutex);

	/*
	 * setup mac address for this port. Don't be too worried if
	 * the WWN is zero, there is probably nothing attached to
	 * to the port. There is no point allocating unsolicited buffers
	 * for an unused port so return success if we don't have a MAC
	 * address. Do the port init on a state change notification.
	 */
	if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) {
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_SUCCESS;
		goto done;
	}

	/*
	 * clear routing table hash list for this port
	 */
	fcip_rt_flush(fptr);

	/*
	 * init kstats for this instance
	 */
	fcip_kstat_init(fptr);

	/*
	 * Allocate unsolicited buffers
	 */
	fptr->fcip_ub_nbufs = fcip_ub_nbufs;
	tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs;

	FCIP_DEBUG(FCIP_DEBUG_INIT,
	    (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size));

	fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP);

	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP does not return NULL,
	 * so this check appears to be dead code kept for safety.
	 */
	if (fptr->fcip_ub_tokens == NULL) {
		rval = FC_FAILURE;
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): failed to allocate unsol buf",
		    fptr->fcip_instance));
		goto done;
	}
	rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs,
		fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens);

	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!",
		    fptr->fcip_instance, rval));
	}

	switch (rval) {
	case FC_SUCCESS:
		break;

	case FC_OFFLINE:
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_FAILURE;
		goto done;

	case FC_UB_ERROR:
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "invalid ub alloc request !!"));
		rval = FC_FAILURE;
		goto done;

	case FC_FAILURE:
		/*
		 * requested bytes could not be alloced
		 */
		if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) {
			cmn_err(CE_WARN,
			    "!fcip(0x%x): Failed to alloc unsolicited bufs",
			    ddi_get_instance(fport->fcipp_dip));
			rval = FC_FAILURE;
			goto done;
		}
		break;

	default:
		rval = FC_FAILURE;
		break;
	}

	/*
	 * Preallocate a Cache of fcip packets for transmit and receive
	 * We don't want to be holding on to unsolicited buffers while
	 * we transmit the message upstream
	 */
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache"));

	(void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance);
	fptr->fcip_xmit_cache = kmem_cache_create(buf,
		(fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)),
		8, fcip_cache_constructor, fcip_cache_destructor,
		NULL, (void *)fport, NULL, 0);

	(void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance);
	fptr->fcip_sendup_cache = kmem_cache_create(buf,
		sizeof (struct fcip_sendup_elem),
		8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0);

	if (fptr->fcip_xmit_cache == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip%d unable to allocate xmit cache",
		    fptr->fcip_instance));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * We may need to handle routing tables for point to point and
	 * fcal topologies and register with NameServer for Fabric
	 * topologies.
	 */
	fcip_handle_topology(fptr);
	mutex_exit(&fptr->fcip_mutex);
	if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x):add broadcast entry failed!!",
		    fptr->fcip_instance));
		mutex_enter(&fptr->fcip_mutex);
		rval = FC_FAILURE;
		goto done;
	}

	rval = FC_SUCCESS;
	return (rval);

done:
	/*
	 * we don't always come here from port_attach - so cleanup
	 * anything done in the init_port routine
	 */
	if (fptr->fcip_kstatp) {
		kstat_delete(fptr->fcip_kstatp);
		fptr->fcip_kstatp = NULL;
	}

	if (fptr->fcip_xmit_cache) {
		kmem_cache_destroy(fptr->fcip_xmit_cache);
		fptr->fcip_xmit_cache = NULL;
	}

	if (fptr->fcip_sendup_cache) {
		kmem_cache_destroy(fptr->fcip_sendup_cache);
		fptr->fcip_sendup_cache = NULL;
	}

	/* release unsolicited buffers */
	if (fptr->fcip_ub_tokens) {
		uint64_t	*tokens = fptr->fcip_ub_tokens;
		fptr->fcip_ub_tokens = NULL;

		/* drop fcip_mutex before calling back into the transport */
		mutex_exit(&fptr->fcip_mutex);
		(void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs,
			tokens);
		kmem_free(tokens, tok_buf_size);

	} else {
		mutex_exit(&fptr->fcip_mutex);
	}

	return (rval);
}
4693 
4694 /*
4695  * Sets up a port's MAC address from its WWN
4696  */
4697 static int
fcip_setup_mac_addr(struct fcip * fptr)4698 fcip_setup_mac_addr(struct fcip *fptr)
4699 {
4700 	fcip_port_info_t	*fport = fptr->fcip_port_info;
4701 
4702 	ASSERT(mutex_owned(&fptr->fcip_mutex));
4703 
4704 	fptr->fcip_addrflags = 0;
4705 
4706 	/*
4707 	 * we cannot choose a MAC address for our interface - we have
4708 	 * to live with whatever node WWN we get (minus the top two
4709 	 * MSbytes for the MAC address) from the transport layer. We will
4710 	 * treat the WWN as our factory MAC address.
4711 	 */
4712 
4713 	if ((fport->fcipp_nwwn.w.wwn_hi != 0) ||
4714 	    (fport->fcipp_nwwn.w.wwn_lo != 0)) {
4715 		char		etherstr[ETHERSTRL];
4716 
4717 		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
4718 		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
4719 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4720 		    (CE_NOTE, "setupmacaddr ouraddr %s", etherstr));
4721 
4722 		fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT |
4723 						FCIP_FACTADDR_USE);
4724 	} else {
4725 		/*
4726 		 * No WWN - just return failure - there's not much
4727 		 * we can do since we cannot set the WWN.
4728 		 */
4729 		FCIP_DEBUG(FCIP_DEBUG_INIT,
4730 		    (CE_WARN, "Port does not have a valid WWN"));
4731 		return (FCIP_INVALID_WWN);
4732 	}
4733 	return (FC_SUCCESS);
4734 }
4735 
4736 
4737 /*
4738  * flush routing table entries
4739  */
4740 static void
fcip_rt_flush(struct fcip * fptr)4741 fcip_rt_flush(struct fcip *fptr)
4742 {
4743 	int index;
4744 
4745 	mutex_enter(&fptr->fcip_rt_mutex);
4746 	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
4747 		struct fcip_routing_table 	*frtp, *frtp_next;
4748 		frtp = fptr->fcip_rtable[index];
4749 		while (frtp) {
4750 			frtp_next = frtp->fcipr_next;
4751 			kmem_free(frtp, sizeof (struct fcip_routing_table));
4752 			frtp = frtp_next;
4753 		}
4754 		fptr->fcip_rtable[index] = NULL;
4755 	}
4756 	mutex_exit(&fptr->fcip_rt_mutex);
4757 }
4758 
4759 /*
4760  * Free up the fcip softstate and all allocated resources for the
4761  * fcip instance assoicated with a given port driver instance
4762  *
4763  * Given that the list of structures pointed to by fcip_port_head,
4764  * this function is called from multiple sources, and the
4765  * fcip_global_mutex that protects fcip_port_head must be dropped,
4766  * our best solution is to return a value that indicates the next
4767  * port in the list.  This way the caller doesn't need to worry
4768  * about the race condition where it saves off a pointer to the
4769  * next structure in the list and by the time this routine returns,
4770  * that next structure has already been freed.
4771  */
static fcip_port_info_t *
fcip_softstate_free(fcip_port_info_t *fport)
{
	struct fcip		*fptr = NULL;
	int 			instance;
	timeout_id_t		tid;
	opaque_t		phandle = NULL;
	fcip_port_info_t	*prev_fport, *cur_fport, *next_fport = NULL;

	ASSERT(MUTEX_HELD(&fcip_global_mutex));

	if (fport) {
		phandle = fport->fcipp_handle;
		fptr = fport->fcipp_fcip;
	} else {
		/* nothing to free - no next port either */
		return (next_fport);
	}

	if (fptr) {
		mutex_enter(&fptr->fcip_mutex);
		instance = ddi_get_instance(fptr->fcip_dip);

		/*
		 * dismantle timeout thread for this instance of fcip
		 */
		tid = fptr->fcip_timeout_id;
		fptr->fcip_timeout_id = NULL;

		/* untimeout can block on a running handler - drop the lock */
		mutex_exit(&fptr->fcip_mutex);
		(void) untimeout(tid);
		mutex_enter(&fptr->fcip_mutex);

		ASSERT(fcip_num_instances >= 0);
		fcip_num_instances--;

		/*
		 * stop sendup thread
		 */
		mutex_enter(&fptr->fcip_sendup_mutex);
		if (fptr->fcip_sendup_thr_initted) {
			/* signal the thread to exit, then wait for its ack */
			fptr->fcip_sendup_thr_initted = 0;
			cv_signal(&fptr->fcip_sendup_cv);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
		}
		ASSERT(fptr->fcip_sendup_head == NULL);
		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		/*
		 * dismantle taskq
		 */
		if (fptr->fcip_tq) {
			taskq_t	*tq = fptr->fcip_tq;

			fptr->fcip_tq = NULL;

			/* taskq_destroy waits for pending tasks - drop lock */
			mutex_exit(&fptr->fcip_mutex);
			taskq_destroy(tq);
			mutex_enter(&fptr->fcip_mutex);
		}

		if (fptr->fcip_kstatp) {
			kstat_delete(fptr->fcip_kstatp);
			fptr->fcip_kstatp = NULL;
		}

		/* flush the routing table entries */
		fcip_rt_flush(fptr);

		if (fptr->fcip_xmit_cache) {
			kmem_cache_destroy(fptr->fcip_xmit_cache);
			fptr->fcip_xmit_cache = NULL;
		}

		if (fptr->fcip_sendup_cache) {
			kmem_cache_destroy(fptr->fcip_sendup_cache);
			fptr->fcip_sendup_cache = NULL;
		}

		fcip_cleanup_dest(fptr);

		/* release unsolicited buffers */
		if (fptr->fcip_ub_tokens) {
			uint64_t	*tokens = fptr->fcip_ub_tokens;

			fptr->fcip_ub_tokens = NULL;
			mutex_exit(&fptr->fcip_mutex);
			if (phandle) {
				/*
				 * release the global mutex here to
				 * permit any data pending callbacks to
				 * complete. Else we will deadlock in the
				 * FCA waiting for all unsol buffers to be
				 * returned.
				 */
				mutex_exit(&fcip_global_mutex);
				(void) fc_ulp_ubfree(phandle,
				    fptr->fcip_ub_nbufs, tokens);
				mutex_enter(&fcip_global_mutex);
			}
			kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs));
		} else {
			mutex_exit(&fptr->fcip_mutex);
		}

		mutex_destroy(&fptr->fcip_mutex);
		mutex_destroy(&fptr->fcip_ub_mutex);
		mutex_destroy(&fptr->fcip_rt_mutex);
		mutex_destroy(&fptr->fcip_dest_mutex);
		mutex_destroy(&fptr->fcip_sendup_mutex);
		cv_destroy(&fptr->fcip_farp_cv);
		cv_destroy(&fptr->fcip_sendup_cv);
		cv_destroy(&fptr->fcip_ub_cv);

		ddi_soft_state_free(fcip_softp, instance);
	}

	/*
	 * Now dequeue the fcip_port_info from the port list
	 */
	cur_fport = fcip_port_head;
	prev_fport = NULL;
	while (cur_fport != NULL) {
		if (cur_fport == fport) {
			break;
		}
		prev_fport = cur_fport;
		cur_fport = cur_fport->fcipp_next;
	}

	/*
	 * Assert that we found a port in our port list
	 */
	ASSERT(cur_fport == fport);

	if (prev_fport) {
		/*
		 * Not the first port in the port list
		 */
		prev_fport->fcipp_next = fport->fcipp_next;
	} else {
		/*
		 * first port
		 */
		fcip_port_head = fport->fcipp_next;
	}
	next_fport = fport->fcipp_next;
	kmem_free(fport, sizeof (fcip_port_info_t));

	/* hand the caller the next (still valid) port in the list */
	return (next_fport);
}
4924 
4925 
4926 /*
4927  * This is called by transport for any ioctl operations performed
4928  * on the devctl or other transport minor nodes. It is currently
4929  * unused for fcip
4930  */
4931 /* ARGSUSED */
4932 static int
fcip_port_ioctl(opaque_t ulp_handle,opaque_t port_handle,dev_t dev,int cmd,intptr_t data,int mode,cred_t * credp,int * rval,uint32_t claimed)4933 fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle, dev_t dev,
4934 	int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
4935 	uint32_t claimed)
4936 {
4937 	return (FC_UNCLAIMED);
4938 }
4939 
4940 /*
4941  * DL_INFO_REQ - returns information about the DLPI stream to the DLS user
4942  * requesting information about this interface
4943  */
4944 static void
fcip_ireq(queue_t * wq,mblk_t * mp)4945 fcip_ireq(queue_t *wq, mblk_t *mp)
4946 {
4947 	struct fcipstr		*slp;
4948 	struct fcip		*fptr;
4949 	dl_info_ack_t		*dlip;
4950 	struct fcipdladdr	*dlap;
4951 	la_wwn_t		*ep;
4952 	int 			size;
4953 	char			etherstr[ETHERSTRL];
4954 
4955 	slp = (struct fcipstr *)wq->q_ptr;
4956 
4957 	fptr = slp->sl_fcip;
4958 
4959 	FCIP_DEBUG(FCIP_DEBUG_DLPI,
4960 	    (CE_NOTE, "fcip_ireq: info request req rcvd"));
4961 
4962 	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
4963 		dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
4964 		return;
4965 	}
4966 
4967 	/*
4968 	 * Exchange current message for a DL_INFO_ACK
4969 	 */
4970 	size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL;
4971 	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {
4972 		return;
4973 	}
4974 
4975 	/*
4976 	 * FILL in the DL_INFO_ACK fields and reply
4977 	 */
4978 	dlip = (dl_info_ack_t *)mp->b_rptr;
4979 	*dlip = fcip_infoack;
4980 	dlip->dl_current_state = slp->sl_state;
4981 	dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset);
4982 	dlap->dl_sap = slp->sl_sap;
4983 
4984 
4985 	if (fptr) {
4986 		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
4987 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
4988 		    (CE_NOTE, "ireq - our mac: %s", etherstr));
4989 		ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys);
4990 	} else {
4991 		bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);
4992 	}
4993 
4994 	ep = (la_wwn_t *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
4995 	ether_bcopy(&fcip_arpbroadcast_addr, ep);
4996 
4997 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req.."));
4998 	qreply(wq, mp);
4999 }
5000 
5001 
5002 /*
5003  * To handle DL_UNITDATA_REQ requests.
5004  */
5005 
5006 static void
fcip_udreq(queue_t * wq,mblk_t * mp)5007 fcip_udreq(queue_t *wq, mblk_t *mp)
5008 {
5009 	struct fcipstr		*slp;
5010 	struct fcip		*fptr;
5011 	fcip_port_info_t	*fport;
5012 	dl_unitdata_req_t	*dludp;
5013 	mblk_t			*nmp;
5014 	struct fcipdladdr	*dlap;
5015 	fcph_network_hdr_t 	*headerp;
5016 	llc_snap_hdr_t		*lsnap;
5017 	t_uscalar_t		off, len;
5018 	struct fcip_dest	*fdestp;
5019 	la_wwn_t		wwn;
5020 	int			hdr_size;
5021 
5022 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq"));
5023 
5024 	slp = (struct fcipstr *)wq->q_ptr;
5025 
5026 	if (slp->sl_state != DL_IDLE) {
5027 		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5028 		return;
5029 	}
5030 
5031 	fptr = slp->sl_fcip;
5032 
5033 	if (fptr == NULL) {
5034 		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5035 		return;
5036 	}
5037 
5038 	fport = fptr->fcip_port_info;
5039 
5040 	dludp = (dl_unitdata_req_t *)mp->b_rptr;
5041 	off = dludp->dl_dest_addr_offset;
5042 	len = dludp->dl_dest_addr_length;
5043 
5044 	/*
5045 	 * Validate destination address format
5046 	 */
5047 	if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) {
5048 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0);
5049 		return;
5050 	}
5051 
5052 	/*
5053 	 * Error if no M_DATA follows
5054 	 */
5055 	nmp = mp->b_cont;
5056 	if (nmp == NULL) {
5057 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5058 		return;
5059 	}
5060 	dlap = (struct fcipdladdr *)(mp->b_rptr + off);
5061 
5062 	/*
5063 	 * Now get the destination structure for the remote NPORT
5064 	 */
5065 	ether_to_wwn(&dlap->dl_phys, &wwn);
5066 	fdestp = fcip_get_dest(fptr, &wwn);
5067 
5068 	if (fdestp == NULL) {
5069 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE,
5070 		    "udreq - couldn't find dest struct for remote port"));
5071 		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5072 		return;
5073 	}
5074 
5075 	/*
5076 	 * Network header + SAP
5077 	 */
5078 	hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
5079 
5080 	/* DB_REF gives the no. of msgs pointing to this block */
5081 	if ((DB_REF(nmp) == 1) &&
5082 	    (MBLKHEAD(nmp) >= hdr_size) &&
5083 	    (((uintptr_t)mp->b_rptr & 0x1) == 0)) {
5084 		la_wwn_t wwn;
5085 		nmp->b_rptr -= hdr_size;
5086 
5087 		/* first put the network header */
5088 		headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5089 		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5090 			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5091 		} else {
5092 			ether_to_wwn(&dlap->dl_phys, &wwn);
5093 		}
5094 		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5095 		ether_to_wwn(&fptr->fcip_macaddr, &wwn);
5096 		bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5097 
5098 		/* Now the snap header */
5099 		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5100 		    sizeof (fcph_network_hdr_t));
5101 		lsnap->dsap = 0xAA;
5102 		lsnap->ssap = 0xAA;
5103 		lsnap->ctrl = 0x03;
5104 		lsnap->oui[0] = 0x00;
5105 		lsnap->oui[1] = 0x00; 	/* 80 */
5106 		lsnap->oui[2] = 0x00;	/* C2 */
5107 		lsnap->pid = BE_16((dlap->dl_sap));
5108 
5109 		freeb(mp);
5110 		mp = nmp;
5111 
5112 	} else {
5113 		la_wwn_t wwn;
5114 
5115 		DB_TYPE(mp) = M_DATA;
5116 		headerp = (fcph_network_hdr_t *)mp->b_rptr;
5117 
5118 		/*
5119 		 * Only fill in the low 48bits of WWN for now - we can
5120 		 * fill in the NAA_ID after we find the port in the
5121 		 * routing tables
5122 		 */
5123 		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5124 			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5125 		} else {
5126 			ether_to_wwn(&dlap->dl_phys, &wwn);
5127 		}
5128 		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5129 		/* need to send our PWWN */
5130 		bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr,
5131 		    sizeof (la_wwn_t));
5132 
5133 		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5134 		    sizeof (fcph_network_hdr_t));
5135 		lsnap->dsap = 0xAA;
5136 		lsnap->ssap = 0xAA;
5137 		lsnap->ctrl = 0x03;
5138 		lsnap->oui[0] = 0x00;
5139 		lsnap->oui[1] = 0x00;
5140 		lsnap->oui[2] = 0x00;
5141 		lsnap->pid = BE_16(dlap->dl_sap);
5142 
5143 		mp->b_wptr = mp->b_rptr + hdr_size;
5144 	}
5145 
5146 	/*
5147 	 * Ethernet drivers have a lot of gunk here to put the Type
5148 	 * information (for Ethernet encapsulation (RFC 894) or the
5149 	 * Length (for 802.2/802.3) - I guess we'll just ignore that
5150 	 * here.
5151 	 */
5152 
5153 	/*
5154 	 * Start the I/O on this port. If fcip_start failed for some reason
5155 	 * we call putbq in fcip_start so we don't need to check the
5156 	 * return value from fcip_start
5157 	 */
5158 	(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
5159 }
5160 
5161 /*
5162  * DL_ATTACH_REQ: attaches a PPA with a stream. ATTACH requets are needed
5163  * for style 2 DLS providers to identify the physical medium through which
5164  * the streams communication will happen
5165  */
5166 static void
fcip_areq(queue_t * wq,mblk_t * mp)5167 fcip_areq(queue_t *wq, mblk_t *mp)
5168 {
5169 	struct fcipstr		*slp;
5170 	union DL_primitives	*dlp;
5171 	fcip_port_info_t	*fport;
5172 	struct fcip		*fptr;
5173 	int			ppa;
5174 
5175 	slp = (struct fcipstr *)wq->q_ptr;
5176 	dlp = (union DL_primitives *)mp->b_rptr;
5177 
5178 	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
5179 		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
5180 		return;
5181 	}
5182 
5183 	if (slp->sl_state != DL_UNATTACHED) {
5184 		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
5185 		return;
5186 	}
5187 
5188 	ppa = dlp->attach_req.dl_ppa;
5189 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa));
5190 
5191 	/*
5192 	 * check if the PPA is valid
5193 	 */
5194 
5195 	mutex_enter(&fcip_global_mutex);
5196 
5197 	for (fport = fcip_port_head; fport; fport = fport->fcipp_next) {
5198 		if ((fptr = fport->fcipp_fcip) == NULL) {
5199 			continue;
5200 		}
5201 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa,
5202 		    ddi_get_instance(fptr->fcip_dip)));
5203 
5204 		if (ppa == ddi_get_instance(fptr->fcip_dip)) {
5205 			FCIP_DEBUG(FCIP_DEBUG_DLPI,
5206 			    (CE_NOTE, "ppa found %x", ppa));
5207 			break;
5208 		}
5209 	}
5210 
5211 	if (fport == NULL) {
5212 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5213 		    (CE_NOTE, "dlerrorack coz fport==NULL"));
5214 
5215 		mutex_exit(&fcip_global_mutex);
5216 
5217 		if (fc_ulp_get_port_handle(ppa) == NULL) {
5218 			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
5219 			return;
5220 		}
5221 
5222 		/*
5223 		 * Wait for Port attach callback to trigger.  If port_detach
5224 		 * got in while we were waiting, then ddi_get_soft_state
5225 		 * will return NULL, and we'll return error.
5226 		 */
5227 
5228 		delay(drv_usectohz(FCIP_INIT_DELAY));
5229 		mutex_enter(&fcip_global_mutex);
5230 
5231 		fptr = ddi_get_soft_state(fcip_softp, ppa);
5232 		if (fptr == NULL) {
5233 			mutex_exit(&fcip_global_mutex);
5234 			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
5235 			return;
5236 		}
5237 	}
5238 
5239 	/*
5240 	 * set link to device and update our state
5241 	 */
5242 	slp->sl_fcip = fptr;
5243 	slp->sl_state = DL_UNBOUND;
5244 
5245 	mutex_exit(&fcip_global_mutex);
5246 
5247 #ifdef DEBUG
5248 	mutex_enter(&fptr->fcip_mutex);
5249 	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
5250 		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet"));
5251 	}
5252 	mutex_exit(&fptr->fcip_mutex);
5253 #endif
5254 
5255 	dlokack(wq, mp, DL_ATTACH_REQ);
5256 }
5257 
5258 
5259 /*
5260  * DL_DETACH request - detaches a PPA from a stream
5261  */
5262 static void
fcip_dreq(queue_t * wq,mblk_t * mp)5263 fcip_dreq(queue_t *wq, mblk_t *mp)
5264 {
5265 	struct fcipstr		*slp;
5266 
5267 	slp = (struct fcipstr *)wq->q_ptr;
5268 
5269 	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
5270 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
5271 		return;
5272 	}
5273 
5274 	if (slp->sl_state != DL_UNBOUND) {
5275 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
5276 		return;
5277 	}
5278 
5279 	fcip_dodetach(slp);
5280 	dlokack(wq, mp, DL_DETACH_REQ);
5281 }
5282 
5283 /*
5284  * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream.
5285  * DLS users communicate with a physical interface through DLSAPs. Multiple
5286  * DLSAPs can be bound to the same stream (PPA)
5287  */
5288 static void
fcip_breq(queue_t * wq,mblk_t * mp)5289 fcip_breq(queue_t *wq, mblk_t *mp)
5290 {
5291 	struct fcipstr		*slp;
5292 	union DL_primitives	*dlp;
5293 	struct fcip		*fptr;
5294 	struct fcipdladdr	fcipaddr;
5295 	t_uscalar_t		sap;
5296 	int			xidtest;
5297 
5298 	slp = (struct fcipstr *)wq->q_ptr;
5299 
5300 	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
5301 		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
5302 		return;
5303 	}
5304 
5305 	if (slp->sl_state != DL_UNBOUND) {
5306 		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5307 		return;
5308 	}
5309 
5310 	dlp = (union DL_primitives *)mp->b_rptr;
5311 	fptr = slp->sl_fcip;
5312 
5313 	if (fptr == NULL) {
5314 		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5315 		return;
5316 	}
5317 
5318 	sap = dlp->bind_req.dl_sap;
5319 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap));
5320 	xidtest = dlp->bind_req.dl_xidtest_flg;
5321 
5322 	if (xidtest) {
5323 		dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
5324 		return;
5325 	}
5326 
5327 	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap));
5328 
5329 	if (sap > ETHERTYPE_MAX) {
5330 		dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
5331 		return;
5332 	}
5333 	/*
5334 	 * save SAP for this stream and change the link state
5335 	 */
5336 	slp->sl_sap = sap;
5337 	slp->sl_state = DL_IDLE;
5338 
5339 	fcipaddr.dl_sap = sap;
5340 	ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys);
5341 	dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0);
5342 
5343 	fcip_setipq(fptr);
5344 }
5345 
5346 /*
5347  * DL_UNBIND request to unbind a previously bound DLSAP, from this stream
5348  */
5349 static void
fcip_ubreq(queue_t * wq,mblk_t * mp)5350 fcip_ubreq(queue_t *wq, mblk_t *mp)
5351 {
5352 	struct fcipstr	*slp;
5353 
5354 	slp = (struct fcipstr *)wq->q_ptr;
5355 
5356 	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
5357 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
5358 		return;
5359 	}
5360 
5361 	if (slp->sl_state != DL_IDLE) {
5362 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
5363 		return;
5364 	}
5365 
5366 	slp->sl_state = DL_UNBOUND;
5367 	slp->sl_sap = 0;
5368 
5369 	(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
5370 	dlokack(wq, mp, DL_UNBIND_REQ);
5371 
5372 	fcip_setipq(slp->sl_fcip);
5373 }
5374 
5375 /*
5376  * Return our physical address
5377  */
5378 static void
fcip_pareq(queue_t * wq,mblk_t * mp)5379 fcip_pareq(queue_t *wq, mblk_t *mp)
5380 {
5381 	struct fcipstr 		*slp;
5382 	union DL_primitives	*dlp;
5383 	int			type;
5384 	struct fcip		*fptr;
5385 	fcip_port_info_t	*fport;
5386 	struct ether_addr	addr;
5387 
5388 	slp = (struct fcipstr *)wq->q_ptr;
5389 
5390 	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
5391 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5392 		return;
5393 	}
5394 
5395 	dlp = (union DL_primitives *)mp->b_rptr;
5396 	type = dlp->physaddr_req.dl_addr_type;
5397 	fptr = slp->sl_fcip;
5398 
5399 	if (fptr == NULL) {
5400 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5401 		return;
5402 	}
5403 
5404 	fport = fptr->fcip_port_info;
5405 
5406 	switch (type) {
5407 	case DL_FACT_PHYS_ADDR:
5408 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5409 		    (CE_NOTE, "returning factory phys addr"));
5410 		wwn_to_ether(&fport->fcipp_pwwn, &addr);
5411 		break;
5412 
5413 	case DL_CURR_PHYS_ADDR:
5414 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5415 		    (CE_NOTE, "returning current phys addr"));
5416 		ether_bcopy(&fptr->fcip_macaddr, &addr);
5417 		break;
5418 
5419 	default:
5420 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5421 		    (CE_NOTE, "Not known cmd type in phys addr"));
5422 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
5423 		return;
5424 	}
5425 	dlphysaddrack(wq, mp, &addr, ETHERADDRL);
5426 }
5427 
5428 /*
5429  * Set physical address DLPI request
5430  */
5431 static void
fcip_spareq(queue_t * wq,mblk_t * mp)5432 fcip_spareq(queue_t *wq, mblk_t *mp)
5433 {
5434 	struct fcipstr		*slp;
5435 	union DL_primitives	*dlp;
5436 	t_uscalar_t		off, len;
5437 	struct ether_addr	*addrp;
5438 	la_wwn_t		wwn;
5439 	struct fcip		*fptr;
5440 	fc_ns_cmd_t		fcip_ns_cmd;
5441 
5442 	slp = (struct fcipstr *)wq->q_ptr;
5443 
5444 	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
5445 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5446 		return;
5447 	}
5448 
5449 	dlp = (union DL_primitives *)mp->b_rptr;
5450 	len = dlp->set_physaddr_req.dl_addr_length;
5451 	off = dlp->set_physaddr_req.dl_addr_offset;
5452 
5453 	if (!MBLKIN(mp, off, len)) {
5454 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5455 		return;
5456 	}
5457 
5458 	addrp = (struct ether_addr *)(mp->b_rptr + off);
5459 
5460 	/*
5461 	 * If the length of physical address is not correct or address
5462 	 * specified is a broadcast address or multicast addr -
5463 	 * return an error.
5464 	 */
5465 	if ((len != ETHERADDRL) ||
5466 	    ((addrp->ether_addr_octet[0] & 01) == 1) ||
5467 	    (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) {
5468 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5469 		return;
5470 	}
5471 
5472 	/*
5473 	 * check if a stream is attached to this device. Else return an error
5474 	 */
5475 	if ((fptr = slp->sl_fcip) == NULL) {
5476 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5477 		return;
5478 	}
5479 
5480 	/*
5481 	 * set the new interface local address. We request the transport
5482 	 * layer to change the Port WWN for this device - return an error
5483 	 * if we don't succeed.
5484 	 */
5485 
5486 	ether_to_wwn(addrp, &wwn);
5487 	if (fcip_set_wwn(&wwn) == FC_SUCCESS) {
5488 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5489 		    (CE_WARN, "WWN changed in spareq"));
5490 	} else {
5491 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5492 	}
5493 
5494 	/*
5495 	 * register The new Port WWN and Node WWN with the transport
5496 	 * and Nameserver. Hope the transport ensures all current I/O
5497 	 * has stopped before actually attempting to register a new
5498 	 * port and Node WWN else we are hosed. Maybe a Link reset
5499 	 * will get everyone's attention.
5500 	 */
5501 	fcip_ns_cmd.ns_flags = 0;
5502 	fcip_ns_cmd.ns_cmd = NS_RPN_ID;
5503 	fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t);
5504 	fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0];
5505 	fcip_ns_cmd.ns_resp_len = 0;
5506 	fcip_ns_cmd.ns_resp_payload = (caddr_t)0;
5507 	if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
5508 	    (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) {
5509 		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5510 		    (CE_WARN, "setting Port WWN failed"));
5511 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5512 		return;
5513 	}
5514 
5515 	dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
5516 }
5517 
5518 /*
5519  * change our port's WWN if permitted by hardware
5520  */
5521 /* ARGSUSED */
5522 static int
fcip_set_wwn(la_wwn_t * pwwn)5523 fcip_set_wwn(la_wwn_t *pwwn)
5524 {
5525 	/*
5526 	 * We're usually not allowed to change the WWN of adapters
5527 	 * but some adapters do permit us to change the WWN - don't
5528 	 * permit setting of WWNs (yet?) - This behavior could be
5529 	 * modified if needed
5530 	 */
5531 	return (FC_FAILURE);
5532 }
5533 
5534 
5535 /*
5536  * This routine fills in the header for fastpath data requests. What this
5537  * does in simple terms is, instead of sending all data through the Unitdata
5538  * request dlpi code paths (which will then append the protocol specific
5539  * header - network and snap headers in our case), the upper layers issue
5540  * a M_IOCTL with a DL_IOC_HDR_INFO request and ask the streams endpoint
5541  * driver to give the header it needs appended and the upper layer
5542  * allocates and fills in the header and calls our put routine
5543  */
5544 static void
fcip_dl_ioc_hdr_info(queue_t * wq,mblk_t * mp)5545 fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
5546 {
5547 	mblk_t			*nmp;
5548 	struct fcipstr		*slp;
5549 	struct fcipdladdr	*dlap;
5550 	dl_unitdata_req_t	*dlup;
5551 	fcph_network_hdr_t	*headerp;
5552 	la_wwn_t		wwn;
5553 	llc_snap_hdr_t		*lsnap;
5554 	struct fcip		*fptr;
5555 	fcip_port_info_t	*fport;
5556 	t_uscalar_t		off, len;
5557 	size_t			hdrlen;
5558 	int 			error;
5559 
5560 	slp = (struct fcipstr *)wq->q_ptr;
5561 	fptr = slp->sl_fcip;
5562 	if (fptr == NULL) {
5563 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5564 		    (CE_NOTE, "dliochdr : returns EINVAL1"));
5565 		miocnak(wq, mp, 0, EINVAL);
5566 		return;
5567 	}
5568 
5569 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
5570 	if (error != 0) {
5571 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5572 		    (CE_NOTE, "dliochdr : returns %d", error));
5573 		miocnak(wq, mp, 0, error);
5574 		return;
5575 	}
5576 
5577 	fport = fptr->fcip_port_info;
5578 
5579 	/*
5580 	 * check if the DL_UNITDATA_REQ destination addr has valid offset
5581 	 * and length values
5582 	 */
5583 	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
5584 	off = dlup->dl_dest_addr_offset;
5585 	len = dlup->dl_dest_addr_length;
5586 	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
5587 	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
5588 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5589 		    (CE_NOTE, "dliochdr : returns EINVAL2"));
5590 		miocnak(wq, mp, 0, EINVAL);
5591 		return;
5592 	}
5593 
5594 	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);
5595 
5596 	/*
5597 	 * Allocate a new mblk to hold the ether header
5598 	 */
5599 
5600 	/*
5601 	 * setup space for network header
5602 	 */
5603 	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
5604 	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
5605 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5606 		    (CE_NOTE, "dliochdr : returns ENOMEM"));
5607 		miocnak(wq, mp, 0, ENOMEM);
5608 		return;
5609 	}
5610 	nmp->b_wptr += hdrlen;
5611 
5612 	/*
5613 	 * Fill in the Network Hdr and LLC SNAP header;
5614 	 */
5615 	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5616 	/*
5617 	 * just fill in the Node WWN here - we can fill in the NAA_ID when
5618 	 * we search the routing table
5619 	 */
5620 	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5621 		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5622 	} else {
5623 		ether_to_wwn(&dlap->dl_phys, &wwn);
5624 	}
5625 	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5626 	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5627 	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
5628 	lsnap->dsap = 0xAA;
5629 	lsnap->ssap = 0xAA;
5630 	lsnap->ctrl = 0x03;
5631 	lsnap->oui[0] = 0x00;
5632 	lsnap->oui[1] = 0x00;
5633 	lsnap->oui[2] = 0x00;
5634 	lsnap->pid = BE_16(dlap->dl_sap);
5635 
5636 	/*
5637 	 * Link new mblk in after the "request" mblks.
5638 	 */
5639 	linkb(mp, nmp);
5640 
5641 	slp->sl_flags |= FCIP_SLFAST;
5642 
5643 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5644 	    (CE_NOTE, "dliochdr : returns success "));
5645 	miocack(wq, mp, msgsize(mp->b_cont), 0);
5646 }
5647 
5648 
5649 /*
5650  * Establish a kmem cache for fcip packets
5651  */
5652 static int
fcip_cache_constructor(void * buf,void * arg,int flags)5653 fcip_cache_constructor(void *buf, void *arg, int flags)
5654 {
5655 	fcip_pkt_t		*fcip_pkt = buf;
5656 	fc_packet_t		*fc_pkt;
5657 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
5658 	int			(*cb) (caddr_t);
5659 	struct fcip		*fptr;
5660 
5661 	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
5662 
5663 	ASSERT(fport != NULL);
5664 
5665 	fptr = fport->fcipp_fcip;
5666 
5667 	/*
5668 	 * we allocated space for our private area at the end of the
5669 	 * fc packet. Make sure we point to it correctly. Ideally we
5670 	 * should just push fc_packet_private to the beginning or end
5671 	 * of the fc_packet structure
5672 	 */
5673 	fcip_pkt->fcip_pkt_next = NULL;
5674 	fcip_pkt->fcip_pkt_prev = NULL;
5675 	fcip_pkt->fcip_pkt_dest = NULL;
5676 	fcip_pkt->fcip_pkt_state = 0;
5677 	fcip_pkt->fcip_pkt_reason = 0;
5678 	fcip_pkt->fcip_pkt_flags = 0;
5679 	fcip_pkt->fcip_pkt_fptr = fptr;
5680 	fcip_pkt->fcip_pkt_dma_flags = 0;
5681 
5682 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5683 	fc_pkt->pkt_ulp_rscn_infop = NULL;
5684 
5685 	/*
5686 	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
5687 	 * any responses for outbound IP data so no need to setup
5688 	 * response or data dma handles.
5689 	 */
5690 	if (ddi_dma_alloc_handle(fport->fcipp_dip,
5691 	    &fport->fcipp_cmd_dma_attr, cb, NULL,
5692 	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
5693 		return (FCIP_FAILURE);
5694 	}
5695 
5696 	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
5697 	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
5698 	    sizeof (fcip_pkt_t));
5699 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
5700 
5701 	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
5702 	    fc_pkt->pkt_data_cookie_cnt = 0;
5703 	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
5704 	    fc_pkt->pkt_data_cookie = NULL;
5705 
5706 	return (FCIP_SUCCESS);
5707 }
5708 
5709 /*
5710  * destroy the fcip kmem cache
5711  */
5712 static void
fcip_cache_destructor(void * buf,void * arg)5713 fcip_cache_destructor(void *buf, void *arg)
5714 {
5715 	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)buf;
5716 	fc_packet_t		*fc_pkt;
5717 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
5718 	struct fcip		*fptr;
5719 
5720 	ASSERT(fport != NULL);
5721 
5722 	fptr = fport->fcipp_fcip;
5723 
5724 	ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
5725 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5726 
5727 	if (fc_pkt->pkt_cmd_dma) {
5728 		ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
5729 	}
5730 }
5731 
5732 /*
5733  * the fcip destination structure is hashed on Node WWN assuming
5734  * a  NAA_ID of 0x1 (IEEE)
5735  */
5736 static struct fcip_dest *
fcip_get_dest(struct fcip * fptr,la_wwn_t * pwwn)5737 fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
5738 {
5739 	struct fcip_dest	*fdestp = NULL;
5740 	fcip_port_info_t	*fport;
5741 	int			hash_bucket;
5742 	opaque_t		pd;
5743 	int			rval;
5744 	struct fcip_routing_table *frp;
5745 	la_wwn_t		twwn;
5746 	uint32_t		*twwnp = (uint32_t *)&twwn;
5747 
5748 	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
5749 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5750 	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
5751 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5752 	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
5753 	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
5754 	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));
5755 
5756 	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
5757 
5758 	if (fcip_check_port_exists(fptr)) {
5759 		/* fptr is stale, return fdestp */
5760 		return (fdestp);
5761 	}
5762 	fport = fptr->fcip_port_info;
5763 
5764 	/*
5765 	 * First check if we have active I/Os going on with the
5766 	 * destination port (an entry would exist in fcip_dest hash table)
5767 	 */
5768 	mutex_enter(&fptr->fcip_dest_mutex);
5769 	fdestp = fptr->fcip_dest[hash_bucket];
5770 	while (fdestp != NULL) {
5771 		mutex_enter(&fdestp->fcipd_mutex);
5772 		if (fdestp->fcipd_rtable) {
5773 			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
5774 			    FCIP_COMPARE_NWWN) == 0) {
5775 				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5776 				    (CE_NOTE, "found fdestp"));
5777 				mutex_exit(&fdestp->fcipd_mutex);
5778 				mutex_exit(&fptr->fcip_dest_mutex);
5779 				return (fdestp);
5780 			}
5781 		}
5782 		mutex_exit(&fdestp->fcipd_mutex);
5783 		fdestp = fdestp->fcipd_next;
5784 	}
5785 	mutex_exit(&fptr->fcip_dest_mutex);
5786 
5787 	/*
5788 	 * We did not find the destination port information in our
5789 	 * active port list so search for an entry in our routing
5790 	 * table.
5791 	 */
5792 	mutex_enter(&fptr->fcip_rt_mutex);
5793 	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
5794 	mutex_exit(&fptr->fcip_rt_mutex);
5795 
5796 	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
5797 	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
5798 	    (frp && frp->fcipr_pd == NULL)) {
5799 		/*
5800 		 * No entry for the destination port in our routing
5801 		 * table too. First query the transport to see if it
5802 		 * already has structures for the destination port in
5803 		 * its hash tables. This must be done for all topologies
5804 		 * since we could have retired entries in the hash tables
5805 		 * which may have to be re-added without a statechange
5806 		 * callback happening. Its better to try and get an entry
5807 		 * for the destination port rather than simply failing a
5808 		 * request though it may be an overkill in private loop
5809 		 * topologies.
5810 		 * If a entry for the remote port exists in the transport's
5811 		 * hash tables, we are fine and can add the entry to our
5812 		 * routing and dest hash lists, Else for fabric configs we
5813 		 * query the nameserver if one exists or issue FARP ELS.
5814 		 */
5815 
5816 		/*
5817 		 * We need to do a PortName based Nameserver
5818 		 * query operation. So get the right PortWWN
5819 		 * for the adapter.
5820 		 */
5821 		bcopy(pwwn, &twwn, sizeof (la_wwn_t));
5822 
5823 		/*
5824 		 * Try IEEE Name (Format 1) first, this is the default and
5825 		 * Emulex uses this format.
5826 		 */
5827 		pd = fc_ulp_get_remote_port(fport->fcipp_handle,
5828 					    &twwn, &rval, 1);
5829 
5830 		if (rval != FC_SUCCESS) {
5831 			/*
5832 			 * If IEEE Name (Format 1) query failed, try IEEE
5833 			 * Extended Name (Format 2) which Qlogic uses.
5834 			 * And try port 1 on Qlogic FC-HBA first.
5835 			 * Note: On x86, we need to byte swap the 32-bit
5836 			 * word first, after the modification, swap it back.
5837 			 */
5838 			*twwnp = BE_32(*twwnp);
5839 			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
5840 			twwn.w.naa_id = QLC_PORT_NAA;
5841 			*twwnp = BE_32(*twwnp);
5842 			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
5843 						    &twwn, &rval, 1);
5844 		}
5845 
5846 		if (rval != FC_SUCCESS) {
5847 			/* If still failed, try port 2 on Qlogic FC-HBA. */
5848 			*twwnp = BE_32(*twwnp);
5849 			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
5850 			*twwnp = BE_32(*twwnp);
5851 			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
5852 						    &twwn, &rval, 1);
5853 		}
5854 
5855 		if (rval == FC_SUCCESS) {
5856 			fc_portmap_t	map;
5857 			/*
5858 			 * Add the newly found destination structure
5859 			 * to our routing table. Create a map with
5860 			 * the device we found. We could ask the
5861 			 * transport to give us the list of all
5862 			 * devices connected to our port but we
5863 			 * probably don't need to know all the devices
5864 			 * so let us just constuct a list with only
5865 			 * one device instead.
5866 			 */
5867 
5868 			fc_ulp_copy_portmap(&map, pd);
5869 			fcip_rt_update(fptr, &map, 1);
5870 
5871 			mutex_enter(&fptr->fcip_rt_mutex);
5872 			frp = fcip_lookup_rtable(fptr, pwwn,
5873 			    FCIP_COMPARE_NWWN);
5874 			mutex_exit(&fptr->fcip_rt_mutex);
5875 
5876 			fdestp = fcip_add_dest(fptr, frp);
5877 		} else if (fcip_farp_supported &&
5878 			(FC_TOP_EXTERNAL(fport->fcipp_topology) ||
5879 			(fport->fcipp_topology == FC_TOP_PT_PT))) {
5880 			/*
5881 			 * The Name server request failed so
5882 			 * issue an FARP
5883 			 */
5884 			fdestp = fcip_do_farp(fptr, pwwn, NULL,
5885 				0, 0);
5886 		} else {
5887 		    fdestp = NULL;
5888 		}
5889 	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
5890 		/*
5891 		 * Prepare a dest structure to return to caller
5892 		 */
5893 		fdestp = fcip_add_dest(fptr, frp);
5894 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5895 		    (CE_NOTE, "in fcip get dest non fabric"));
5896 	}
5897 	return (fdestp);
5898 }
5899 
5900 
5901 /*
5902  * Endian clean WWN compare.
5903  * Returns 0 if they compare OK, else return non zero value.
5904  * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN,
5905  * FCIP_COMPARE_BROADCAST.
5906  */
5907 static int
fcip_wwn_compare(la_wwn_t * wwn1,la_wwn_t * wwn2,int flag)5908 fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag)
5909 {
5910 	int rval = 0;
5911 	if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) ||
5912 	    (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) ||
5913 	    (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) ||
5914 	    (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) ||
5915 	    (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) ||
5916 	    (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) {
5917 		rval = 1;
5918 	} else if ((flag == FCIP_COMPARE_PWWN) &&
5919 	    (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) ||
5920 	    (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) {
5921 		rval = 1;
5922 	}
5923 	return (rval);
5924 }
5925 
5926 
5927 /*
5928  * Add an entry for a remote port in the dest hash table. Dest hash table
5929  * has entries for ports in the routing hash table with which we decide
5930  * to establish IP communication with. The no. of entries in the dest hash
5931  * table must always be less than or equal to the entries in the routing
5932  * hash table. Every entry in the dest hash table ofcourse must have a
5933  * corresponding entry in the routing hash table
5934  */
5935 static struct fcip_dest *
fcip_add_dest(struct fcip * fptr,struct fcip_routing_table * frp)5936 fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp)
5937 {
5938 	struct fcip_dest *fdestp = NULL;
5939 	la_wwn_t	*pwwn;
5940 	int hash_bucket;
5941 	struct fcip_dest *fdest_new;
5942 
5943 	if (frp == NULL) {
5944 		return (fdestp);
5945 	}
5946 
5947 	pwwn = &frp->fcipr_pwwn;
5948 	mutex_enter(&fptr->fcip_dest_mutex);
5949 	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
5950 	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
5951 	    (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket));
5952 
5953 	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
5954 
5955 	fdestp = fptr->fcip_dest[hash_bucket];
5956 	while (fdestp != NULL) {
5957 		mutex_enter(&fdestp->fcipd_mutex);
5958 		if (fdestp->fcipd_rtable) {
5959 			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
5960 			    FCIP_COMPARE_PWWN) == 0) {
5961 				mutex_exit(&fdestp->fcipd_mutex);
5962 				mutex_exit(&fptr->fcip_dest_mutex);
5963 				return (fdestp);
5964 			}
5965 		}
5966 		mutex_exit(&fdestp->fcipd_mutex);
5967 		fdestp = fdestp->fcipd_next;
5968 	}
5969 
5970 	ASSERT(fdestp == NULL);
5971 
5972 	fdest_new = (struct fcip_dest *)
5973 			kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP);
5974 
5975 	mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL);
5976 	fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket];
5977 	fdest_new->fcipd_refcnt = 0;
5978 	fdest_new->fcipd_rtable = frp;
5979 	fdest_new->fcipd_ncmds = 0;
5980 	fptr->fcip_dest[hash_bucket] = fdest_new;
5981 	fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED;
5982 
5983 	mutex_exit(&fptr->fcip_dest_mutex);
5984 	return (fdest_new);
5985 }
5986 
5987 /*
5988  * Cleanup the dest hash table and remove all entries
5989  */
5990 static void
fcip_cleanup_dest(struct fcip * fptr)5991 fcip_cleanup_dest(struct fcip *fptr)
5992 {
5993 	struct fcip_dest *fdestp = NULL;
5994 	struct fcip_dest *fdest_delp = NULL;
5995 	int i;
5996 
5997 	mutex_enter(&fptr->fcip_dest_mutex);
5998 
5999 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
6000 		fdestp = fptr->fcip_dest[i];
6001 		while (fdestp != NULL) {
6002 			mutex_destroy(&fdestp->fcipd_mutex);
6003 			fdest_delp = fdestp;
6004 			fdestp = fdestp->fcipd_next;
6005 			kmem_free(fdest_delp, sizeof (struct fcip_dest));
6006 			fptr->fcip_dest[i] = NULL;
6007 		}
6008 	}
6009 	mutex_exit(&fptr->fcip_dest_mutex);
6010 }
6011 
6012 
6013 /*
6014  * Send FARP requests for Fabric ports when we don't have the port
6015  * we wish to talk to in our routing hash table. FARP is specially required
6016  * to talk to FC switches for inband switch management. Most FC switches
6017  * today have a switch FC IP address for IP over FC inband switch management
6018  * but the WWN and Port_ID for this traffic is not available through the
6019  * Nameservers since the switch themeselves are transparent.
6020  */
6021 /* ARGSUSED */
6022 static struct fcip_dest *
fcip_do_farp(struct fcip * fptr,la_wwn_t * pwwn,char * ip_addr,size_t ip_addr_len,int flags)6023 fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr,
6024     size_t ip_addr_len, int flags)
6025 {
6026 	fcip_pkt_t		*fcip_pkt;
6027 	fc_packet_t		*fc_pkt;
6028 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6029 	la_els_farp_t		farp_cmd;
6030 	la_els_farp_t		*fcmd;
6031 	struct fcip_dest	*fdestp = NULL;
6032 	int			rval;
6033 	clock_t			farp_lbolt;
6034 	la_wwn_t		broadcast_wwn;
6035 	struct fcip_dest	*bdestp;
6036 	struct fcip_routing_table 	*frp;
6037 
6038 	bdestp = fcip_get_dest(fptr, &broadcast_wwn);
6039 
6040 	if (bdestp == NULL) {
6041 		return (fdestp);
6042 	}
6043 
6044 	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
6045 	    sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP);
6046 
6047 	if (fcip_pkt == NULL) {
6048 		return (fdestp);
6049 	}
6050 
6051 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6052 	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
6053 
6054 	mutex_enter(&bdestp->fcipd_mutex);
6055 	if (bdestp->fcipd_rtable == NULL) {
6056 		mutex_exit(&bdestp->fcipd_mutex);
6057 		fcip_ipkt_free(fcip_pkt);
6058 		return (fdestp);
6059 	}
6060 
6061 	fcip_pkt->fcip_pkt_dest = bdestp;
6062 	fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev;
6063 
6064 	bdestp->fcipd_ncmds++;
6065 	mutex_exit(&bdestp->fcipd_mutex);
6066 
6067 	fcip_init_broadcast_pkt(fcip_pkt, NULL, 1);
6068 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6069 
6070 	/*
6071 	 * Now initialize the FARP payload itself
6072 	 */
6073 	fcmd = &farp_cmd;
6074 	fcmd->ls_code.ls_code = LA_ELS_FARP_REQ;
6075 	fcmd->ls_code.mbz = 0;
6076 	/*
6077 	 * for now just match the Port WWN since the other match addr
6078 	 * code points are optional. We can explore matching the IP address
6079 	 * if needed
6080 	 */
6081 	if (ip_addr) {
6082 		fcmd->match_addr = FARP_MATCH_WW_PN_IPv4;
6083 	} else {
6084 		fcmd->match_addr = FARP_MATCH_WW_PN;
6085 	}
6086 
6087 	/*
6088 	 * Request the responder port to log into us - that way
6089 	 * the Transport is aware of the remote port when we create
6090 	 * an entry for it in our tables
6091 	 */
6092 	fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI;
6093 	fcmd->req_id = fport->fcipp_sid;
6094 	fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id;
6095 	bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t));
6096 	bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t));
6097 	bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t));
6098 	/*
6099 	 * copy in source IP address if we get to know it
6100 	 */
6101 	if (ip_addr) {
6102 		bcopy(ip_addr, fcmd->resp_ip, ip_addr_len);
6103 	}
6104 
6105 	fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t);
6106 	fc_pkt->pkt_rsplen = sizeof (la_els_farp_t);
6107 	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
6108 	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6109 
6110 	/*
6111 	 * Endian safe copy
6112 	 */
6113 	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
6114 	    sizeof (la_els_farp_t));
6115 
6116 	/*
6117 	 * send the packet in polled mode.
6118 	 */
6119 	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
6120 	if (rval != FC_SUCCESS) {
6121 		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6122 		    "fcip_transport of farp pkt failed 0x%x", rval));
6123 		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
6124 		fcip_ipkt_free(fcip_pkt);
6125 
6126 		mutex_enter(&bdestp->fcipd_mutex);
6127 		bdestp->fcipd_ncmds--;
6128 		mutex_exit(&bdestp->fcipd_mutex);
6129 
6130 		return (fdestp);
6131 	}
6132 
6133 	farp_lbolt = ddi_get_lbolt();
6134 	farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT);
6135 
6136 	mutex_enter(&fptr->fcip_mutex);
6137 	fptr->fcip_farp_rsp_flag = 0;
6138 	while (!fptr->fcip_farp_rsp_flag) {
6139 		if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex,
6140 		    farp_lbolt) == -1) {
6141 			/*
6142 			 * No FARP response from any destination port
6143 			 * so bail out.
6144 			 */
6145 			fptr->fcip_farp_rsp_flag = 1;
6146 		} else {
6147 			/*
6148 			 * We received a FARP response - check to see if the
6149 			 * response was in reply to our FARP request.
6150 			 */
6151 
6152 			mutex_enter(&fptr->fcip_rt_mutex);
6153 			frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
6154 			mutex_exit(&fptr->fcip_rt_mutex);
6155 
6156 			if ((frp != NULL) &&
6157 			    !FCIP_RTE_UNAVAIL(frp->fcipr_state)) {
6158 				fdestp = fcip_get_dest(fptr, pwwn);
6159 			} else {
6160 				/*
6161 				 * Not our FARP response so go back and wait
6162 				 * again till FARP_TIMEOUT expires
6163 				 */
6164 				fptr->fcip_farp_rsp_flag = 0;
6165 			}
6166 		}
6167 	}
6168 	mutex_exit(&fptr->fcip_mutex);
6169 
6170 	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6171 	fcip_ipkt_free(fcip_pkt);
6172 	mutex_enter(&bdestp->fcipd_mutex);
6173 	bdestp->fcipd_ncmds--;
6174 	mutex_exit(&bdestp->fcipd_mutex);
6175 	return (fdestp);
6176 }
6177 
6178 
6179 
6180 /*
6181  * Helper routine to PLOGI to a remote port we wish to talk to.
6182  * This may not be required since the port driver does logins anyway,
6183  * but this can be required in fabric cases since FARP requests/responses
6184  * don't require you to be logged in?
6185  */
6186 
/* ARGSUSED */
static int
fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp)
{
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	la_els_logi_t		logi;
	int			rval;
	fc_frame_hdr_t		*fr_hdr;

	/*
	 * Don't bother to login for broadcast RTE entries
	 */
	if ((frp->fcipr_d_id.port_id == 0x0) ||
	    (frp->fcipr_d_id.port_id == 0xffffff)) {
		return (FC_FAILURE);
	}

	/*
	 * We shouldn't pound in too many logins here
	 *
	 * NOTE(review): fcipr_state is read here without fcip_rt_mutex
	 * held - confirm callers guarantee the entry is stable.
	 */
	if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS ||
	    frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		return (FC_SUCCESS);
	}

	/* internal packet: PLOGI payload out, ACC/RJT response back */
	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), frp->fcipr_pd, KM_SLEEP);

	if (fcip_pkt == NULL) {
		return (FC_FAILURE);
	}

	/*
	 * Update back pointer for login state update
	 * (fcip_ipkt_callback uses fcip_pkt_frp to record the result)
	 */
	fcip_pkt->fcip_pkt_frp = frp;
	frp->fcipr_state = FCIP_RT_LOGIN_PROGRESS;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * Initialize frame header for ELS
	 */
	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
	fr_hdr->r_ctl = R_CTL_ELS_REQ;
	fr_hdr->type = FC_TYPE_EXTENDED_LS;
	fr_hdr->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	fr_hdr->df_ctl = 0;
	fr_hdr->s_id = fport->fcipp_sid.port_id;
	fr_hdr->d_id = frp->fcipr_d_id.port_id;
	fr_hdr->seq_cnt = 0;
	fr_hdr->ox_id = 0xffff;
	fr_hdr->rx_id = 0xffff;
	fr_hdr->ro = 0;

	fc_pkt->pkt_rsplen = sizeof (la_els_logi_t);
	fc_pkt->pkt_comp = fcip_ipkt_callback;
	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fc_pkt->pkt_timeout = 10;	/* 10 seconds */
	/* driver-level time-to-live tracked in timeout ticks */
	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/*
	 * Everybody does class 3, so let's just set it.  If the transport
	 * knows better, it will deal with the class appropriately.
	 */

	fc_pkt->pkt_tran_flags = FC_TRAN_INTR | FC_TRAN_CLASS3;

	/*
	 * we need only fill in the ls_code and the cmd frame header
	 */
	bzero((void *)&logi, sizeof (la_els_logi_t));
	logi.ls_code.ls_code = LA_ELS_PLOGI;
	logi.ls_code.mbz = 0;

	/* endian-safe copy of the payload into the DMA-able buffer */
	FCIP_CP_OUT((uint8_t *)&logi, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
	    sizeof (la_els_logi_t));

	rval = fc_ulp_login(fport->fcipp_handle, &fc_pkt, 1);
	if (rval != FC_SUCCESS) {
		cmn_err(CE_WARN,
		    "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x",
		    frp->fcipr_d_id.port_id, rval);
		fcip_ipkt_free(fcip_pkt);
	}
	return (rval);
}
6278 
6279 /*
6280  * The packet callback routine - called from the transport/FCA after
6281  * it is done DMA'ing/sending out the packet contents on the wire so
6282  * that the alloc'ed packet can be freed
6283  */
static void
fcip_ipkt_callback(fc_packet_t *fc_pkt)
{
	ls_code_t			logi_req;
	ls_code_t			logi_resp;
	fcip_pkt_t			*fcip_pkt;
	fc_frame_hdr_t			*fr_hdr;
	struct fcip 			*fptr;
	fcip_port_info_t		*fport;
	struct fcip_routing_table	*frp;

	fr_hdr = &fc_pkt->pkt_cmd_fhdr;

	/*
	 * Pull the ELS request and response codes out of the DMA-able
	 * command/response buffers (endian-safe copies).
	 */
	FCIP_CP_IN(fc_pkt->pkt_resp, (uint8_t *)&logi_resp,
	    fc_pkt->pkt_resp_acc, sizeof (logi_resp));

	FCIP_CP_IN(fc_pkt->pkt_cmd, (uint8_t *)&logi_req, fc_pkt->pkt_cmd_acc,
	    sizeof (logi_req));

	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	frp = fcip_pkt->fcip_pkt_frp;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;

	/* this callback is currently only used for PLOGI requests */
	ASSERT(logi_req.ls_code == LA_ELS_PLOGI);

	if (fc_pkt->pkt_state != FC_PKT_SUCCESS ||
	    logi_resp.ls_code != LA_ELS_ACC) {
		/* EMPTY */

		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "opcode : 0x%x to d_id: 0x%x failed",
		    logi_req.ls_code, fr_hdr->d_id));

		/*
		 * Login failed: invalidate the routing entry and let the
		 * timeout machinery revisit it after FCIP_RTE_TIMEOUT / 2
		 * ticks have elapsed.
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_state = PORT_DEVICE_INVALID;
		frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
		    (FCIP_RTE_TIMEOUT / 2);
		mutex_exit(&fptr->fcip_rt_mutex);
	} else {
		fc_portid_t	d_id;

		d_id.port_id = fr_hdr->d_id;
		d_id.priv_lilp_posit = 0;

		/*
		 * Update PLOGI results; FCA Handle, and Port device handles
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_pd = fc_pkt->pkt_pd;
		frp->fcipr_fca_dev =
		    fc_ulp_get_fca_device(fport->fcipp_handle, d_id);
		frp->fcipr_state = PORT_DEVICE_LOGGED_IN;
		mutex_exit(&fptr->fcip_rt_mutex);
	}

	/* the internal packet is consumed in either case */
	fcip_ipkt_free(fcip_pkt);
}
6342 
6343 
6344 /*
6345  * pkt_alloc routine for outbound IP datagrams. The cache constructor
6346  * Only initializes the pkt_cmd_dma (which is where the outbound datagram
6347  * is stuffed) since we don't expect response
6348  */
static fcip_pkt_t *
fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, int flags, int datalen)
{
	fcip_pkt_t 	*fcip_pkt;
	fc_packet_t	*fc_pkt;
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	fcip_pkt = kmem_cache_alloc(fptr->fcip_xmit_cache, flags);
	if (fcip_pkt == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "fcip_pkt_alloc: kmem_cache_alloc failed"));
		return (NULL);
	}

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
	fc_pkt->pkt_tran_flags = 0;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	/*
	 * the cache constructor has allocated the dma handle;
	 * bind the mblk's data buffer directly for outbound DMA.
	 */
	fc_pkt->pkt_cmd = (caddr_t)bp->b_rptr;
	if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
	    (caddr_t)bp->b_rptr, datalen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &pkt_cookie,
	    &fc_pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED) {
			goto fail;
	}

	/* record the bind so the fail path / pkt_free unbinds it */
	fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;

	/* buffer too scattered for the FCA's scatter/gather limit */
	if (fc_pkt->pkt_cmd_cookie_cnt >
	    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
		goto fail;
	}

	ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);

	/* copy out the full cookie list for the FCA's use */
	cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
	    KM_NOSLEEP);

	if (cp == NULL) {
		goto fail;
	}

	*cp = pkt_cookie;
	cp++;
	for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
		*cp = pkt_cookie;
	}

	fc_pkt->pkt_cmdlen = datalen;

	/* reset per-packet state left over from any previous cache use */
	fcip_pkt->fcip_pkt_mp = NULL;
	fcip_pkt->fcip_pkt_wq = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_frp = NULL;

	return (fcip_pkt);
fail:
	/* fcip_pkt_free unbinds/frees whatever fcip_pkt_dma_flags records */
	if (fcip_pkt) {
		fcip_pkt_free(fcip_pkt, 0);
	}
	return ((fcip_pkt_t *)0);
}
6425 
6426 /*
6427  * Free a packet and all its associated resources
6428  */
static void
fcip_pkt_free(struct fcip_pkt *fcip_pkt, int free_mblk)
{
	fc_packet_t	*fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;

	/* release the cookie list copied out in fcip_pkt_alloc */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	/* unbind/free DMA resources before releasing the mblk they map */
	fcip_free_pkt_dma(fcip_pkt);
	if (free_mblk && fcip_pkt->fcip_pkt_mp) {
		freemsg(fcip_pkt->fcip_pkt_mp);
		fcip_pkt->fcip_pkt_mp = NULL;
	}

	(void) fc_ulp_uninit_packet(fptr->fcip_port_info->fcipp_handle, fc_pkt);

	kmem_cache_free(fptr->fcip_xmit_cache, (void *)fcip_pkt);
}
6451 
6452 /*
6453  * Allocate a Packet for internal driver use. This is for requests
6454  * that originate from within the driver
6455  */
static fcip_pkt_t *
fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, int resplen,
    opaque_t pd, int flags)
{
	fcip_pkt_t 		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	int			(*cb)(caddr_t);
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			real_len;
	uint_t			held_here = 0;
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;

	/* map the kmem sleep flag onto the DDI DMA wait/nowait callback */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	/* fcip_pkt and the FCA's private area are one allocation */
	fcip_pkt = kmem_zalloc((sizeof (fcip_pkt_t) +
	    fport->fcipp_fca_pkt_size), flags);

	if (fcip_pkt == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_WARN, "pkt alloc of ineternal pkt failed"));
		goto fail;
	}

	fcip_pkt->fcip_pkt_flags = FCIP_PKT_INTERNAL;
	fcip_pkt->fcip_pkt_fptr = fptr;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
	fc_pkt->pkt_tran_flags = 0;
	fc_pkt->pkt_cmdlen = 0;
	fc_pkt->pkt_rsplen = 0;
	fc_pkt->pkt_datalen = 0;
	/* FCA private data lives immediately past the fcip_pkt_t */
	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)fcip_pkt +
	    sizeof (fcip_pkt_t));
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/*
	 * Command buffer: allocate handle, allocate DMA-able memory,
	 * bind, then copy out the cookie list. Each step records its
	 * progress in fcip_pkt_dma_flags so the fail path (via
	 * fcip_ipkt_free -> fcip_free_pkt_dma) can unwind.
	 */
	if (cmdlen) {
		if (ddi_dma_alloc_handle(fptr->fcip_dip,
		    &fport->fcipp_cmd_dma_attr, cb, NULL,
		    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fc_pkt->pkt_cmd_dma, cmdlen,
		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
		    cb, NULL, (caddr_t *)&fc_pkt->pkt_cmd,
		    &real_len, &fc_pkt->pkt_cmd_acc) != DDI_SUCCESS) {
			goto fail;
		}

		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_MEM;
		fc_pkt->pkt_cmdlen = cmdlen;

		if (real_len < cmdlen) {
			goto fail;
		}

		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
		    (caddr_t)fc_pkt->pkt_cmd, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt_cookie, &fc_pkt->pkt_cmd_cookie_cnt) !=
		    DDI_DMA_MAPPED) {
			goto fail;
		}

		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;

		if (fc_pkt->pkt_cmd_cookie_cnt >
		    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);

		cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
		    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
		    KM_NOSLEEP);

		if (cp == NULL) {
			goto fail;
		}

		*cp = pkt_cookie;
		cp++;
		for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
			ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
			*cp = pkt_cookie;
		}
	}

	/*
	 * Response buffer: same alloc/bind/cookie sequence as above.
	 * NOTE(review): the bind uses DDI_DMA_WRITE like the command
	 * buffer; a device-written response would conventionally be
	 * DDI_DMA_READ - confirm against the FCA's expectations.
	 */
	if (resplen) {
		if (ddi_dma_alloc_handle(fptr->fcip_dip,
		    &fport->fcipp_resp_dma_attr, cb, NULL,
		    &fc_pkt->pkt_resp_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fc_pkt->pkt_resp_dma, resplen,
		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
		    cb, NULL, (caddr_t *)&fc_pkt->pkt_resp,
		    &real_len, &fc_pkt->pkt_resp_acc) != DDI_SUCCESS) {
			goto fail;
		}

		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_MEM;

		if (real_len < resplen) {
			goto fail;
		}

		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_resp_dma, NULL,
		    (caddr_t)fc_pkt->pkt_resp, real_len,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
		    &pkt_cookie, &fc_pkt->pkt_resp_cookie_cnt) !=
		    DDI_DMA_MAPPED) {
			goto fail;
		}

		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_BOUND;
		fc_pkt->pkt_rsplen = resplen;

		if (fc_pkt->pkt_resp_cookie_cnt >
		    fport->fcipp_resp_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		ASSERT(fc_pkt->pkt_resp_cookie_cnt != 0);

		cp = fc_pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
		    fc_pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
		    KM_NOSLEEP);

		if (cp == NULL) {
			goto fail;
		}

		*cp = pkt_cookie;
		cp++;
		for (cnt = 1; cnt < fc_pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
			ddi_dma_nextcookie(fc_pkt->pkt_resp_dma, &pkt_cookie);
			*cp = pkt_cookie;
		}
	}

	/*
	 * Initialize pkt_pd prior to calling fc_ulp_init_packet
	 */

	fc_pkt->pkt_pd = pd;

	/*
	 * Ask the FCA to bless the internal packet
	 */
	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		goto fail;
	}

	/*
	 * Keep track of # of ipkts alloc-ed
	 * This function can get called with mutex either held or not. So, we'll
	 * grab mutex if it is not already held by this thread.
	 * This has to be cleaned up someday.
	 */
	if (!MUTEX_HELD(&fptr->fcip_mutex)) {
		held_here = 1;
		mutex_enter(&fptr->fcip_mutex);
	}

	fptr->fcip_num_ipkts_pending++;

	if (held_here)
		mutex_exit(&fptr->fcip_mutex);

	return (fcip_pkt);
fail:
	/* fcip_ipkt_free unwinds whatever fcip_pkt_dma_flags records */
	if (fcip_pkt) {
		fcip_ipkt_free(fcip_pkt);
	}

	return (NULL);
}
6639 
6640 /*
6641  * free up an internal IP packet (like a FARP pkt etc)
6642  */
static void
fcip_ipkt_free(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t		*fc_pkt;
	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(fptr != NULL);
	/* must not hold fcip_mutex: we take it below for the ipkt count */
	ASSERT(!mutex_owned(&fptr->fcip_mutex));

	/* One less ipkt to wait for */
	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_num_ipkts_pending)	/* Safety check */
		fptr->fcip_num_ipkts_pending--;
	mutex_exit(&fptr->fcip_mutex);

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/* release the cookie lists copied out by fcip_ipkt_alloc */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	if (fc_pkt->pkt_resp_cookie != NULL) {
		kmem_free(fc_pkt->pkt_resp_cookie, fc_pkt->pkt_resp_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_resp_cookie = NULL;
	}

	/* best-effort uninit; failure is logged but teardown continues */
	if (fc_ulp_uninit_packet(fport->fcipp_handle, fc_pkt) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
		    "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p",
		    (void *)fc_pkt));
	}
	fcip_free_pkt_dma(fcip_pkt);
	/* fcip_pkt and the FCA private area were a single allocation */
	kmem_free(fcip_pkt, (sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size));
}
6681 
6682 /*
6683  * initialize a unicast request. This is a misnomer because even the
6684  * broadcast requests are initialized with this routine
6685  */
6686 static void
fcip_init_unicast_pkt(fcip_pkt_t * fcip_pkt,fc_portid_t sid,fc_portid_t did,void (* comp)())6687 fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, fc_portid_t did,
6688     void (*comp) ())
6689 {
6690 	fc_packet_t		*fc_pkt;
6691 	fc_frame_hdr_t		*fr_hdr;
6692 	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6693 
6694 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6695 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6696 
6697 	fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6698 	fr_hdr->s_id = sid.port_id;
6699 	fr_hdr->d_id = did.port_id;
6700 	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6701 	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ;
6702 	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6703 	fr_hdr->seq_cnt = 0;
6704 	fr_hdr->ox_id = 0xffff;
6705 	fr_hdr->rx_id = 0xffff;
6706 	fr_hdr->ro = 0;
6707 	/*
6708 	 * reset all the length fields
6709 	 */
6710 	fc_pkt->pkt_rsplen = 0;
6711 	fc_pkt->pkt_datalen = 0;
6712 	fc_pkt->pkt_comp = comp;
6713 	if (comp) {
6714 		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6715 	} else {
6716 		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6717 	}
6718 	fc_pkt->pkt_tran_type = FC_PKT_OUTBOUND | FC_PKT_IP_WRITE;
6719 	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6720 	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6721 }
6722 
6723 
6724 /*
6725  * Initialize a fcip_packet for broadcast data transfers
6726  */
6727 static void
fcip_init_broadcast_pkt(fcip_pkt_t * fcip_pkt,void (* comp)(),int is_els)6728 fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), int is_els)
6729 {
6730 	fc_packet_t		*fc_pkt;
6731 	fc_frame_hdr_t		*fr_hdr;
6732 	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6733 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6734 	uint32_t		sid;
6735 	uint32_t		did;
6736 
6737 	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6738 	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6739 	sid = fport->fcipp_sid.port_id;
6740 
6741 	if (is_els) {
6742 		fr_hdr->r_ctl = R_CTL_ELS_REQ;
6743 	} else {
6744 		fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6745 	}
6746 	fr_hdr->s_id = sid;
6747 	/*
6748 	 * The destination broadcast address depends on the topology
6749 	 * of the underlying port
6750 	 */
6751 	did = fptr->fcip_broadcast_did;
6752 	/*
6753 	 * mark pkt a broadcast pkt
6754 	 */
6755 	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6756 
6757 	fr_hdr->d_id = did;
6758 	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6759 	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
6760 	fr_hdr->f_ctl &= ~(F_CTL_SEQ_INITIATIVE);
6761 	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6762 	fr_hdr->seq_cnt = 0;
6763 	fr_hdr->ox_id = 0xffff;
6764 	fr_hdr->rx_id = 0xffff;
6765 	fr_hdr->ro = 0;
6766 	fc_pkt->pkt_comp = comp;
6767 
6768 	if (comp) {
6769 		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6770 	} else {
6771 		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6772 	}
6773 
6774 	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6775 	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6776 	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6777 }
6778 
6779 
6780 
6781 /*
6782  * Free up all DMA resources associated with an allocated packet
6783  */
static void
fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t	*fc_pkt;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in freepktdma : flags 0x%x",
	    fcip_pkt->fcip_pkt_dma_flags));

	/*
	 * Tear down only what the allocation path recorded in
	 * fcip_pkt_dma_flags, unbinding before freeing the memory.
	 */
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_cmd_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_cmd_acc);
	}

	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_resp_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_resp_acc);
	}
	/*
	 * for internal commands, we need to free up the dma handles too.
	 * This is done in the cache destructor for non internal cmds
	 */
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_INTERNAL) {
		if (fc_pkt->pkt_cmd_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
		}
		if (fc_pkt->pkt_resp_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_resp_dma);
		}
	}
}
6821 
6822 
6823 /*
6824  * helper routine to generate a string, given an ether addr
6825  */
6826 static void
fcip_ether_to_str(struct ether_addr * e,caddr_t s)6827 fcip_ether_to_str(struct ether_addr *e, caddr_t s)
6828 {
6829 	int i;
6830 
6831 	for (i = 0; i < sizeof (struct ether_addr); i++, s += 2) {
6832 		FCIP_DEBUG(FCIP_DEBUG_MISC,
6833 		    (CE_CONT, "0x%02X:", e->ether_addr_octet[i]));
6834 		(void) sprintf(s, "%02X", e->ether_addr_octet[i]);
6835 	}
6836 
6837 	*s = '\0';
6838 }
6839 
6840 /*
6841  * When a broadcast request comes from the upper streams modules, it
6842  * is ugly to look into every datagram to figure out if it is a broadcast
6843  * datagram or a unicast packet. Instead just add the broadcast entries
6844  * into our routing and dest tables and the standard hash table look ups
6845  * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig
6846  * seems to be very ethernet specific and it requires broadcasts to the
6847  * ether broadcast addr of 0xffffffffff to succeed even though we specified
6848  * in the dl_info request that our broadcast MAC addr is 0x0000000000
6849  * (can't figure out why RFC2625 did this though). So add broadcast entries
6850  * for both MAC address
6851  */
static int
fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag)
{
	fc_portmap_t 		map;
	struct fcip_routing_table *frp;
	uint32_t		did;
	la_wwn_t		broadcast_wwn;

	/*
	 * get port_id of destination for broadcast - this is topology
	 * dependent
	 */
	did = fptr->fcip_broadcast_did;

	/* first broadcast entry: the RFC 2625 all-zero MAC, as a WWN */
	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));

	map.map_did.port_id = did;
	map.map_hard_addr.hard_addr = did;
	map.map_state = PORT_DEVICE_VALID;
	if (new_flag) {
		map.map_type = PORT_DEVICE_NEW;
	} else {
		map.map_type = PORT_DEVICE_CHANGED;
	}
	map.map_flags = 0;
	map.map_pd = NULL;
	bzero(&map.map_fc4_types, sizeof (map.map_fc4_types));
	/* push the entry into the routing table, then mirror it in dest */
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	/*
	 * The Upper IP layers expect the traditional broadcast MAC addr
	 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
	 * stream through the /etc/hostname.fcipXX file. Instead of checking
	 * each phys addr for a match with fcip's ARP header broadcast
	 * addr (0x00 00 00 00 00 00), its simply easier to add another
	 * broadcast entry for 0xff ff ff ff ff ff.
	 */
	ether_to_wwn(&fcipnhbroadcastaddr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
	/* remaining map fields are reused unchanged from the first entry */
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	return (FC_SUCCESS);
}
6910 
6911 /*
6912  * We need to obtain the D_ID of the broadcast port for transmitting all
6913  * our broadcast (and multicast) requests. The broadcast D_ID as we know
6914  * is dependent on the link topology
6915  */
6916 static uint32_t
fcip_get_broadcast_did(struct fcip * fptr)6917 fcip_get_broadcast_did(struct fcip *fptr)
6918 {
6919 	fcip_port_info_t	*fport = fptr->fcip_port_info;
6920 	uint32_t		did = 0;
6921 	uint32_t		sid;
6922 
6923 	sid = fport->fcipp_sid.port_id;
6924 
6925 	switch (fport->fcipp_topology) {
6926 
6927 	case FC_TOP_PT_PT: {
6928 		fc_portmap_t	*port_map = NULL;
6929 		uint32_t	listlen = 0;
6930 
6931 		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
6932 		    &listlen, FC_ULP_PLOGI_DONTCARE) == FC_SUCCESS) {
6933 			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE,
6934 			    "fcip_gpmap: listlen :  0x%x", listlen));
6935 			if (listlen == 1) {
6936 				did = port_map->map_did.port_id;
6937 			}
6938 		}
6939 		if (port_map) {
6940 			kmem_free(port_map, listlen * sizeof (fc_portmap_t));
6941 		}
6942 		if (listlen != 1) {
6943 			/* Dummy return value */
6944 			return (0x00FFFFFF);
6945 		}
6946 		break;
6947 	}
6948 
6949 	case FC_TOP_NO_NS:
6950 	/* FALLTHROUGH */
6951 	case FC_TOP_FABRIC:
6952 		/*
6953 		 * The broadcast address is the same whether or not
6954 		 * the switch/fabric contains a Name service.
6955 		 */
6956 		did = 0x00FFFFFF;
6957 		break;
6958 
6959 	case FC_TOP_PUBLIC_LOOP:
6960 		/*
6961 		 * The open replicate primitive must not be used. The
6962 		 * broadcast sequence is simply sent to ALPA 0x00. The
6963 		 * fabric controller then propagates the broadcast to all
6964 		 * other ports. The fabric propagates the broadcast by
6965 		 * using the OPNfr primitive.
6966 		 */
6967 		did = 0x00;
6968 		break;
6969 
6970 	case FC_TOP_PRIVATE_LOOP:
6971 		/*
6972 		 * The source port for broadcast in private loop mode
6973 		 * must send an OPN(fr) signal forcing all ports in the
6974 		 * loop to replicate the frames that they receive.
6975 		 */
6976 		did = 0x00FFFFFF;
6977 		break;
6978 
6979 	case FC_TOP_UNKNOWN:
6980 	/* FALLTHROUGH */
6981 	default:
6982 		did = sid;
6983 		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
6984 		    "fcip(0x%x):unknown topology in init_broadcast_pkt",
6985 		    fptr->fcip_instance));
6986 		break;
6987 	}
6988 
6989 	return (did);
6990 }
6991 
6992 
6993 /*
6994  * fcip timeout performs 2 operations:
6995  * 1. timeout any packets sent to the FCA for which a callback hasn't
6996  *    happened. If you are wondering why we need a callback since all
6997  *    traffic in FCIP is unidirectional, hence all exchanges are unidirectional
6998  *    but wait, we can only free up the resources after we know the FCA has
6999  *    DMA'ed out the data. pretty obvious eh :)
7000  *
7001  * 2. Retire and routing table entries we marked up for retiring. This is
7002  *    to give the link a chance to recover instead of marking a port down
7003  *    when we have lost all communication with it after a link transition
7004  */
7005 static void
fcip_timeout(void * arg)7006 fcip_timeout(void *arg)
7007 {
7008 	struct fcip 			*fptr = (struct fcip *)arg;
7009 	int				i;
7010 	fcip_pkt_t			*fcip_pkt;
7011 	struct fcip_dest		*fdestp;
7012 	int 				index;
7013 	struct fcip_routing_table 	*frtp;
7014 	int				dispatch_rte_removal = 0;
7015 
7016 	mutex_enter(&fptr->fcip_mutex);
7017 
7018 	fptr->fcip_flags |= FCIP_IN_TIMEOUT;
7019 	fptr->fcip_timeout_ticks += fcip_tick_incr;
7020 
7021 	if (fptr->fcip_flags & (FCIP_DETACHED | FCIP_DETACHING | \
7022 	    FCIP_SUSPENDED | FCIP_POWER_DOWN)) {
7023 		fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7024 		mutex_exit(&fptr->fcip_mutex);
7025 		return;
7026 	}
7027 
7028 	if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
7029 		if (fptr->fcip_timeout_ticks > fptr->fcip_mark_offline) {
7030 			fptr->fcip_flags |= FCIP_LINK_DOWN;
7031 		}
7032 	}
7033 	if (!fptr->fcip_flags & FCIP_RTE_REMOVING) {
7034 		dispatch_rte_removal = 1;
7035 	}
7036 	mutex_exit(&fptr->fcip_mutex);
7037 
7038 	/*
7039 	 * Check if we have any Invalid routing table entries in our
7040 	 * hashtable we have marked off for deferred removal. If any,
7041 	 * we can spawn a taskq thread to do the cleanup for us. We
7042 	 * need to avoid cleanup in the timeout thread since we may
7043 	 * have to wait for outstanding commands to complete before
7044 	 * we retire a routing table entry. Also dispatch the taskq
7045 	 * thread only if we are already do not have a taskq thread
7046 	 * dispatched.
7047 	 */
7048 	if (dispatch_rte_removal) {
7049 		mutex_enter(&fptr->fcip_rt_mutex);
7050 		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
7051 			frtp = fptr->fcip_rtable[index];
7052 			while (frtp) {
7053 				if ((frtp->fcipr_state == FCIP_RT_INVALID) &&
7054 				    (fptr->fcip_timeout_ticks >
7055 				    frtp->fcipr_invalid_timeout)) {
7056 					/*
7057 					 * If we cannot schedule a task thread
7058 					 * let us attempt again on the next
7059 					 * tick rather than call
7060 					 * fcip_rte_remove_deferred() from here
7061 					 * directly since the routine can sleep.
7062 					 */
7063 					frtp->fcipr_state = FCIP_RT_RETIRED;
7064 
7065 					mutex_enter(&fptr->fcip_mutex);
7066 					fptr->fcip_flags |= FCIP_RTE_REMOVING;
7067 					mutex_exit(&fptr->fcip_mutex);
7068 
7069 					if (taskq_dispatch(fptr->fcip_tq,
7070 					    fcip_rte_remove_deferred, fptr,
7071 					    KM_NOSLEEP) == TASKQID_INVALID) {
7072 						/*
7073 						 * failed - so mark the entry
7074 						 * as invalid again.
7075 						 */
7076 						frtp->fcipr_state =
7077 						    FCIP_RT_INVALID;
7078 
7079 						mutex_enter(&fptr->fcip_mutex);
7080 						fptr->fcip_flags &=
7081 						    ~FCIP_RTE_REMOVING;
7082 						mutex_exit(&fptr->fcip_mutex);
7083 					}
7084 				}
7085 				frtp = frtp->fcipr_next;
7086 			}
7087 		}
7088 		mutex_exit(&fptr->fcip_rt_mutex);
7089 	}
7090 
7091 	mutex_enter(&fptr->fcip_dest_mutex);
7092 
7093 	/*
7094 	 * Now timeout any packets stuck with the transport/FCA for too long
7095 	 */
7096 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7097 		fdestp = fptr->fcip_dest[i];
7098 		while (fdestp != NULL) {
7099 			mutex_enter(&fdestp->fcipd_mutex);
7100 			for (fcip_pkt = fdestp->fcipd_head; fcip_pkt != NULL;
7101 			    fcip_pkt = fcip_pkt->fcip_pkt_next) {
7102 				if (fcip_pkt->fcip_pkt_flags &
7103 				    (FCIP_PKT_RETURNED | FCIP_PKT_IN_TIMEOUT |
7104 				    FCIP_PKT_IN_ABORT)) {
7105 					continue;
7106 				}
7107 				if (fptr->fcip_timeout_ticks >
7108 				    fcip_pkt->fcip_pkt_ttl) {
7109 					fcip_pkt->fcip_pkt_flags |=
7110 					    FCIP_PKT_IN_TIMEOUT;
7111 
7112 					mutex_exit(&fdestp->fcipd_mutex);
7113 					if (taskq_dispatch(fptr->fcip_tq,
7114 					    fcip_pkt_timeout, fcip_pkt,
7115 					    KM_NOSLEEP) == TASKQID_INVALID) {
7116 						/*
7117 						 * timeout immediately
7118 						 */
7119 						fcip_pkt_timeout(fcip_pkt);
7120 					}
7121 					mutex_enter(&fdestp->fcipd_mutex);
7122 					/*
7123 					 * The linked list is altered because
7124 					 * of one of the following reasons:
7125 					 *	a. Timeout code dequeued a pkt
7126 					 *	b. Pkt completion happened
7127 					 *
7128 					 * So restart the spin starting at
7129 					 * the head again; This is a bit
7130 					 * excessive, but okay since
7131 					 * fcip_timeout_ticks isn't incremented
7132 					 * for this spin, we will skip the
7133 					 * not-to-be-timedout packets quickly
7134 					 */
7135 					fcip_pkt = fdestp->fcipd_head;
7136 					if (fcip_pkt == NULL) {
7137 						break;
7138 					}
7139 				}
7140 			}
7141 			mutex_exit(&fdestp->fcipd_mutex);
7142 			fdestp = fdestp->fcipd_next;
7143 		}
7144 	}
7145 	mutex_exit(&fptr->fcip_dest_mutex);
7146 
7147 	/*
7148 	 * reschedule the timeout thread
7149 	 */
7150 	mutex_enter(&fptr->fcip_mutex);
7151 
7152 	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
7153 	    drv_usectohz(1000000));
7154 	fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7155 	mutex_exit(&fptr->fcip_mutex);
7156 }
7157 
7158 
7159 /*
7160  * This routine is either called from taskq or directly from fcip_timeout
7161  * does the actual job of aborting the packet
7162  */
static void
fcip_pkt_timeout(void *arg)
{
	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)arg;
	struct fcip_dest	*fdestp;
	struct fcip		*fptr;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport;
	int			rval;

	fdestp = fcip_pkt->fcip_pkt_dest;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * try to abort the pkt. FCIP_PKT_IN_ABORT is set first so the
	 * timeout scan in fcip_timeout() skips this packet while the
	 * abort is in flight.
	 */
	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_ABORT;
	rval = fc_ulp_abort(fport->fcipp_handle, fc_pkt, KM_NOSLEEP);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "fc_ulp_abort returns: 0x%x", rval));

	if (rval == FC_SUCCESS) {
		ASSERT(fdestp != NULL);

		/*
		 * The abort succeeded, so the FCA will not complete the
		 * packet: dequeue the pkt from the dest structure pkt list
		 * and release its resources here.
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
		mutex_enter(&fdestp->fcipd_mutex);
		rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
		ASSERT(rval == 1);
		mutex_exit(&fdestp->fcipd_mutex);

		/*
		 * Now cleanup the pkt and free the mblk
		 */
		fcip_pkt_free(fcip_pkt, 1);
	} else {
		/*
		 * abort failed - just mark the pkt as done and
		 * wait for it to complete in fcip_pkt_callback since
		 * the pkt has already been xmitted by the FCA
		 *
		 * If the completion callback already ran (FCIP_PKT_RETURNED
		 * is set) there will be no further callback, so dequeue and
		 * free the packet ourselves.
		 *
		 * NOTE(review): unlike the success path, this branch
		 * dereferences fdestp without an ASSERT/NULL check --
		 * presumably fcip_pkt_dest is always valid for a packet on
		 * a dest queue; confirm.
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_TIMEOUT;
		if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_RETURNED) {
			fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
			mutex_enter(&fdestp->fcipd_mutex);
			rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
			ASSERT(rval == 1);
			mutex_exit(&fdestp->fcipd_mutex);

			fcip_pkt_free(fcip_pkt, 1);
		}
		return;
	}
}
7222 
7223 
7224 /*
7225  * Remove  a routing table entry marked for deferred removal. This routine
7226  * unlike fcip_pkt_timeout, is always called from a taskq context
7227  */
static void
fcip_rte_remove_deferred(void *arg)
{
	struct fcip			*fptr = (struct fcip *)arg;
	int				hash_bucket;
	struct fcip_dest		*fdestp;
	la_wwn_t			*pwwn;
	int				index;
	struct fcip_routing_table	*frtp, *frtp_next, *frtp_prev;


	/*
	 * Walk every routing-table hash bucket looking for entries that
	 * fcip_timeout() marked FCIP_RT_RETIRED. An entry is unlinked and
	 * freed only when its matching destination structure exists and
	 * has no commands outstanding; otherwise it is left in place to
	 * be revisited on a later pass.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		frtp = fptr->fcip_rtable[index];
		frtp_prev = NULL;
		while (frtp) {
			/* save the successor before we potentially free frtp */
			frtp_next = frtp->fcipr_next;

			if (frtp->fcipr_state == FCIP_RT_RETIRED) {

				pwwn = &frtp->fcipr_pwwn;
				/*
				 * Get hold of destination pointer
				 */
				mutex_enter(&fptr->fcip_dest_mutex);

				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

				fdestp = fptr->fcip_dest[hash_bucket];
				while (fdestp != NULL) {
					mutex_enter(&fdestp->fcipd_mutex);
					if (fdestp->fcipd_rtable) {
						if (fcip_wwn_compare(pwwn,
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							&fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				if (fdestp == NULL) {
					/* no dest entry - skip this RTE */
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				mutex_enter(&fdestp->fcipd_mutex);
				if (fdestp->fcipd_ncmds) {
					/*
					 * Instead of waiting to drain commands
					 * let us revisit this RT entry in
					 * the next pass.
					 */
					mutex_exit(&fdestp->fcipd_mutex);
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				/*
				 * We are clean, so remove the RTE
				 */
				fdestp->fcipd_rtable = NULL;
				mutex_exit(&fdestp->fcipd_mutex);

				if (frtp_prev == NULL) {
					/* first element */
					fptr->fcip_rtable[index] =
					    frtp->fcipr_next;
				} else {
					frtp_prev->fcipr_next =
					    frtp->fcipr_next;
				}
				kmem_free(frtp,
				    sizeof (struct fcip_routing_table));

				/* frtp_prev is unchanged: frtp was unlinked */
				frtp = frtp_next;
			} else {
				frtp_prev = frtp;
				frtp = frtp_next;
			}
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);
	/*
	 * Clear the RTE_REMOVING flag so fcip_timeout() may dispatch
	 * another removal task on a later tick.
	 */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_RTE_REMOVING;
	mutex_exit(&fptr->fcip_mutex);
}
7325 
7326 /*
7327  * Walk through all the dest hash table entries and count up the total
7328  * no. of packets outstanding against a given port
7329  */
7330 static int
fcip_port_get_num_pkts(struct fcip * fptr)7331 fcip_port_get_num_pkts(struct fcip *fptr)
7332 {
7333 	int 			num_cmds = 0;
7334 	int 			i;
7335 	struct fcip_dest	*fdestp;
7336 
7337 	ASSERT(mutex_owned(&fptr->fcip_dest_mutex));
7338 
7339 	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7340 		fdestp = fptr->fcip_dest[i];
7341 		while (fdestp != NULL) {
7342 			mutex_enter(&fdestp->fcipd_mutex);
7343 
7344 			ASSERT(fdestp->fcipd_ncmds >= 0);
7345 
7346 			if (fdestp->fcipd_ncmds > 0) {
7347 				num_cmds += fdestp->fcipd_ncmds;
7348 			}
7349 			mutex_exit(&fdestp->fcipd_mutex);
7350 			fdestp = fdestp->fcipd_next;
7351 		}
7352 	}
7353 
7354 	return (num_cmds);
7355 }
7356 
7357 
7358 /*
7359  * Walk through the routing table for this state instance and see if there is a
7360  * PLOGI in progress for any of the entries. Return success even if we find one.
7361  */
7362 static int
fcip_plogi_in_progress(struct fcip * fptr)7363 fcip_plogi_in_progress(struct fcip *fptr)
7364 {
7365 	int				i;
7366 	struct fcip_routing_table	*frp;
7367 
7368 	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
7369 
7370 	for (i = 0; i < FCIP_RT_HASH_ELEMS; i++) {
7371 		frp = fptr->fcip_rtable[i];
7372 		while (frp) {
7373 			if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS) {
7374 				/* Found an entry where PLOGI is in progress */
7375 				return (1);
7376 			}
7377 			frp = frp->fcipr_next;
7378 		}
7379 	}
7380 
7381 	return (0);
7382 }
7383 
7384 /*
7385  * Walk through the fcip port global list and check if the given port exists in
7386  * the list. Returns "0" if port exists and "1" if otherwise.
7387  */
7388 static int
fcip_check_port_exists(struct fcip * fptr)7389 fcip_check_port_exists(struct fcip *fptr)
7390 {
7391 	fcip_port_info_t	*cur_fport;
7392 	fcip_port_info_t	*fport;
7393 
7394 	mutex_enter(&fcip_global_mutex);
7395 	fport = fptr->fcip_port_info;
7396 	cur_fport = fcip_port_head;
7397 	while (cur_fport != NULL) {
7398 		if (cur_fport == fport) {
7399 			/* Found */
7400 			mutex_exit(&fcip_global_mutex);
7401 			return (0);
7402 		} else {
7403 			cur_fport = cur_fport->fcipp_next;
7404 		}
7405 	}
7406 	mutex_exit(&fcip_global_mutex);
7407 
7408 	return (1);
7409 }
7410 
7411 /*
7412  * Constructor to initialize the sendup elements for callback into
7413  * modules upstream
7414  */
7415 
7416 /* ARGSUSED */
7417 static int
fcip_sendup_constructor(void * buf,void * arg,int flags)7418 fcip_sendup_constructor(void *buf, void *arg, int flags)
7419 {
7420 	struct fcip_sendup_elem	*msg_elem = (struct fcip_sendup_elem *)buf;
7421 	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
7422 
7423 	ASSERT(fport != NULL);
7424 
7425 	msg_elem->fcipsu_mp = NULL;
7426 	msg_elem->fcipsu_func = NULL;
7427 	msg_elem->fcipsu_next = NULL;
7428 
7429 	return (FCIP_SUCCESS);
7430 }
7431