xref: /titanic_41/usr/src/uts/sun4u/starcat/io/dman.c (revision 07d06da50d310a325b457d6330165aebab1e0064)
103831d35Sstevel /*
203831d35Sstevel  * CDDL HEADER START
303831d35Sstevel  *
403831d35Sstevel  * The contents of this file are subject to the terms of the
503831d35Sstevel  * Common Development and Distribution License (the "License").
603831d35Sstevel  * You may not use this file except in compliance with the License.
703831d35Sstevel  *
803831d35Sstevel  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
903831d35Sstevel  * or http://www.opensolaris.org/os/licensing.
1003831d35Sstevel  * See the License for the specific language governing permissions
1103831d35Sstevel  * and limitations under the License.
1203831d35Sstevel  *
1303831d35Sstevel  * When distributing Covered Code, include this CDDL HEADER in each
1403831d35Sstevel  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1503831d35Sstevel  * If applicable, add the following below this CDDL HEADER, with the
1603831d35Sstevel  * fields enclosed by brackets "[]" replaced with your own identifying
1703831d35Sstevel  * information: Portions Copyright [yyyy] [name of copyright owner]
1803831d35Sstevel  *
1903831d35Sstevel  * CDDL HEADER END
2003831d35Sstevel  */
2103831d35Sstevel 
2203831d35Sstevel /*
23d3d50737SRafael Vanoni  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
2403831d35Sstevel  * Use is subject to license terms.
2503831d35Sstevel  */
2603831d35Sstevel 
2703831d35Sstevel 
2803831d35Sstevel /*
2903831d35Sstevel  * Starcat Management Network Driver
3003831d35Sstevel  *
3103831d35Sstevel  * ****** NOTICE **** This file also resides in the SSC gate as
3203831d35Sstevel  * ****** NOTICE **** usr/src/uts/sun4u/scman/scman.c. Any changes
3303831d35Sstevel  * ****** NOTICE **** made here must be propogated there as well.
3403831d35Sstevel  *
3503831d35Sstevel  */
3603831d35Sstevel 
3703831d35Sstevel #include <sys/types.h>
3803831d35Sstevel #include <sys/proc.h>
3903831d35Sstevel #include <sys/disp.h>
4003831d35Sstevel #include <sys/kmem.h>
4103831d35Sstevel #include <sys/stat.h>
4203831d35Sstevel #include <sys/kstat.h>
4303831d35Sstevel #include <sys/ksynch.h>
4403831d35Sstevel #include <sys/stream.h>
4503831d35Sstevel #include <sys/dlpi.h>
4603831d35Sstevel #include <sys/stropts.h>
4703831d35Sstevel #include <sys/strsubr.h>
4803831d35Sstevel #include <sys/debug.h>
4903831d35Sstevel #include <sys/conf.h>
5003831d35Sstevel #include <sys/kstr.h>
5103831d35Sstevel #include <sys/errno.h>
5203831d35Sstevel #include <sys/ethernet.h>
5303831d35Sstevel #include <sys/byteorder.h>
5403831d35Sstevel #include <sys/ddi.h>
5503831d35Sstevel #include <sys/sunddi.h>
5603831d35Sstevel #include <sys/sunldi.h>
5703831d35Sstevel #include <sys/modctl.h>
5803831d35Sstevel #include <sys/strsun.h>
5903831d35Sstevel #include <sys/callb.h>
6003831d35Sstevel #include <sys/pci.h>
6103831d35Sstevel #include <netinet/in.h>
6203831d35Sstevel #include <inet/common.h>
6303831d35Sstevel #include <inet/mi.h>
6403831d35Sstevel #include <inet/nd.h>
6503831d35Sstevel #include <sys/socket.h>
6603831d35Sstevel #include <netinet/igmp_var.h>
6703831d35Sstevel #include <netinet/ip6.h>
6803831d35Sstevel #include <netinet/icmp6.h>
6903831d35Sstevel #include <inet/ip.h>
7003831d35Sstevel #include <inet/ip6.h>
7103831d35Sstevel #include <sys/file.h>
7203831d35Sstevel #include <sys/dman.h>
7303831d35Sstevel #include <sys/autoconf.h>
7403831d35Sstevel #include <sys/zone.h>
7503831d35Sstevel 
7603831d35Sstevel extern int ddi_create_internal_pathname(dev_info_t *, char *, int, minor_t);
7703831d35Sstevel 
7803831d35Sstevel #define	MAN_IDNAME	"dman"
7903831d35Sstevel #define	DMAN_INT_PATH	"/devices/pseudo/dman@0:dman"
8003831d35Sstevel #define	DMAN_PATH	"/devices/pseudo/clone@0:dman"
8103831d35Sstevel #define	ERI_IDNAME	"eri"
8203831d35Sstevel #define	ERI_PATH	"/devices/pseudo/clone@0:eri"
8303831d35Sstevel 
8403831d35Sstevel #if defined(DEBUG)
8503831d35Sstevel 
8603831d35Sstevel static void man_print_msp(manstr_t *);
8703831d35Sstevel static void man_print_man(man_t *);
8803831d35Sstevel static void man_print_mdp(man_dest_t *);
8903831d35Sstevel static void man_print_dev(man_dev_t *);
9003831d35Sstevel static void man_print_mip(mi_path_t *);
9103831d35Sstevel static void man_print_mtp(mi_time_t *);
9203831d35Sstevel static void man_print_mpg(man_pg_t *);
9303831d35Sstevel static void man_print_path(man_path_t *);
9403831d35Sstevel static void man_print_work(man_work_t *);
9503831d35Sstevel 
9603831d35Sstevel /*
9703831d35Sstevel  * Set manstr_t dlpistate (upper half of multiplexor)
9803831d35Sstevel  */
9903831d35Sstevel #define	SETSTATE(msp, state) \
10003831d35Sstevel 	MAN_DBG(MAN_DLPI, ("msp=0x%p @ %d state %s=>%s\n",		\
10103831d35Sstevel 		    (void *)msp, __LINE__, dss[msp->ms_dlpistate],	\
10203831d35Sstevel 		    dss[(state)]));					\
10303831d35Sstevel 		    msp->ms_dlpistate = (state);
10403831d35Sstevel /*
10503831d35Sstevel  * Set man_dest_t dlpistate (lower half of multiplexor)
10603831d35Sstevel  */
10703831d35Sstevel #define	D_SETSTATE(mdp, state) \
10803831d35Sstevel 	MAN_DBG(MAN_DLPI, ("dst=0x%p @ %d state %s=>%s\n",	   \
10903831d35Sstevel 		    (void *)mdp, __LINE__, dss[mdp->md_dlpistate], \
11003831d35Sstevel 		    dss[(state)]));				   \
11103831d35Sstevel 		    mdp->md_dlpistate = (state);
11203831d35Sstevel 
11303831d35Sstevel static char *promisc[] = {	/* DLPI promisc Strings */
11403831d35Sstevel 	"not used",		/* 0x00 */
11503831d35Sstevel 	"DL_PROMISC_PHYS",	/* 0x01 */
11603831d35Sstevel 	"DL_PROMISC_SAP",	/* 0x02 */
11703831d35Sstevel 	"DL_PROMISC_MULTI"	/* 0x03 */
11803831d35Sstevel };
11903831d35Sstevel 
12003831d35Sstevel static char *dps[] = {			/* DLPI Primitive Strings */
12103831d35Sstevel 	"DL_INFO_REQ",			/* 0x00 */
12203831d35Sstevel 	"DL_BIND_REQ",			/* 0x01 */
12303831d35Sstevel 	"DL_UNBIND_REQ",		/* 0x02 */
12403831d35Sstevel 	"DL_INFO_ACK",			/* 0x03 */
12503831d35Sstevel 	"DL_BIND_ACK",			/* 0x04 */
12603831d35Sstevel 	"DL_ERROR_ACK",			/* 0x05 */
12703831d35Sstevel 	"DL_OK_ACK",			/* 0x06 */
12803831d35Sstevel 	"DL_UNITDATA_REQ",		/* 0x07 */
12903831d35Sstevel 	"DL_UNITDATA_IND",		/* 0x08 */
13003831d35Sstevel 	"DL_UDERROR_IND",		/* 0x09 */
13103831d35Sstevel 	"DL_UDQOS_REQ",			/* 0x0a */
13203831d35Sstevel 	"DL_ATTACH_REQ",		/* 0x0b */
13303831d35Sstevel 	"DL_DETACH_REQ",		/* 0x0c */
13403831d35Sstevel 	"DL_CONNECT_REQ",		/* 0x0d */
13503831d35Sstevel 	"DL_CONNECT_IND",		/* 0x0e */
13603831d35Sstevel 	"DL_CONNECT_RES",		/* 0x0f */
13703831d35Sstevel 	"DL_CONNECT_CON",		/* 0x10 */
13803831d35Sstevel 	"DL_TOKEN_REQ",			/* 0x11 */
13903831d35Sstevel 	"DL_TOKEN_ACK",			/* 0x12 */
14003831d35Sstevel 	"DL_DISCONNECT_REQ",		/* 0x13 */
14103831d35Sstevel 	"DL_DISCONNECT_IND",		/* 0x14 */
14203831d35Sstevel 	"DL_SUBS_UNBIND_REQ",		/* 0x15 */
14303831d35Sstevel 	"DL_LIARLIARPANTSONFIRE",	/* 0x16 */
14403831d35Sstevel 	"DL_RESET_REQ",			/* 0x17 */
14503831d35Sstevel 	"DL_RESET_IND",			/* 0x18 */
14603831d35Sstevel 	"DL_RESET_RES",			/* 0x19 */
14703831d35Sstevel 	"DL_RESET_CON",			/* 0x1a */
14803831d35Sstevel 	"DL_SUBS_BIND_REQ",		/* 0x1b */
14903831d35Sstevel 	"DL_SUBS_BIND_ACK",		/* 0x1c */
15003831d35Sstevel 	"DL_ENABMULTI_REQ",		/* 0x1d */
15103831d35Sstevel 	"DL_DISABMULTI_REQ",		/* 0x1e */
15203831d35Sstevel 	"DL_PROMISCON_REQ",		/* 0x1f */
15303831d35Sstevel 	"DL_PROMISCOFF_REQ",		/* 0x20 */
15403831d35Sstevel 	"DL_DATA_ACK_REQ",		/* 0x21 */
15503831d35Sstevel 	"DL_DATA_ACK_IND",		/* 0x22 */
15603831d35Sstevel 	"DL_DATA_ACK_STATUS_IND",	/* 0x23 */
15703831d35Sstevel 	"DL_REPLY_REQ",			/* 0x24 */
15803831d35Sstevel 	"DL_REPLY_IND",			/* 0x25 */
15903831d35Sstevel 	"DL_REPLY_STATUS_IND",		/* 0x26 */
16003831d35Sstevel 	"DL_REPLY_UPDATE_REQ",		/* 0x27 */
16103831d35Sstevel 	"DL_REPLY_UPDATE_STATUS_IND",	/* 0x28 */
16203831d35Sstevel 	"DL_XID_REQ",			/* 0x29 */
16303831d35Sstevel 	"DL_XID_IND",			/* 0x2a */
16403831d35Sstevel 	"DL_XID_RES",			/* 0x2b */
16503831d35Sstevel 	"DL_XID_CON",			/* 0x2c */
16603831d35Sstevel 	"DL_TEST_REQ",			/* 0x2d */
16703831d35Sstevel 	"DL_TEST_IND",			/* 0x2e */
16803831d35Sstevel 	"DL_TEST_RES",			/* 0x2f */
16903831d35Sstevel 	"DL_TEST_CON",			/* 0x30 */
17003831d35Sstevel 	"DL_PHYS_ADDR_REQ",		/* 0x31 */
17103831d35Sstevel 	"DL_PHYS_ADDR_ACK",		/* 0x32 */
17203831d35Sstevel 	"DL_SET_PHYS_ADDR_REQ",		/* 0x33 */
17303831d35Sstevel 	"DL_GET_STATISTICS_REQ",	/* 0x34 */
17403831d35Sstevel 	"DL_GET_STATISTICS_ACK",	/* 0x35 */
17503831d35Sstevel };
17603831d35Sstevel 
17703831d35Sstevel #define	MAN_DLPI_MAX_PRIM	0x35
17803831d35Sstevel 
17903831d35Sstevel static char *dss[] = {			/* DLPI State Strings */
18003831d35Sstevel 	"DL_UNBOUND",			/* 0x00	*/
18103831d35Sstevel 	"DL_BIND_PENDING",		/* 0x01	*/
18203831d35Sstevel 	"DL_UNBIND_PENDING",		/* 0x02	*/
18303831d35Sstevel 	"DL_IDLE",			/* 0x03	*/
18403831d35Sstevel 	"DL_UNATTACHED",		/* 0x04	*/
18503831d35Sstevel 	"DL_ATTACH_PENDING",		/* 0x05	*/
18603831d35Sstevel 	"DL_DETACH_PENDING",		/* 0x06	*/
18703831d35Sstevel 	"DL_UDQOS_PENDING",		/* 0x07	*/
18803831d35Sstevel 	"DL_OUTCON_PENDING",		/* 0x08	*/
18903831d35Sstevel 	"DL_INCON_PENDING",		/* 0x09	*/
19003831d35Sstevel 	"DL_CONN_RES_PENDING",		/* 0x0a	*/
19103831d35Sstevel 	"DL_DATAXFER",			/* 0x0b	*/
19203831d35Sstevel 	"DL_USER_RESET_PENDING",	/* 0x0c	*/
19303831d35Sstevel 	"DL_PROV_RESET_PENDING",	/* 0x0d	*/
19403831d35Sstevel 	"DL_RESET_RES_PENDING",		/* 0x0e	*/
19503831d35Sstevel 	"DL_DISCON8_PENDING",		/* 0x0f	*/
19603831d35Sstevel 	"DL_DISCON9_PENDING",		/* 0x10	*/
19703831d35Sstevel 	"DL_DISCON11_PENDING",		/* 0x11	*/
19803831d35Sstevel 	"DL_DISCON12_PENDING",		/* 0x12	*/
19903831d35Sstevel 	"DL_DISCON13_PENDING",		/* 0x13	*/
20003831d35Sstevel 	"DL_SUBS_BIND_PND",		/* 0x14	*/
20103831d35Sstevel 	"DL_SUBS_UNBIND_PND",		/* 0x15	*/
20203831d35Sstevel };
20303831d35Sstevel 
20403831d35Sstevel static const char *lss[] = {
20503831d35Sstevel 	"UNKNOWN",	/* 0x0 */
20603831d35Sstevel 	"INIT",		/* 0x1 */
20703831d35Sstevel 	"GOOD",		/* 0x2 */
20803831d35Sstevel 	"STALE",	/* 0x3 */
20903831d35Sstevel 	"FAIL",		/* 0x4 */
21003831d35Sstevel };
21103831d35Sstevel 
21203831d35Sstevel static char *_mw_type[] = {
21303831d35Sstevel 	"OPEN_CTL",		/* 0x0 */
21403831d35Sstevel 	"CLOSE_CTL",		/* 0x1 */
21503831d35Sstevel 	"SWITCH",		/* 0x2 */
21603831d35Sstevel 	"PATH_UPDATE",		/* 0x3 */
21703831d35Sstevel 	"CLOSE",		/* 0x4 */
21803831d35Sstevel 	"CLOSE_STREAM",	/* 0x5 */
21903831d35Sstevel 	"DRATTACH",		/* 0x6 */
22003831d35Sstevel 	"DRDETACH",		/* 0x7 */
22103831d35Sstevel 	"STOP",			/* 0x8 */
22203831d35Sstevel 	"DRSWITCH",		/* 0x9 */
22303831d35Sstevel 	"KSTAT_UPDATE"		/* 0xA */
22403831d35Sstevel };
22503831d35Sstevel 
22603831d35Sstevel uint32_t		man_debug = MAN_WARN;
22703831d35Sstevel 
22803831d35Sstevel #define	man_kzalloc(a, b)	man_dbg_kzalloc(__LINE__, a, b)
22903831d35Sstevel #define	man_kfree(a, b)		man_dbg_kfree(__LINE__, a, b)
23003831d35Sstevel void	*man_dbg_kzalloc(int line, size_t size, int kmflags);
23103831d35Sstevel void	man_dbg_kfree(int line, void *buf, size_t size);
23203831d35Sstevel 
23303831d35Sstevel #else	/* DEBUG */
23403831d35Sstevel 
23503831d35Sstevel uint32_t		man_debug = 0;
23603831d35Sstevel /*
23703831d35Sstevel  * Set manstr_t dlpistate (upper half of multiplexor)
23803831d35Sstevel  */
23903831d35Sstevel #define	SETSTATE(msp, state) msp->ms_dlpistate = (state);
24003831d35Sstevel /*
24103831d35Sstevel  * Set man_dest_t dlpistate (lower half of multiplexor)
24203831d35Sstevel  */
24303831d35Sstevel #define	D_SETSTATE(mdp, state) mdp->md_dlpistate = (state);
24403831d35Sstevel 
24503831d35Sstevel #define	man_kzalloc(a, b)	kmem_zalloc(a, b)
24603831d35Sstevel #define	man_kfree(a, b)		kmem_free(a, b)
24703831d35Sstevel 
24803831d35Sstevel #endif	/* DEBUG */
24903831d35Sstevel 
25003831d35Sstevel #define	DL_PRIM(mp)	(((union DL_primitives *)(mp)->b_rptr)->dl_primitive)
25103831d35Sstevel #define	DL_PROMISCON_TYPE(mp)	\
25203831d35Sstevel 		(((union DL_primitives *)(mp)->b_rptr)->promiscon_req.dl_level)
25303831d35Sstevel #define	IOC_CMD(mp)	(((struct iocblk *)(mp)->b_rptr)->ioc_cmd)
25403831d35Sstevel 
25503831d35Sstevel /*
25603831d35Sstevel  * Start of kstat-related declarations
25703831d35Sstevel  */
25803831d35Sstevel #define	MK_NOT_COUNTER		(1<<0)	/* is it a counter? */
25903831d35Sstevel #define	MK_ERROR		(1<<2)	/* for error statistics */
26003831d35Sstevel #define	MK_NOT_PHYSICAL		(1<<3)	/* no matching physical stat */
26103831d35Sstevel 
26203831d35Sstevel typedef struct man_kstat_info_s {
26303831d35Sstevel 	char		*mk_name;	/* e.g. align_errors */
26403831d35Sstevel 	char		*mk_physname;	/* e.g. framing (NULL for same) */
26503831d35Sstevel 	char		*mk_physalias;	/* e.g. framing (NULL for same) */
26603831d35Sstevel 	uchar_t		mk_type;	/* e.g. KSTAT_DATA_UINT32 */
26703831d35Sstevel 	int		mk_flags;
26803831d35Sstevel } man_kstat_info_t;
26903831d35Sstevel 
27003831d35Sstevel /*
27103831d35Sstevel  * Master declaration macro, note that it uses token pasting
27203831d35Sstevel  */
27303831d35Sstevel #define	MK_DECLARE(name, pname, palias, bits, flags) \
27403831d35Sstevel 	{ name,		pname,	palias,	KSTAT_DATA_UINT ## bits, flags }
27503831d35Sstevel 
27603831d35Sstevel /*
27703831d35Sstevel  * Obsolete forms don't have the _sinceswitch forms, they are all errors
27803831d35Sstevel  */
27903831d35Sstevel #define	MK_OBSOLETE32(name, alias) MK_DECLARE(alias, name, alias, 32, MK_ERROR)
28003831d35Sstevel #define	MK_OBSOLETE64(name, alias) MK_DECLARE(alias, name, alias, 64, MK_ERROR)
28103831d35Sstevel 
28203831d35Sstevel /*
28303831d35Sstevel  * The only non-counters don't have any other aliases
28403831d35Sstevel  */
28503831d35Sstevel #define	MK_NOTCOUNTER32(name) MK_DECLARE(name, name, NULL, 32, MK_NOT_COUNTER)
28603831d35Sstevel #define	MK_NOTCOUNTER64(name) MK_DECLARE(name, name, NULL, 64, MK_NOT_COUNTER)
28703831d35Sstevel 
28803831d35Sstevel /*
28903831d35Sstevel  * Normal counter forms
29003831d35Sstevel  */
29103831d35Sstevel #define	MK_DECLARE32(name, alias) \
29203831d35Sstevel 	MK_DECLARE(name, name, alias, 32, 0)
29303831d35Sstevel #define	MK_DECLARE64(name, alias) \
29403831d35Sstevel 	MK_DECLARE(name, name, alias, 64, 0)
29503831d35Sstevel 
29603831d35Sstevel /*
29703831d35Sstevel  * Error counters need special MK_ERROR flag only for the non-AP form
29803831d35Sstevel  */
29903831d35Sstevel #define	MK_ERROR32(name, alias) \
30003831d35Sstevel 	MK_DECLARE(name, name, alias, 32, MK_ERROR)
30103831d35Sstevel #define	MK_ERROR64(name, alias) \
30203831d35Sstevel 	MK_DECLARE(name, name, alias, 64, MK_ERROR)
30303831d35Sstevel 
30403831d35Sstevel /*
30503831d35Sstevel  * These AP-specific stats are not backed by physical statistics
30603831d35Sstevel  */
30703831d35Sstevel #define	MK_NOTPHYS32(name) MK_DECLARE(name, NULL, NULL, 32, MK_NOT_PHYSICAL)
30803831d35Sstevel #define	MK_NOTPHYS64(name) MK_DECLARE(name, NULL, NULL, 64, MK_NOT_PHYSICAL)
30903831d35Sstevel 
31003831d35Sstevel /*
31103831d35Sstevel  * START of the actual man_kstat_info declaration using above macros
31203831d35Sstevel  */
31303831d35Sstevel static man_kstat_info_t man_kstat_info[] = {
31403831d35Sstevel 	/*
31503831d35Sstevel 	 * Link Input/Output stats
31603831d35Sstevel 	 */
31703831d35Sstevel 	MK_DECLARE32("ipackets", NULL),
31803831d35Sstevel 	MK_ERROR32("ierrors", NULL),
31903831d35Sstevel 	MK_DECLARE32("opackets", NULL),
32003831d35Sstevel 	MK_ERROR32("oerrors", NULL),
32103831d35Sstevel 	MK_ERROR32("collisions", NULL),
32203831d35Sstevel 	MK_NOTCOUNTER64("ifspeed"),
32303831d35Sstevel 	/*
32403831d35Sstevel 	 * These are new MIB-II stats, per PSARC 1997/198
32503831d35Sstevel 	 */
32603831d35Sstevel 	MK_DECLARE32("rbytes", NULL),
32703831d35Sstevel 	MK_DECLARE32("obytes", NULL),
32803831d35Sstevel 	MK_DECLARE32("multircv", NULL),
32903831d35Sstevel 	MK_DECLARE32("multixmt", NULL),
33003831d35Sstevel 	MK_DECLARE32("brdcstrcv", NULL),
33103831d35Sstevel 	MK_DECLARE32("brdcstxmt", NULL),
33203831d35Sstevel 	/*
33303831d35Sstevel 	 * Error values
33403831d35Sstevel 	 */
33503831d35Sstevel 	MK_ERROR32("norcvbuf", NULL),
33603831d35Sstevel 	MK_ERROR32("noxmtbuf", NULL),
33703831d35Sstevel 	MK_ERROR32("unknowns", NULL),
33803831d35Sstevel 	/*
33903831d35Sstevel 	 * These are the 64-bit values, they fallback to 32-bit values
34003831d35Sstevel 	 */
34103831d35Sstevel 	MK_DECLARE64("ipackets64", "ipackets"),
34203831d35Sstevel 	MK_DECLARE64("opackets64", "opackets"),
34303831d35Sstevel 	MK_DECLARE64("rbytes64", "rbytes"),
34403831d35Sstevel 	MK_DECLARE64("obytes64", "obytes"),
34503831d35Sstevel 
34603831d35Sstevel 	/* New AP switching statistics */
34703831d35Sstevel 	MK_NOTPHYS64("man_switches"),
34803831d35Sstevel 	MK_NOTPHYS64("man_link_fails"),
34903831d35Sstevel 	MK_NOTPHYS64("man_link_stales"),
35003831d35Sstevel 	MK_NOTPHYS64("man_icmpv4_probes"),
35103831d35Sstevel 	MK_NOTPHYS64("man_icmpv6_probes"),
35203831d35Sstevel 
35303831d35Sstevel 	MK_ERROR32("align_errors", "framing"),
35403831d35Sstevel 	MK_ERROR32("fcs_errors", "crc"),
35503831d35Sstevel 	MK_ERROR32("first_collisions", NULL),
35603831d35Sstevel 	MK_ERROR32("multi_collisions", NULL),
35703831d35Sstevel 	MK_ERROR32("sqe_errors", "sqe"),
35803831d35Sstevel 
35903831d35Sstevel 	MK_ERROR32("tx_late_collisions", NULL),
36003831d35Sstevel 	MK_ERROR32("ex_collisions", "excollisions"),
36103831d35Sstevel 	MK_ERROR32("macxmt_errors", NULL),
36203831d35Sstevel 	MK_ERROR32("carrier_errors", "nocarrier"),
36303831d35Sstevel 	MK_ERROR32("toolong_errors", "buff"),
36403831d35Sstevel 	MK_ERROR32("macrcv_errors", NULL),
36503831d35Sstevel 
36603831d35Sstevel 	MK_OBSOLETE32("framing", "align_errors"),
36703831d35Sstevel 	MK_OBSOLETE32("crc", "fcs_errors"),
36803831d35Sstevel 	MK_OBSOLETE32("sqe", "sqe_errors"),
36903831d35Sstevel 	MK_OBSOLETE32("excollisions", "ex_collisions"),
37003831d35Sstevel 	MK_OBSOLETE32("nocarrier", "carrier_errors"),
37103831d35Sstevel 	MK_OBSOLETE32("buff", "toolong_errors"),
37203831d35Sstevel };
37303831d35Sstevel 
37403831d35Sstevel #define	MAN_NUMSTATS (sizeof (man_kstat_info) / sizeof (man_kstat_info_t))
37503831d35Sstevel 
37603831d35Sstevel /*
37703831d35Sstevel  * Miscellaneous ethernet stuff.
37803831d35Sstevel  *
37903831d35Sstevel  * MANs DL_INFO_ACK template.
38003831d35Sstevel  */
38103831d35Sstevel static	dl_info_ack_t man_infoack = {
38203831d35Sstevel 	DL_INFO_ACK,				/* dl_primitive */
38303831d35Sstevel 	ETHERMTU,				/* dl_max_sdu */
38403831d35Sstevel 	0,					/* dl_min_sdu */
38503831d35Sstevel 	MAN_ADDRL,				/* dl_addr_length */
38603831d35Sstevel 	DL_ETHER,				/* dl_mac_type */
38703831d35Sstevel 	0,					/* dl_reserved */
38803831d35Sstevel 	0,					/* dl_current_state */
38903831d35Sstevel 	-2,					/* dl_sap_length */
39003831d35Sstevel 	DL_CLDLS,				/* dl_service_mode */
39103831d35Sstevel 	0,					/* dl_qos_length */
39203831d35Sstevel 	0,					/* dl_qos_offset */
39303831d35Sstevel 	0,					/* dl_range_length */
39403831d35Sstevel 	0,					/* dl_range_offset */
39503831d35Sstevel 	DL_STYLE2,				/* dl_provider_style */
39603831d35Sstevel 	sizeof (dl_info_ack_t),			/* dl_addr_offset */
39703831d35Sstevel 	DL_VERSION_2,				/* dl_version */
39803831d35Sstevel 	ETHERADDRL,				/* dl_brdcst_addr_length */
39903831d35Sstevel 	sizeof (dl_info_ack_t) + MAN_ADDRL,	/* dl_brdcst_addr_offset */
40003831d35Sstevel 	0					/* dl_growth */
40103831d35Sstevel };
40203831d35Sstevel 
40303831d35Sstevel /*
40403831d35Sstevel  * Ethernet broadcast address definition.
40503831d35Sstevel  */
40603831d35Sstevel static	struct ether_addr	etherbroadcast = {
40703831d35Sstevel 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
40803831d35Sstevel };
40903831d35Sstevel 
41003831d35Sstevel static struct ether_addr zero_ether_addr = {
41103831d35Sstevel 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
41203831d35Sstevel };
41303831d35Sstevel 
41403831d35Sstevel /*
41503831d35Sstevel  * Set via MAN_SET_SC_IPADDRS ioctl.
41603831d35Sstevel  */
41703831d35Sstevel man_sc_ipaddrs_t	man_sc_ipaddrs = { 0xffffffffU, 0xffffffffU };
41803831d35Sstevel 
41903831d35Sstevel /*
42003831d35Sstevel  * Set via MAN_SET_SC_IP6ADDRS ioctl.
42103831d35Sstevel  */
42203831d35Sstevel man_sc_ip6addrs_t	man_sc_ip6addrs = { 0, 0, 0, 0, 0, 0, 0, 0 };
42303831d35Sstevel 
42403831d35Sstevel /*
42503831d35Sstevel  * IP & ICMP constants
42603831d35Sstevel  */
42703831d35Sstevel #ifndef	ETHERTYPE_IPV6
42803831d35Sstevel #define	ETHERTYPE_IPV6 0x86DD
42903831d35Sstevel #endif
43003831d35Sstevel 
43103831d35Sstevel /*
43203831d35Sstevel  * Function prototypes.
43303831d35Sstevel  *
43403831d35Sstevel  * Upper multiplexor functions.
43503831d35Sstevel  */
43603831d35Sstevel static int	man_attach(dev_info_t *, ddi_attach_cmd_t);
43703831d35Sstevel static int	man_detach(dev_info_t *, ddi_detach_cmd_t);
43803831d35Sstevel static int	man_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
43903831d35Sstevel static int	man_open(register queue_t *, dev_t *, int, int, cred_t *);
44003831d35Sstevel static int	man_configure(queue_t *);
44103831d35Sstevel static int	man_deconfigure(void);
44203831d35Sstevel static int	man_init_dests(man_t *, manstr_t *);
44303831d35Sstevel static void	man_start_dest(man_dest_t *, manstr_t *, man_pg_t *);
44403831d35Sstevel static void	man_set_optimized_dest(manstr_t *);
44503831d35Sstevel static int	man_close(queue_t *);
44603831d35Sstevel static void	man_cancel_timers(man_adest_t *);
44703831d35Sstevel static int	man_uwput(queue_t *, mblk_t *);
44803831d35Sstevel static int	man_start(queue_t *, mblk_t *, eaddr_t *);
44903831d35Sstevel static void	man_ioctl(queue_t *, mblk_t *);
45003831d35Sstevel static void	man_set_linkcheck_time(queue_t *, mblk_t *);
45103831d35Sstevel static void	man_setpath(queue_t *, mblk_t *);
45203831d35Sstevel static void	man_geteaddr(queue_t *, mblk_t *);
45303831d35Sstevel static void	man_set_sc_ipaddrs(queue_t *, mblk_t *);
45403831d35Sstevel static void	man_set_sc_ip6addrs(queue_t *, mblk_t *);
45503831d35Sstevel static int	man_get_our_etheraddr(eaddr_t *eap);
45603831d35Sstevel static void	man_nd_getset(queue_t *, mblk_t *);
45703831d35Sstevel static void	man_dl_ioc_hdr_info(queue_t *, mblk_t *);
45803831d35Sstevel static int	man_uwsrv(queue_t *);
45903831d35Sstevel static int	man_proto(queue_t *, mblk_t *);
46003831d35Sstevel static int	man_udreq(queue_t *, mblk_t *);
46103831d35Sstevel static void	man_areq(queue_t *, mblk_t *);
46203831d35Sstevel static mblk_t	*man_alloc_physreq_mp(eaddr_t *);
46303831d35Sstevel static void	man_dreq(queue_t *, mblk_t *);
46403831d35Sstevel static void	man_dodetach(manstr_t *, man_work_t *);
46503831d35Sstevel static void	man_dl_clean(mblk_t **);
46603831d35Sstevel static void	man_breq(queue_t *, mblk_t *);
46703831d35Sstevel static void	man_ubreq(queue_t *, mblk_t *);
46803831d35Sstevel static void	man_ireq(queue_t *, mblk_t *);
46903831d35Sstevel static void	man_ponreq(queue_t *, mblk_t *);
47003831d35Sstevel static void	man_poffreq(queue_t *, mblk_t *);
47103831d35Sstevel static void	man_emreq(queue_t *, mblk_t *);
47203831d35Sstevel static void	man_dmreq(queue_t *, mblk_t *);
47303831d35Sstevel static void	man_pareq(queue_t *, mblk_t *);
47403831d35Sstevel static void	man_spareq(queue_t *, mblk_t *);
47503831d35Sstevel static int	man_dlpi(manstr_t *, mblk_t *);
47603831d35Sstevel static int	man_dlioc(manstr_t *, mblk_t *);
47703831d35Sstevel static int	man_dl_catch(mblk_t **, mblk_t *);
47803831d35Sstevel static void	man_dl_release(mblk_t **, mblk_t *);
47903831d35Sstevel static int	man_match_proto(mblk_t *, mblk_t *);
48003831d35Sstevel static int	man_open_ctl();
48103831d35Sstevel static void	man_close_ctl();
48203831d35Sstevel /*
48303831d35Sstevel  * upper/lower multiplexor functions.
48403831d35Sstevel  */
48503831d35Sstevel static int	man_dlpi_senddown(manstr_t *, mblk_t *);
48603831d35Sstevel static int	man_start_lower(man_dest_t *, mblk_t *, queue_t *, int caller);
48703831d35Sstevel static int	man_lrput(queue_t *, mblk_t *);
48803831d35Sstevel /*
48903831d35Sstevel  * Lower multiplexor functions.
49003831d35Sstevel  */
49103831d35Sstevel static int	man_lwsrv(queue_t *);
49203831d35Sstevel static int	man_lrsrv(queue_t *);
49303831d35Sstevel static void	man_dlpi_replay(man_dest_t *, mblk_t *);
49403831d35Sstevel static int	man_dlioc_replay(man_dest_t *);
49503831d35Sstevel /*
49603831d35Sstevel  * Link failover routines.
49703831d35Sstevel  */
49803831d35Sstevel static int	man_gettimer(int, man_dest_t *);
49903831d35Sstevel static void	man_linkcheck_timer(void *);
50003831d35Sstevel static int	man_needs_linkcheck(man_dest_t *);
50103831d35Sstevel static int	man_do_autoswitch(man_dest_t *);
50203831d35Sstevel static int	man_autoswitch(man_pg_t *, man_dev_t *, man_work_t *);
50303831d35Sstevel static int	man_prep_dests_for_switch(man_pg_t *, man_dest_t **, int *);
50403831d35Sstevel static int	man_str_uses_pg(manstr_t *, man_pg_t *);
50503831d35Sstevel static void	man_do_icmp_bcast(man_dest_t *, t_uscalar_t);
50603831d35Sstevel static mblk_t	*man_alloc_udreq(int, man_dladdr_t *);
50703831d35Sstevel static mblk_t	*man_pinger(t_uscalar_t);
50803831d35Sstevel /*
50903831d35Sstevel  * Functions normally executing outside of the STREAMs perimeter.
51003831d35Sstevel  */
51103831d35Sstevel /*
51203831d35Sstevel  * Functions supporting/processing work requests.
51303831d35Sstevel  */
51403831d35Sstevel static void	man_bwork(void);
51503831d35Sstevel static void	man_iwork(void);		/* inside perimeter */
51603831d35Sstevel void		man_work_add(man_workq_t *, man_work_t *);
51703831d35Sstevel man_work_t	*man_work_alloc(int, int);
51803831d35Sstevel void		man_work_free(man_work_t *);
51903831d35Sstevel /*
52003831d35Sstevel  * Functions implementing/supporting failover.
52103831d35Sstevel  *
52203831d35Sstevel  * Executed inside perimeter.
52303831d35Sstevel  */
52403831d35Sstevel static int	man_do_dr_attach(man_work_t *);
52503831d35Sstevel static int	man_do_dr_switch(man_work_t *);
52603831d35Sstevel static void	man_do_dr_detach(man_work_t *);
52703831d35Sstevel static int	man_iswitch(man_work_t *);
52803831d35Sstevel static void	man_ifail_dest(man_dest_t *);
52903831d35Sstevel static man_dest_t *man_switch_match(man_dest_t *, int, void *);
53003831d35Sstevel static void	man_add_dests(man_pg_t *);
53103831d35Sstevel static void	man_reset_dlpi(void *);
53203831d35Sstevel static mblk_t	*man_dup_mplist(mblk_t *);
53303831d35Sstevel static mblk_t	*man_alloc_ubreq_dreq();
53403831d35Sstevel /*
53503831d35Sstevel  * Executed outside perimeter (us man_lock for synchronization).
53603831d35Sstevel  */
53703831d35Sstevel static void	man_bclose(man_adest_t *);
53803831d35Sstevel static void	man_bswitch(man_adest_t *, man_work_t *);
53903831d35Sstevel static int	man_plumb(man_dest_t *);
54003831d35Sstevel static void	man_unplumb(man_dest_t *);
54103831d35Sstevel static void	man_plink(queue_t *, mblk_t *);
54203831d35Sstevel static void	man_unplink(queue_t *, mblk_t *);
54303831d35Sstevel static void	man_linkrec_insert(man_linkrec_t *);
54403831d35Sstevel static queue_t	*man_linkrec_find(int);
54503831d35Sstevel /*
54603831d35Sstevel  * Functions supporting pathgroups
54703831d35Sstevel  */
54803831d35Sstevel int	man_pg_cmd(mi_path_t *, man_work_t *);
54903831d35Sstevel static int	man_pg_assign(man_pg_t **, mi_path_t *, int);
55003831d35Sstevel static int	man_pg_create(man_pg_t **, man_pg_t **, mi_path_t *);
55103831d35Sstevel static int	man_pg_unassign(man_pg_t **, mi_path_t *);
55203831d35Sstevel static int	man_pg_activate(man_t *, mi_path_t *, man_work_t *);
55303831d35Sstevel static int	man_pg_read(man_pg_t *, mi_path_t *);
55403831d35Sstevel static man_pg_t	*man_find_path_by_dev(man_pg_t *, man_dev_t *, man_path_t **);
55503831d35Sstevel static man_pg_t	*man_find_pg_by_id(man_pg_t *, int);
55603831d35Sstevel static man_path_t	*man_find_path_by_ppa(man_path_t *, int);
55703831d35Sstevel static man_path_t	*man_find_active_path(man_path_t *);
55803831d35Sstevel static man_path_t	*man_find_alternate_path(man_path_t *);
55903831d35Sstevel static void	man_path_remove(man_path_t **, man_path_t *);
56003831d35Sstevel static void	man_path_insert(man_path_t **, man_path_t *);
56103831d35Sstevel static void	man_path_merge(man_path_t **, man_path_t *);
56203831d35Sstevel static int	man_path_kstat_init(man_path_t *);
56303831d35Sstevel static void	man_path_kstat_uninit(man_path_t *);
56403831d35Sstevel /*
56503831d35Sstevel  * Functions supporting kstat reporting.
56603831d35Sstevel  */
56703831d35Sstevel static int	man_kstat_update(kstat_t *, int);
56803831d35Sstevel static void	man_do_kstats(man_work_t *);
56903831d35Sstevel static void	man_update_path_kstats(man_t *);
57003831d35Sstevel static void 	man_update_dev_kstats(kstat_named_t *, man_path_t *);
57103831d35Sstevel static void	man_sum_dests_kstats(kstat_named_t *, man_pg_t *);
57203831d35Sstevel static void	man_kstat_named_init(kstat_named_t *, int);
57303831d35Sstevel static int	man_kstat_byname(kstat_t *, char *, kstat_named_t *);
57403831d35Sstevel static void	man_sum_kstats(kstat_named_t *, kstat_t *, kstat_named_t *);
57503831d35Sstevel /*
57603831d35Sstevel  * Functions supporting ndd.
57703831d35Sstevel  */
57803831d35Sstevel static int	man_param_register(param_t *, int);
57903831d35Sstevel static int	man_pathgroups_report(queue_t *, mblk_t *, caddr_t, cred_t *);
58003831d35Sstevel static void	man_preport(man_path_t *, mblk_t *);
58103831d35Sstevel static int	man_set_active_path(queue_t *, mblk_t *, char *, caddr_t,
58203831d35Sstevel 			cred_t *);
58303831d35Sstevel static int	man_get_hostinfo(queue_t *, mblk_t *, caddr_t, cred_t *);
58403831d35Sstevel static char	*man_inet_ntoa(in_addr_t);
58503831d35Sstevel static int	man_param_get(queue_t *, mblk_t *, caddr_t, cred_t *);
58603831d35Sstevel static int	man_param_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *);
58703831d35Sstevel static  void    man_param_cleanup(void);
58803831d35Sstevel static  void    man_nd_free(caddr_t *nd_pparam);
58903831d35Sstevel /*
59003831d35Sstevel  * MAN SSC/Domain specific externs.
59103831d35Sstevel  */
59203831d35Sstevel extern int	man_get_iosram(manc_t *);
59303831d35Sstevel extern int	man_domain_configure(void);
59403831d35Sstevel extern int	man_domain_deconfigure(void);
59503831d35Sstevel extern int	man_dossc_switch(uint32_t);
59603831d35Sstevel extern int	man_is_on_domain;
59703831d35Sstevel 
59803831d35Sstevel /*
59903831d35Sstevel  * Driver Globals protected by inner perimeter.
60003831d35Sstevel  */
60103831d35Sstevel static manstr_t	*man_strup = NULL;	/* list of MAN STREAMS */
60203831d35Sstevel static caddr_t	man_ndlist = NULL;	/* head of ndd var list */
60303831d35Sstevel void		*man_softstate = NULL;
60403831d35Sstevel 
60503831d35Sstevel /*
60603831d35Sstevel  * Driver globals protected by man_lock.
60703831d35Sstevel  */
60803831d35Sstevel kmutex_t		man_lock;		/* lock protecting vars below */
60903831d35Sstevel static kthread_id_t	man_bwork_id = NULL;	/* background thread ID */
61003831d35Sstevel man_workq_t		*man_bwork_q;		/* bgthread work q */
61103831d35Sstevel man_workq_t		*man_iwork_q;		/* inner perim (uwsrv) work q */
61203831d35Sstevel static man_linkrec_t	*man_linkrec_head = NULL;	/* list of linkblks */
61303831d35Sstevel ldi_handle_t		man_ctl_lh = NULL;	/* MAN control handle */
61403831d35Sstevel queue_t			*man_ctl_wq = NULL;	/* MAN control rq */
61503831d35Sstevel static int		man_config_state = MAN_UNCONFIGURED;
61603831d35Sstevel static int		man_config_error = ENODEV;
61703831d35Sstevel 
61803831d35Sstevel /*
61903831d35Sstevel  * These parameters are accessed via ndd to report the link configuration
62003831d35Sstevel  * for the MAN driver. They can also be used to force configuration changes.
62103831d35Sstevel  */
62203831d35Sstevel #define	MAN_NOTUSR	0x0f000000
62303831d35Sstevel 
62403831d35Sstevel /* ------------------------------------------------------------------------- */
62503831d35Sstevel 
62603831d35Sstevel static  param_t	man_param_arr[] = {
62703831d35Sstevel 	/* min		max		value		name */
62803831d35Sstevel 	{  0,		0xFFFF,		0,		"man_debug_level"},
62903831d35Sstevel };
63003831d35Sstevel 
63103831d35Sstevel #define	MAN_NDD_GETABLE	1
63203831d35Sstevel #define	MAN_NDD_SETABLE	2
63303831d35Sstevel 
63403831d35Sstevel static  uint32_t	man_param_display[] = {
63503831d35Sstevel /* DISPLAY */
63603831d35Sstevel MAN_NDD_SETABLE,	/* man_debug_level */
63703831d35Sstevel };
63803831d35Sstevel 
63903831d35Sstevel /*
64003831d35Sstevel  * STREAMs information.
64103831d35Sstevel  */
64203831d35Sstevel static struct module_info man_m_info = {
64303831d35Sstevel 	MAN_IDNUM,			/* mi_idnum */
64403831d35Sstevel 	MAN_IDNAME,			/* mi_idname */
64503831d35Sstevel 	MAN_MINPSZ,			/* mi_minpsz */
64603831d35Sstevel 	MAN_MAXPSZ,			/* mi_maxpsz */
64703831d35Sstevel 	MAN_HIWAT,			/* mi_hiwat */
64803831d35Sstevel 	MAN_LOWAT			/* mi_lowat */
64903831d35Sstevel };
65003831d35Sstevel 
65103831d35Sstevel /*
65203831d35Sstevel  * Upper read queue does not do anything.
65303831d35Sstevel  */
65403831d35Sstevel static struct qinit man_urinit = {
65503831d35Sstevel 	NULL,				/* qi_putp */
65603831d35Sstevel 	NULL,				/* qi_srvp */
65703831d35Sstevel 	man_open,			/* qi_qopen */
65803831d35Sstevel 	man_close,			/* qi_qclose */
65903831d35Sstevel 	NULL,				/* qi_qadmin */
66003831d35Sstevel 	&man_m_info,			/* qi_minfo */
66103831d35Sstevel 	NULL				/* qi_mstat */
66203831d35Sstevel };
66303831d35Sstevel 
66403831d35Sstevel static struct qinit man_lrinit = {
66503831d35Sstevel 	man_lrput,			/* qi_putp */
66603831d35Sstevel 	man_lrsrv,			/* qi_srvp */
66703831d35Sstevel 	man_open,			/* qi_qopen */
66803831d35Sstevel 	man_close,			/* qi_qclose */
66903831d35Sstevel 	NULL,				/* qi_qadmin */
67003831d35Sstevel 	&man_m_info,			/* qi_minfo */
67103831d35Sstevel 	NULL				/* qi_mstat */
67203831d35Sstevel };
67303831d35Sstevel 
67403831d35Sstevel static struct qinit man_uwinit = {
67503831d35Sstevel 	man_uwput,			/* qi_putp */
67603831d35Sstevel 	man_uwsrv,			/* qi_srvp */
67703831d35Sstevel 	man_open,			/* qi_qopen */
67803831d35Sstevel 	man_close,			/* qi_qclose */
67903831d35Sstevel 	NULL,				/* qi_qadmin */
68003831d35Sstevel 	&man_m_info,			/* qi_minfo */
68103831d35Sstevel 	NULL				/* qi_mstat */
68203831d35Sstevel };
68303831d35Sstevel 
68403831d35Sstevel static struct qinit man_lwinit = {
68503831d35Sstevel 	NULL,				/* qi_putp */
68603831d35Sstevel 	man_lwsrv,			/* qi_srvp */
68703831d35Sstevel 	man_open,			/* qi_qopen */
68803831d35Sstevel 	man_close,			/* qi_qclose */
68903831d35Sstevel 	NULL,				/* qi_qadmin */
69003831d35Sstevel 	&man_m_info,			/* qi_minfo */
69103831d35Sstevel 	NULL				/* qi_mstat */
69203831d35Sstevel };
69303831d35Sstevel 
69403831d35Sstevel static struct streamtab man_maninfo = {
69503831d35Sstevel 	&man_urinit,			/* st_rdinit */
69603831d35Sstevel 	&man_uwinit,			/* st_wrinit */
69703831d35Sstevel 	&man_lrinit,			/* st_muxrinit */
69803831d35Sstevel 	&man_lwinit			/* st_muxwrinit */
69903831d35Sstevel };
70003831d35Sstevel 
70103831d35Sstevel 
70203831d35Sstevel /*
70303831d35Sstevel  * Module linkage information for the kernel.
70403831d35Sstevel  *
70503831d35Sstevel  * Locking Theory:
70603831d35Sstevel  * 	D_MTPERMOD -	Only an inner perimeter: All routines single
70703831d35Sstevel  * 			threaded (except put, see below).
70803831d35Sstevel  *	D_MTPUTSHARED -	Put routines enter inner perimeter shared (not
70903831d35Sstevel  *			exclusive) for concurrency/performance reasons.
71003831d35Sstevel  *
71103831d35Sstevel  *	Anyone who needs exclusive outer perimeter permission (changing
71203831d35Sstevel  *	global data structures) does so via qwriter() calls. The
71303831d35Sstevel  *	background thread does all his work outside of perimeter and
71403831d35Sstevel  *	submits work via qtimeout() when data structures need to be
71503831d35Sstevel  *	modified.
71603831d35Sstevel  */
71703831d35Sstevel 
71803831d35Sstevel #define	MAN_MDEV_FLAGS	(D_MP|D_MTPERMOD|D_MTPUTSHARED)
71903831d35Sstevel 
72003831d35Sstevel DDI_DEFINE_STREAM_OPS(man_ops, nulldev, nulldev, man_attach,
72119397407SSherry Moore     man_detach, nodev, man_info, MAN_MDEV_FLAGS, &man_maninfo,
72219397407SSherry Moore     ddi_quiesce_not_supported);
72303831d35Sstevel 
72403831d35Sstevel extern int nodev(), nulldev();
72503831d35Sstevel 
72603831d35Sstevel static struct modldrv modldrv = {
72703831d35Sstevel 	&mod_driverops, 	/* Module type.  This one is a pseudo driver */
72819397407SSherry Moore 	"MAN MetaDriver",
72903831d35Sstevel 	&man_ops,		/* driver ops */
73003831d35Sstevel };
73103831d35Sstevel 
73203831d35Sstevel static struct modlinkage modlinkage = {
73303831d35Sstevel 	MODREV_1,
73403831d35Sstevel 	(void *) &modldrv,
73503831d35Sstevel 	NULL
73603831d35Sstevel };
73703831d35Sstevel 
73803831d35Sstevel 
73903831d35Sstevel /* Virtual Driver loader entry points */
74003831d35Sstevel 
74103831d35Sstevel int
_init(void)74203831d35Sstevel _init(void)
74303831d35Sstevel {
74403831d35Sstevel 	int		status = DDI_FAILURE;
74503831d35Sstevel 
74603831d35Sstevel 	MAN_DBG(MAN_INIT, ("_init:"));
74703831d35Sstevel 
74803831d35Sstevel 	status = mod_install(&modlinkage);
74903831d35Sstevel 	if (status != 0) {
75003831d35Sstevel 		cmn_err(CE_WARN, "man_init: mod_install failed"
75103831d35Sstevel 		    " error = %d", status);
75203831d35Sstevel 		return (status);
75303831d35Sstevel 	}
75403831d35Sstevel 
75503831d35Sstevel 	status = ddi_soft_state_init(&man_softstate, sizeof (man_t), 4);
75603831d35Sstevel 	if (status != 0) {
75703831d35Sstevel 		cmn_err(CE_WARN, "man_init: ddi_soft_state_init failed"
75803831d35Sstevel 		    " error = %d", status);
759*07d06da5SSurya Prakki 		(void) mod_remove(&modlinkage);
76003831d35Sstevel 		return (status);
76103831d35Sstevel 	}
76203831d35Sstevel 
76303831d35Sstevel 	man_bwork_q = man_kzalloc(sizeof (man_workq_t), KM_SLEEP);
76403831d35Sstevel 	man_iwork_q = man_kzalloc(sizeof (man_workq_t), KM_SLEEP);
76503831d35Sstevel 
76603831d35Sstevel 	mutex_init(&man_lock, NULL, MUTEX_DRIVER, NULL);
76703831d35Sstevel 	cv_init(&man_bwork_q->q_cv, NULL, CV_DRIVER, NULL);
76803831d35Sstevel 	cv_init(&man_iwork_q->q_cv, NULL, CV_DRIVER, NULL);
76903831d35Sstevel 
77003831d35Sstevel 	return (0);
77103831d35Sstevel }
77203831d35Sstevel 
77303831d35Sstevel /*
77403831d35Sstevel  * _info is called by modinfo().
77503831d35Sstevel  */
77603831d35Sstevel int
_info(struct modinfo * modinfop)77703831d35Sstevel _info(struct modinfo *modinfop)
77803831d35Sstevel {
77903831d35Sstevel 	int	status;
78003831d35Sstevel 
78103831d35Sstevel 	MAN_DBG(MAN_INIT, ("_info:"));
78203831d35Sstevel 
78303831d35Sstevel 	status = mod_info(&modlinkage, modinfop);
78403831d35Sstevel 
78503831d35Sstevel 	MAN_DBG(MAN_INIT, ("_info: returns %d", status));
78603831d35Sstevel 
78703831d35Sstevel 	return (status);
78803831d35Sstevel }
78903831d35Sstevel 
79003831d35Sstevel /*
79103831d35Sstevel  * _fini called by modunload() just before driver is unloaded from memory.
79203831d35Sstevel  */
79303831d35Sstevel int
_fini(void)79403831d35Sstevel _fini(void)
79503831d35Sstevel {
79603831d35Sstevel 	int status = 0;
79703831d35Sstevel 
79803831d35Sstevel 	MAN_DBG(MAN_INIT, ("_fini:"));
79903831d35Sstevel 
80003831d35Sstevel 
80103831d35Sstevel 	/*
80203831d35Sstevel 	 * The only upper stream left should be man_ctl_lh. Note that
80303831d35Sstevel 	 * man_close (upper stream) is synchronous (i.e. it waits for
80403831d35Sstevel 	 * all STREAMS framework associated with the upper stream to be
80503831d35Sstevel 	 * torn down). This guarantees that man_ctl_lh will never become
80603831d35Sstevel 	 * NULL until noone is around to notice. This assumption is made
80703831d35Sstevel 	 * in a few places like man_plumb, man_unplumb, etc.
80803831d35Sstevel 	 */
80903831d35Sstevel 	if (man_strup && (man_strup->ms_next != NULL))
81003831d35Sstevel 		return (EBUSY);
81103831d35Sstevel 
81203831d35Sstevel 	/*
81303831d35Sstevel 	 * Deconfigure the driver.
81403831d35Sstevel 	 */
81503831d35Sstevel 	status = man_deconfigure();
81603831d35Sstevel 	if (status)
81703831d35Sstevel 		goto exit;
81803831d35Sstevel 
81903831d35Sstevel 	/*
82003831d35Sstevel 	 * need to detach every instance of the driver
82103831d35Sstevel 	 */
82203831d35Sstevel 	status = mod_remove(&modlinkage);
82303831d35Sstevel 	if (status != 0)
82403831d35Sstevel 		goto exit;
82503831d35Sstevel 
82603831d35Sstevel 	ddi_soft_state_fini(&man_softstate);
82703831d35Sstevel 
82803831d35Sstevel 	/*
82903831d35Sstevel 	 * Free up locks.
83003831d35Sstevel 	 */
83103831d35Sstevel 	mutex_destroy(&man_lock);
83203831d35Sstevel 	cv_destroy(&man_bwork_q->q_cv);
83303831d35Sstevel 	cv_destroy(&man_iwork_q->q_cv);
83403831d35Sstevel 
83503831d35Sstevel 	man_kfree(man_bwork_q, sizeof (man_workq_t));
83603831d35Sstevel 	man_kfree(man_iwork_q, sizeof (man_workq_t));
83703831d35Sstevel 
83803831d35Sstevel exit:
83903831d35Sstevel 
84003831d35Sstevel 	MAN_DBG(MAN_INIT, ("_fini: returns %d", status));
84103831d35Sstevel 
84203831d35Sstevel 	return (status);
84303831d35Sstevel }
84403831d35Sstevel 
84503831d35Sstevel /*
84603831d35Sstevel  * Deconfigure the MAN driver.
84703831d35Sstevel  */
84803831d35Sstevel static int
man_deconfigure()84903831d35Sstevel man_deconfigure()
85003831d35Sstevel {
85103831d35Sstevel 	man_work_t	*wp;
85203831d35Sstevel 	int		status = 0;
85303831d35Sstevel 
85403831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_deconfigure:\n"));
85503831d35Sstevel 
85603831d35Sstevel 	mutex_enter(&man_lock);
85703831d35Sstevel 
85803831d35Sstevel 	if (man_is_on_domain) {
85903831d35Sstevel 		status = man_domain_deconfigure();
86003831d35Sstevel 		if (status != 0)
86103831d35Sstevel 			goto exit;
86203831d35Sstevel 	}
86303831d35Sstevel 
86403831d35Sstevel 	man_param_cleanup();	/* Free up NDD resources */
86503831d35Sstevel 
86603831d35Sstevel 	/*
86703831d35Sstevel 	 * I may have to handle straggling work requests. Just qwait?
86803831d35Sstevel 	 * or cvwait? Called from _fini - TBD
86903831d35Sstevel 	 */
87003831d35Sstevel 	ASSERT(man_bwork_q->q_work == NULL);
87103831d35Sstevel 	ASSERT(man_iwork_q->q_work == NULL);
87203831d35Sstevel 
87303831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_deconfigure: submitting CLOSE_CTL\n"));
87403831d35Sstevel 
87503831d35Sstevel 	if (man_ctl_lh != NULL) {
87603831d35Sstevel 		wp = man_work_alloc(MAN_WORK_CLOSE_CTL, KM_SLEEP);
87703831d35Sstevel 		wp->mw_flags = MAN_WFLAGS_CVWAITER;
87803831d35Sstevel 		man_work_add(man_bwork_q, wp);
87903831d35Sstevel 
88003831d35Sstevel 		while (!(wp->mw_flags & MAN_WFLAGS_DONE)) {
88103831d35Sstevel 			cv_wait(&wp->mw_cv, &man_lock);
88203831d35Sstevel 		}
88303831d35Sstevel 		man_work_free(wp);
88403831d35Sstevel 	}
88503831d35Sstevel 
88603831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_deconfigure: submitting STOP\n"));
88703831d35Sstevel 	if (man_bwork_id != NULL) {
88803831d35Sstevel 
88903831d35Sstevel 		wp = man_work_alloc(MAN_WORK_STOP, KM_SLEEP);
89003831d35Sstevel 		wp->mw_flags = MAN_WFLAGS_CVWAITER;
89103831d35Sstevel 		man_work_add(man_bwork_q, wp);
89203831d35Sstevel 
89303831d35Sstevel 		while (!(wp->mw_flags & MAN_WFLAGS_DONE)) {
89403831d35Sstevel 			cv_wait(&wp->mw_cv, &man_lock);
89503831d35Sstevel 		}
89603831d35Sstevel 		man_work_free(wp);
89703831d35Sstevel 	}
89803831d35Sstevel 	man_config_state = MAN_UNCONFIGURED;
89903831d35Sstevel 
90003831d35Sstevel exit:
90103831d35Sstevel 	mutex_exit(&man_lock);
90203831d35Sstevel 
90303831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_deconfigure: returns %d\n", status));
90403831d35Sstevel 
90503831d35Sstevel 	return (status);
90603831d35Sstevel }
90703831d35Sstevel 
90803831d35Sstevel /*
90903831d35Sstevel  * man_attach - allocate resources and attach an instance of the MAN driver
91003831d35Sstevel  * The <man>.conf file controls how many instances of the MAN driver are
91103831d35Sstevel  * available.
91203831d35Sstevel  *
91303831d35Sstevel  *	dip - devinfo of node
91403831d35Sstevel  * 	cmd - one of DDI_ATTACH | DDI_RESUME
91503831d35Sstevel  *
91603831d35Sstevel  *	returns	- success - DDI_SUCCESS
91703831d35Sstevel  *		- failure - DDI_FAILURE
91803831d35Sstevel  */
91903831d35Sstevel static int
man_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)92003831d35Sstevel man_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
92103831d35Sstevel {
92203831d35Sstevel 	man_t		*manp;		/* per instance data */
92303831d35Sstevel 	uchar_t		flag = KSTAT_FLAG_WRITABLE; /* support netstat -kc */
92403831d35Sstevel 	kstat_t		*ksp;
92503831d35Sstevel 	int		minor_node_created = 0;
92603831d35Sstevel 	int		instance;
92703831d35Sstevel 	eaddr_t		man_eaddr;
92803831d35Sstevel 
92903831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_attach: \n"));
93003831d35Sstevel 
93103831d35Sstevel 	if (cmd != DDI_ATTACH) {
93203831d35Sstevel 		MAN_DBG(MAN_INIT, ("man_attach: bad command %d\n", cmd));
93303831d35Sstevel 		return (DDI_FAILURE);
93403831d35Sstevel 	}
93503831d35Sstevel 
93603831d35Sstevel 	if (man_get_our_etheraddr(&man_eaddr))
93703831d35Sstevel 		return (DDI_FAILURE);
93803831d35Sstevel 
93903831d35Sstevel 	instance = ddi_get_instance(dip);
94003831d35Sstevel 
94103831d35Sstevel 	/*
94203831d35Sstevel 	 * we assume that instance is always equal to zero.
94303831d35Sstevel 	 * and there will always only be one instance.
94403831d35Sstevel 	 * this is done because when dman opens itself via DMAN_INT_PATH,
94503831d35Sstevel 	 * the path assumes that the instance number is zero.
94603831d35Sstevel 	 * if we ever need to support multiple instances of the dman
94703831d35Sstevel 	 * driver or non-zero instances, this will have to change.
94803831d35Sstevel 	 */
94903831d35Sstevel 	ASSERT(instance == 0);
95003831d35Sstevel 
95103831d35Sstevel 	/*
95203831d35Sstevel 	 * Allocate per device info pointer and link in to global list of
95303831d35Sstevel 	 * MAN devices.
95403831d35Sstevel 	 */
95503831d35Sstevel 	if ((ddi_soft_state_zalloc(man_softstate, instance) != DDI_SUCCESS) ||
95603831d35Sstevel 	    ((manp = ddi_get_soft_state(man_softstate, instance)) == NULL)) {
95703831d35Sstevel 		cmn_err(CE_WARN, "man_attach: cannot zalloc soft state!");
95803831d35Sstevel 		return (DDI_FAILURE);
95903831d35Sstevel 	}
96003831d35Sstevel 
96103831d35Sstevel 	ddi_set_driver_private(dip, manp);
96203831d35Sstevel 	manp->man_dip = dip;
9635c066ec2SJerry Gilliam 	manp->man_meta_major = ddi_driver_major(dip);
96403831d35Sstevel 	manp->man_meta_ppa = instance;
96503831d35Sstevel 
96603831d35Sstevel 	/*
96703831d35Sstevel 	 * Set ethernet address. Note that this address is duplicated
96803831d35Sstevel 	 * at md_src_eaddr.
96903831d35Sstevel 	 */
97003831d35Sstevel 	ether_copy(&man_eaddr, &manp->man_eaddr);
97103831d35Sstevel 	manp->man_eaddr_v = 1;
97203831d35Sstevel 
97303831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_attach: set ether to %s",
97403831d35Sstevel 	    ether_sprintf(&manp->man_eaddr)));
97503831d35Sstevel 
97603831d35Sstevel 	/*
97703831d35Sstevel 	 * Initialize failover-related fields (timers and such),
97803831d35Sstevel 	 * taking values from properties if present.
97903831d35Sstevel 	 */
98003831d35Sstevel 	manp->man_init_time = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
98103831d35Sstevel 	    "init_time", MAN_INIT_TIME);
98203831d35Sstevel 
98303831d35Sstevel 	manp->man_linkcheck_time = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
98403831d35Sstevel 	    "linkcheck_time", MAN_LINKCHECK_TIME);
98503831d35Sstevel 
98603831d35Sstevel 	manp->man_linkstale_time = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
98703831d35Sstevel 	    "man_linkstale_time", MAN_LINKSTALE_TIME);
98803831d35Sstevel 
98903831d35Sstevel 	manp->man_linkstale_retries = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
99003831d35Sstevel 	    "man_linkstale_retries", MAN_LINKSTALE_RETRIES);
99103831d35Sstevel 
99203831d35Sstevel 	manp->man_dr_delay = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
99303831d35Sstevel 	    "man_dr_delay", MAN_DR_DELAY);
99403831d35Sstevel 
99503831d35Sstevel 	manp->man_dr_retries = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
99603831d35Sstevel 	    "man_dr_retries", MAN_DR_RETRIES);
99703831d35Sstevel 
99803831d35Sstevel 	manp->man_kstat_waittime = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
99903831d35Sstevel 	    "man_kstat_waittime", MAN_KSTAT_WAITTIME);
100003831d35Sstevel 
100103831d35Sstevel 	manp->man_dlpireset_time = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
100203831d35Sstevel 	    "man_dlpireset_time", MAN_DLPIRESET_TIME);
100303831d35Sstevel 
100403831d35Sstevel 	if (ddi_create_internal_pathname(dip, MAN_IDNAME, S_IFCHR,
100503831d35Sstevel 	    ddi_get_instance(dip)) == DDI_SUCCESS) {
100603831d35Sstevel 		minor_node_created = 1;
100703831d35Sstevel 	} else {
100803831d35Sstevel 		cmn_err(CE_WARN, "man_attach: failed for instance %d",
100903831d35Sstevel 		    ddi_get_instance(dip));
101003831d35Sstevel 		goto exit;
101103831d35Sstevel 	}
101203831d35Sstevel 
101303831d35Sstevel 	if (ddi_create_minor_node(dip, MAN_IDNAME, S_IFCHR,
101403831d35Sstevel 	    ddi_get_instance(dip), DDI_NT_NET, CLONE_DEV) == DDI_SUCCESS) {
101503831d35Sstevel 		minor_node_created = 1;
101603831d35Sstevel 	} else {
101703831d35Sstevel 		cmn_err(CE_WARN, "man_attach: failed for instance %d",
101803831d35Sstevel 		    ddi_get_instance(dip));
101903831d35Sstevel 		goto exit;
102003831d35Sstevel 	}
102103831d35Sstevel 
102203831d35Sstevel 	/*
102303831d35Sstevel 	 * Allocate meta kstat_t for this instance of the driver.
102403831d35Sstevel 	 * Note that each of man_path_t keeps track of the kstats
102503831d35Sstevel 	 * for the real devices via mp_last_knp.
102603831d35Sstevel 	 */
102703831d35Sstevel #ifdef	kstat
102803831d35Sstevel 	flag |= KSTAT_FLAG_PERSISTENT;
102903831d35Sstevel #endif
103003831d35Sstevel 	ksp = kstat_create(MAN_IDNAME, ddi_get_instance(dip), NULL, "net",
103103831d35Sstevel 	    KSTAT_TYPE_NAMED, MAN_NUMSTATS, flag);
103203831d35Sstevel 
103303831d35Sstevel 	if (ksp == NULL) {
103403831d35Sstevel 		cmn_err(CE_WARN, "man_attach(%d): kstat_create failed"
103503831d35Sstevel 		    " - manp(0x%p)", manp->man_meta_ppa,
103603831d35Sstevel 		    (void *)manp);
103703831d35Sstevel 		goto exit;
103803831d35Sstevel 	}
103903831d35Sstevel 
104003831d35Sstevel 	man_kstat_named_init(ksp->ks_data, MAN_NUMSTATS);
104103831d35Sstevel 	ksp->ks_update = man_kstat_update;
104203831d35Sstevel 	ksp->ks_private = (void *) manp;
104303831d35Sstevel 	manp->man_ksp = ksp;
104403831d35Sstevel 	kstat_install(manp->man_ksp);
104503831d35Sstevel 
104603831d35Sstevel 	ddi_report_dev(dip);
104703831d35Sstevel 
104803831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_attach(%d) returns DDI_SUCCESS",
104903831d35Sstevel 	    ddi_get_instance(dip)));
105003831d35Sstevel 
105103831d35Sstevel 	return (DDI_SUCCESS);
105203831d35Sstevel 
105303831d35Sstevel exit:
105403831d35Sstevel 	if (minor_node_created)
105503831d35Sstevel 		ddi_remove_minor_node(dip, NULL);
105603831d35Sstevel 	ddi_set_driver_private(dip, NULL);
105703831d35Sstevel 	ddi_soft_state_free(man_softstate, instance);
105803831d35Sstevel 
105903831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_attach(%d) eaddr returns DDI_FAILIRE",
106003831d35Sstevel 	    ddi_get_instance(dip)));
106103831d35Sstevel 
106203831d35Sstevel 	return (DDI_FAILURE);
106303831d35Sstevel 
106403831d35Sstevel }
106503831d35Sstevel 
106603831d35Sstevel static int
man_get_our_etheraddr(eaddr_t * eap)106703831d35Sstevel man_get_our_etheraddr(eaddr_t *eap)
106803831d35Sstevel {
106903831d35Sstevel 	manc_t	manc;
107003831d35Sstevel 	int	status = 0;
107103831d35Sstevel 
107203831d35Sstevel 	if (man_is_on_domain) {
107303831d35Sstevel 		if (status = man_get_iosram(&manc))
107403831d35Sstevel 			return (status);
107503831d35Sstevel 		ether_copy(&manc.manc_dom_eaddr, eap);
107603831d35Sstevel 	} else {
107703831d35Sstevel 		(void) localetheraddr((struct ether_addr *)NULL, eap);
107803831d35Sstevel 	}
107903831d35Sstevel 
108003831d35Sstevel 	return (status);
108103831d35Sstevel }
108203831d35Sstevel 
108303831d35Sstevel /*
108403831d35Sstevel  * man_detach - detach an instance of a driver
108503831d35Sstevel  *
108603831d35Sstevel  *	dip - devinfo of node
108703831d35Sstevel  * 	cmd - one of DDI_DETACH | DDI_SUSPEND
108803831d35Sstevel  *
108903831d35Sstevel  *	returns	- success - DDI_SUCCESS
109003831d35Sstevel  *		- failure - DDI_FAILURE
109103831d35Sstevel  */
109203831d35Sstevel static int
man_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)109303831d35Sstevel man_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
109403831d35Sstevel {
109503831d35Sstevel 	register man_t	*manp;		/* per instance data */
109603831d35Sstevel 	int		instance;
109703831d35Sstevel 
109803831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_detach(%d):\n", ddi_get_instance(dip)));
109903831d35Sstevel 
110003831d35Sstevel 	if (cmd != DDI_DETACH) {
110103831d35Sstevel 		MAN_DBG(MAN_INIT, ("man_detach: bad command %d\n", cmd));
110203831d35Sstevel 		return (DDI_FAILURE);
110303831d35Sstevel 	}
110403831d35Sstevel 
110503831d35Sstevel 	if (dip == NULL) {
110603831d35Sstevel 		MAN_DBG(MAN_INIT, ("man_detach: dip == NULL\n"));
110703831d35Sstevel 		return (DDI_FAILURE);
110803831d35Sstevel 	}
110903831d35Sstevel 
111003831d35Sstevel 	instance = ddi_get_instance(dip);
111103831d35Sstevel 
111203831d35Sstevel 	mutex_enter(&man_lock);
111303831d35Sstevel 
111403831d35Sstevel 	manp = (man_t *)ddi_get_soft_state(man_softstate, instance);
111503831d35Sstevel 	if (manp == NULL) {
111603831d35Sstevel 		mutex_exit(&man_lock);
111703831d35Sstevel 
111803831d35Sstevel 		cmn_err(CE_WARN, "man_detach: unable to get softstate"
111903831d35Sstevel 		    " for instance = %d, dip = 0x%p!\n", instance,
112003831d35Sstevel 		    (void *)dip);
112103831d35Sstevel 		return (DDI_FAILURE);
112203831d35Sstevel 	}
112303831d35Sstevel 
112403831d35Sstevel 	if (manp->man_refcnt != 0) {
112503831d35Sstevel 		mutex_exit(&man_lock);
112603831d35Sstevel 
112703831d35Sstevel 		cmn_err(CE_WARN, "man_detach: %s%d refcnt %d", MAN_IDNAME,
112803831d35Sstevel 		    instance, manp->man_refcnt);
112903831d35Sstevel 		MAN_DBGCALL(MAN_INIT, man_print_man(manp));
113003831d35Sstevel 
113103831d35Sstevel 		return (DDI_FAILURE);
113203831d35Sstevel 	}
113303831d35Sstevel 
113403831d35Sstevel 	ddi_remove_minor_node(dip, NULL);
113503831d35Sstevel 
113603831d35Sstevel 	mutex_exit(&man_lock);
113703831d35Sstevel 
113803831d35Sstevel 	kstat_delete(manp->man_ksp);
113903831d35Sstevel 	ddi_soft_state_free(man_softstate, instance);
114003831d35Sstevel 	ddi_set_driver_private(dip, NULL);
114103831d35Sstevel 
114203831d35Sstevel 	MAN_DBG(MAN_INIT, ("man_detach returns DDI_SUCCESS"));
114303831d35Sstevel 
114403831d35Sstevel 	return (DDI_SUCCESS);
114503831d35Sstevel }
114603831d35Sstevel 
114703831d35Sstevel /*
114803831d35Sstevel  * man_info:
114903831d35Sstevel  *	As a standard DLPI style-2, man_info() should always return
115003831d35Sstevel  *	DDI_FAILURE.
115103831d35Sstevel  *
115203831d35Sstevel  *	However, man_open() has special treatment for a direct open
115303831d35Sstevel  *	via kstr_open() without going through the CLONE driver.
115403831d35Sstevel  *	To make this special kstr_open() work, we need to map
115503831d35Sstevel  *	minor of 0 to instance 0.
115603831d35Sstevel  */
115703831d35Sstevel /*ARGSUSED*/
115803831d35Sstevel static int
man_info(dev_info_t * dip,ddi_info_cmd_t infocmd,void * arg,void ** result)115903831d35Sstevel man_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
116003831d35Sstevel {
116103831d35Sstevel 	minor_t minor;
116203831d35Sstevel 
116303831d35Sstevel 	switch (infocmd) {
116403831d35Sstevel 	case DDI_INFO_DEVT2DEVINFO:
116503831d35Sstevel 		break;
116603831d35Sstevel 
116703831d35Sstevel 	case DDI_INFO_DEVT2INSTANCE:
116803831d35Sstevel 		minor = getminor((dev_t)arg);
116903831d35Sstevel 		if (minor == 0) {
117003831d35Sstevel 			*result = (void *)(uintptr_t)minor;
117103831d35Sstevel 			return (DDI_SUCCESS);
117203831d35Sstevel 		}
117303831d35Sstevel 		break;
117403831d35Sstevel 	default:
117503831d35Sstevel 		break;
117603831d35Sstevel 	}
117703831d35Sstevel 	return (DDI_FAILURE);
117803831d35Sstevel }
117903831d35Sstevel 
118003831d35Sstevel /* Standard Device Driver entry points */
118103831d35Sstevel 
118203831d35Sstevel /*
118303831d35Sstevel  * man_open - open the device
118403831d35Sstevel  *
118503831d35Sstevel  *	rq - upper read queue of the stream
118603831d35Sstevel  *	devp - pointer to a device number
118703831d35Sstevel  *	flag - information passed from the user program open(2) system call
118803831d35Sstevel  *	sflag - stream flags
118903831d35Sstevel  *	credp - pointer to the cred(9S) user credential structure
119003831d35Sstevel  *
119103831d35Sstevel  *	returns	- success - 0
119203831d35Sstevel  *		- failure - errno value for failure
119303831d35Sstevel  */
119403831d35Sstevel /*ARGSUSED*/
119503831d35Sstevel static int
man_open(queue_t * rq,dev_t * devp,int flag,int sflag,cred_t * credp)119603831d35Sstevel man_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
119703831d35Sstevel {
119803831d35Sstevel 	int			minordev = -1;
119903831d35Sstevel 	manstr_t		*msp;
120003831d35Sstevel 	manstr_t		*tsp;
120103831d35Sstevel 	manstr_t		**prevmsp;
120203831d35Sstevel 	int			status = 0;
120303831d35Sstevel 
120403831d35Sstevel 	MAN_DBG(MAN_OCLOSE, ("man_open: rq(0x%p) sflag(0x%x)\n",
120503831d35Sstevel 	    (void *)rq, sflag));
120603831d35Sstevel 
120703831d35Sstevel 	ASSERT(rq);
120803831d35Sstevel 	ASSERT(sflag != MODOPEN);
120903831d35Sstevel 
121003831d35Sstevel 	/*
121103831d35Sstevel 	 * reopen; q_ptr set to msp at open completion.
121203831d35Sstevel 	 */
121303831d35Sstevel 	if (rq->q_ptr) {
121403831d35Sstevel 		return (0);
121503831d35Sstevel 	}
121603831d35Sstevel 
121703831d35Sstevel 	/*
121803831d35Sstevel 	 * Allocate and initialize manstr_t for this device.
121903831d35Sstevel 	 */
122003831d35Sstevel 	msp = man_kzalloc(sizeof (manstr_t), KM_SLEEP);
122103831d35Sstevel 	SETSTATE(msp, DL_UNATTACHED);
122203831d35Sstevel 	msp->ms_meta_ppa = -1;
122303831d35Sstevel 	msp->ms_rq = rq;
122403831d35Sstevel 	rq->q_ptr = WR(rq)->q_ptr = msp;
122503831d35Sstevel 
122603831d35Sstevel 	/*
122703831d35Sstevel 	 * Get the MAN driver configured on 1st open.  Note that the only way
122803831d35Sstevel 	 * we get sflag != CLONEOPEN is via the call in man_plumbctl().  All
122903831d35Sstevel 	 * CLONEOPEN calls to man_open will be via the file system
123003831d35Sstevel 	 * device node /dev/man, a pseudo clone device.
123103831d35Sstevel 	 */
123203831d35Sstevel 
123303831d35Sstevel 	qprocson(rq);
123403831d35Sstevel 
123503831d35Sstevel 	if (sflag == CLONEOPEN && man_config_state != MAN_CONFIGURED) {
123603831d35Sstevel 		/*
123703831d35Sstevel 		 * First open calls man_configure. Everyone qwaits until
123803831d35Sstevel 		 * we get it open. See man_open_ctl() comments for mutex
123903831d35Sstevel 		 * lock/synchronization info.
124003831d35Sstevel 		 */
124103831d35Sstevel 
124203831d35Sstevel 		mutex_enter(&man_lock);
124303831d35Sstevel 
124403831d35Sstevel 		if (man_config_state == MAN_UNCONFIGURED) {
124503831d35Sstevel 			man_config_state = MAN_CONFIGURING;
124603831d35Sstevel 			mutex_exit(&man_lock);
124703831d35Sstevel 			status = man_configure(rq);
124803831d35Sstevel 			if (status != 0)
124903831d35Sstevel 				goto exit;
125003831d35Sstevel 		} else {
125103831d35Sstevel 			while (man_config_state == MAN_CONFIGURING) {
125203831d35Sstevel 
125303831d35Sstevel 				mutex_exit(&man_lock);
125403831d35Sstevel 				status = qwait_sig(rq);
125503831d35Sstevel 
125603831d35Sstevel 				if (status == 0) {
125703831d35Sstevel 					status = EINTR;
125803831d35Sstevel 					goto exit;
125903831d35Sstevel 				}
126003831d35Sstevel 
126103831d35Sstevel 				mutex_enter(&man_lock);
126203831d35Sstevel 			}
126303831d35Sstevel 			mutex_exit(&man_lock);
126403831d35Sstevel 
126503831d35Sstevel 			if (man_config_error) {
126603831d35Sstevel 				status = man_config_error;
126703831d35Sstevel 				goto exit;
126803831d35Sstevel 			}
126903831d35Sstevel 		}
127003831d35Sstevel 	}
127103831d35Sstevel 
127203831d35Sstevel 	/*
127303831d35Sstevel 	 * Determine minor device number. man_open serialized by
127403831d35Sstevel 	 * D_MTPERMOD.
127503831d35Sstevel 	 */
127603831d35Sstevel 	prevmsp = &man_strup;
127703831d35Sstevel 	if (sflag == CLONEOPEN) {
127803831d35Sstevel 
127903831d35Sstevel 		minordev = 0;
128003831d35Sstevel 		for (; (tsp = *prevmsp) != NULL; prevmsp = &tsp->ms_next) {
128103831d35Sstevel 			if (minordev < tsp->ms_minor)
128203831d35Sstevel 				break;
128303831d35Sstevel 			minordev++;
128403831d35Sstevel 		}
128503831d35Sstevel 		*devp = makedevice(getmajor(*devp), minordev);
128603831d35Sstevel 
128703831d35Sstevel 	} else {
128803831d35Sstevel 		/*
128903831d35Sstevel 		 * Should only get here from man_plumbctl().
129003831d35Sstevel 		 */
129103831d35Sstevel 		/*LINTED E_ASSIGN_UINT_TO_SIGNED_INT*/
129203831d35Sstevel 		minordev = getminor(*devp);
129303831d35Sstevel 
129403831d35Sstevel 		/*
129503831d35Sstevel 		 * No need to protect this here as all opens are
129603831d35Sstevel 		 * qwaiting, and the bgthread (who is doing this open)
129703831d35Sstevel 		 * is the only one who mucks with this variable.
129803831d35Sstevel 		 */
129903831d35Sstevel 		man_ctl_wq = WR(rq);
130003831d35Sstevel 
130103831d35Sstevel 		ASSERT(minordev == 0);	/* TBD delete this */
130203831d35Sstevel 	}
130303831d35Sstevel 
130403831d35Sstevel 	msp->ms_meta_maj = getmajor(*devp);
130503831d35Sstevel 	msp->ms_minor = minordev;
130603831d35Sstevel 	if (minordev == 0)
130703831d35Sstevel 		msp->ms_flags = MAN_SFLAG_CONTROL;
130803831d35Sstevel 
130903831d35Sstevel 	/*
131003831d35Sstevel 	 * Link new entry into global list of active entries.
131103831d35Sstevel 	 */
131203831d35Sstevel 	msp->ms_next = *prevmsp;
131303831d35Sstevel 	*prevmsp = msp;
131403831d35Sstevel 
131503831d35Sstevel 
131603831d35Sstevel 	/*
131703831d35Sstevel 	 * Disable automatic enabling of our write service procedure.
131803831d35Sstevel 	 * We control this explicitly.
131903831d35Sstevel 	 */
132003831d35Sstevel 	noenable(WR(rq));
132103831d35Sstevel 
132203831d35Sstevel exit:
132303831d35Sstevel 	MAN_DBG(MAN_OCLOSE, ("man_open: exit rq(0x%p) minor %d errno %d\n",
132403831d35Sstevel 	    (void *)rq, minordev, status));
132503831d35Sstevel 
132603831d35Sstevel 	/*
132703831d35Sstevel 	 * Clean up on error.
132803831d35Sstevel 	 */
132903831d35Sstevel 	if (status) {
133003831d35Sstevel 		qprocsoff(rq);
133103831d35Sstevel 		rq->q_ptr = WR(rq)->q_ptr = NULL;
133203831d35Sstevel 		man_kfree((char *)msp, sizeof (manstr_t));
133303831d35Sstevel 	} else
133403831d35Sstevel 		(void) qassociate(rq, -1);
133503831d35Sstevel 
133603831d35Sstevel 	return (status);
133703831d35Sstevel }
133803831d35Sstevel 
133903831d35Sstevel /*
134003831d35Sstevel  * Get the driver configured.  Called from first man_open with exclusive
134103831d35Sstevel  * inner perimeter.
134203831d35Sstevel  */
134303831d35Sstevel static int
man_configure(queue_t * rq)134403831d35Sstevel man_configure(queue_t *rq)
134503831d35Sstevel {
134603831d35Sstevel 	man_work_t	*wp;
134703831d35Sstevel 	int		status = 0;
134803831d35Sstevel 
134903831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_configure:"));
135003831d35Sstevel 
135103831d35Sstevel 	/*
135203831d35Sstevel 	 * Initialize NDD parameters.
135303831d35Sstevel 	 */
135403831d35Sstevel 	if (!man_ndlist &&
135503831d35Sstevel 	    !man_param_register(man_param_arr, A_CNT(man_param_arr))) {
135603831d35Sstevel 		cmn_err(CE_WARN, "man_configure: man_param_register failed!");
135703831d35Sstevel 		man_config_error = ENOMEM;
135803831d35Sstevel 		goto exit;
135903831d35Sstevel 	}
136003831d35Sstevel 
136103831d35Sstevel 	mutex_enter(&man_lock);
136203831d35Sstevel 
136303831d35Sstevel 	/*
136403831d35Sstevel 	 * Start up background thread.
136503831d35Sstevel 	 */
136603831d35Sstevel 	if (man_bwork_id == NULL)
136703831d35Sstevel 		man_bwork_id = thread_create(NULL, 2 * DEFAULTSTKSZ,
136803831d35Sstevel 		    man_bwork, NULL, 0, &p0, TS_RUN, minclsyspri);
136903831d35Sstevel 
137003831d35Sstevel 	/*
137103831d35Sstevel 	 * Submit work to get control stream opened. Qwait until its
137203831d35Sstevel 	 * done. See man_open_ctl for mutex lock/synchronization info.
137303831d35Sstevel 	 */
137403831d35Sstevel 
137503831d35Sstevel 	if (man_ctl_lh == NULL) {
137603831d35Sstevel 		wp = man_work_alloc(MAN_WORK_OPEN_CTL, KM_SLEEP);
137703831d35Sstevel 		wp->mw_flags |= MAN_WFLAGS_QWAITER;
137803831d35Sstevel 		wp->mw_q = WR(rq);
137903831d35Sstevel 
138003831d35Sstevel 		/*
138103831d35Sstevel 		 * Submit work and wait. When man_open_ctl exits
138203831d35Sstevel 		 * man_open, it will cause qwait below to return.
138303831d35Sstevel 		 */
138403831d35Sstevel 		man_work_add(man_bwork_q, wp);
138503831d35Sstevel 		while (!(wp->mw_flags & MAN_WFLAGS_DONE)) {
138603831d35Sstevel 			mutex_exit(&man_lock);
138703831d35Sstevel 			qwait(rq);
138803831d35Sstevel 			mutex_enter(&man_lock);
138903831d35Sstevel 		}
139003831d35Sstevel 		status = wp->mw_status;
139103831d35Sstevel 		man_work_free(wp);
139203831d35Sstevel 
139303831d35Sstevel 	}
139403831d35Sstevel 	mutex_exit(&man_lock);
139503831d35Sstevel 
139603831d35Sstevel 	/*
139703831d35Sstevel 	 * If on domain, setup IOSRAM and build the pathgroups
139803831d35Sstevel 	 * automatically.
139903831d35Sstevel 	 */
140003831d35Sstevel 	if ((status == 0) && man_is_on_domain)
140103831d35Sstevel 		status = man_domain_configure();
140203831d35Sstevel 
140303831d35Sstevel exit:
140403831d35Sstevel 	mutex_enter(&man_lock);
140503831d35Sstevel 
140603831d35Sstevel 	man_config_error = status;
140703831d35Sstevel 	if (status != 0)
140803831d35Sstevel 		man_config_state = MAN_UNCONFIGURED;
140903831d35Sstevel 	else
141003831d35Sstevel 		man_config_state = MAN_CONFIGURED;
141103831d35Sstevel 
141203831d35Sstevel 	mutex_exit(&man_lock);
141303831d35Sstevel 
141403831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_configure: returns %d\n", status));
141503831d35Sstevel 
141603831d35Sstevel 	return (status);
141703831d35Sstevel }
141803831d35Sstevel 
141903831d35Sstevel /*
142003831d35Sstevel  * man_close - close the device
142103831d35Sstevel  *
142203831d35Sstevel  *	rq - upper read queue of the stream
142303831d35Sstevel  *
142403831d35Sstevel  *	returns	- success - 0
142503831d35Sstevel  *		- failure - errno value for failure
142603831d35Sstevel  */
142703831d35Sstevel static int
man_close(queue_t * rq)142803831d35Sstevel man_close(queue_t *rq)
142903831d35Sstevel {
143003831d35Sstevel 	manstr_t		*close_msp;
143103831d35Sstevel 	manstr_t		*msp;
143203831d35Sstevel 
143303831d35Sstevel 	MAN_DBG(MAN_OCLOSE, ("man_close: rq(0x%p)\n", (void *)rq));
143403831d35Sstevel 
143503831d35Sstevel 	qprocsoff(rq);
143603831d35Sstevel 	close_msp = (manstr_t *)rq->q_ptr;
143703831d35Sstevel 
143803831d35Sstevel 	/*
143903831d35Sstevel 	 * Unlink the per-Stream entry from the active list and free it.
144003831d35Sstevel 	 */
144103831d35Sstevel 	if (close_msp == man_strup)
144203831d35Sstevel 		man_strup = close_msp->ms_next;
144303831d35Sstevel 	else {
144403831d35Sstevel 		for (msp = man_strup; msp && msp->ms_next != close_msp; )
144503831d35Sstevel 			msp = msp->ms_next;
144603831d35Sstevel 
144703831d35Sstevel 		if (msp == NULL) {
144803831d35Sstevel 			cmn_err(CE_WARN, "man_close: no stream!");
144903831d35Sstevel 			return (ENODEV);
145003831d35Sstevel 		}
145103831d35Sstevel 
145203831d35Sstevel 		msp->ms_next = close_msp->ms_next;
145303831d35Sstevel 	}
145403831d35Sstevel 
145503831d35Sstevel 	if (close_msp->ms_dests != NULL) {
145603831d35Sstevel 		/*
145703831d35Sstevel 		 * Still DL_ATTACHED
145803831d35Sstevel 		 */
145903831d35Sstevel 		man_work_t *wp;
146003831d35Sstevel 
146103831d35Sstevel 		wp = man_work_alloc(MAN_WORK_CLOSE_STREAM, KM_SLEEP);
146203831d35Sstevel 		man_dodetach(close_msp, wp);
146303831d35Sstevel 	}
146403831d35Sstevel 
146503831d35Sstevel 	if (close_msp->ms_flags & MAN_SFLAG_CONTROL) {
146603831d35Sstevel 		/*
146703831d35Sstevel 		 * Driver about to unload.
146803831d35Sstevel 		 */
146903831d35Sstevel 		man_ctl_wq = NULL;
147003831d35Sstevel 	}
147103831d35Sstevel 
147203831d35Sstevel 	rq->q_ptr = WR(rq)->q_ptr = NULL;
147303831d35Sstevel 	man_kfree((char *)close_msp, sizeof (manstr_t));
147403831d35Sstevel 	(void) qassociate(rq, -1);
147503831d35Sstevel 
147603831d35Sstevel 	MAN_DBG(MAN_OCLOSE, ("man_close: exit\n"));
147703831d35Sstevel 
147803831d35Sstevel 	return (0);
147903831d35Sstevel }
148003831d35Sstevel 
148103831d35Sstevel /*
148203831d35Sstevel  * Ask bgthread to tear down lower stream and qwait
148303831d35Sstevel  * until its done.
148403831d35Sstevel  */
148503831d35Sstevel static void
man_dodetach(manstr_t * msp,man_work_t * wp)148603831d35Sstevel man_dodetach(manstr_t *msp, man_work_t *wp)
148703831d35Sstevel {
148803831d35Sstevel 	man_dest_t	*mdp;
148903831d35Sstevel 	int		i;
149003831d35Sstevel 	mblk_t		*mp;
149103831d35Sstevel 
149203831d35Sstevel 	mdp = msp->ms_dests;
149303831d35Sstevel 	msp->ms_dests = NULL;
149403831d35Sstevel 	msp->ms_destp = NULL;
149503831d35Sstevel 
149603831d35Sstevel 	/*
149703831d35Sstevel 	 * Excise lower dests array, set it closing and hand it to
149803831d35Sstevel 	 * background thread to dispose of.
149903831d35Sstevel 	 */
150003831d35Sstevel 	for (i = 0; i < MAN_MAX_DESTS; i++) {
150103831d35Sstevel 
150203831d35Sstevel 		mdp[i].md_state |= MAN_DSTATE_CLOSING;
150303831d35Sstevel 		mdp[i].md_msp = NULL;
150403831d35Sstevel 		mdp[i].md_rq = NULL;
150503831d35Sstevel 
150603831d35Sstevel 		if (mdp[i].md_lc_timer_id != 0) {
150703831d35Sstevel 			(void) quntimeout(man_ctl_wq, mdp[i].md_lc_timer_id);
150803831d35Sstevel 			mdp[i].md_lc_timer_id = 0;
150903831d35Sstevel 		}
151003831d35Sstevel 		if (mdp[i].md_bc_id != 0) {
151103831d35Sstevel 			qunbufcall(man_ctl_wq, mdp[i].md_bc_id);
151203831d35Sstevel 			mdp[i].md_bc_id = 0;
151303831d35Sstevel 		}
151403831d35Sstevel 
151503831d35Sstevel 		mutex_enter(&mdp[i].md_lock);
151603831d35Sstevel 		while ((mp = mdp[i].md_dmp_head) != NULL) {
151703831d35Sstevel 			mdp[i].md_dmp_head = mp->b_next;
151803831d35Sstevel 			mp->b_next = NULL;
151903831d35Sstevel 			freemsg(mp);
152003831d35Sstevel 		}
152103831d35Sstevel 		mdp[i].md_dmp_count = 0;
152203831d35Sstevel 		mdp[i].md_dmp_tail = NULL;
152303831d35Sstevel 		mutex_exit(&mdp[i].md_lock);
152403831d35Sstevel 	}
152503831d35Sstevel 
152603831d35Sstevel 	/*
152703831d35Sstevel 	 * Dump any DL type messages previously caught.
152803831d35Sstevel 	 */
152903831d35Sstevel 	man_dl_clean(&msp->ms_dl_mp);
153003831d35Sstevel 	man_dl_clean(&msp->ms_dlioc_mp);
153103831d35Sstevel 
153203831d35Sstevel 	/*
153303831d35Sstevel 	 * We need to clear fast path flag when dlioc messages are cleaned.
153403831d35Sstevel 	 */
153503831d35Sstevel 	msp->ms_flags &= ~MAN_SFLAG_FAST;
153603831d35Sstevel 
153703831d35Sstevel 	/*
153803831d35Sstevel 	 * MAN_WORK_CLOSE_STREAM work request preallocated by caller.
153903831d35Sstevel 	 */
154003831d35Sstevel 	ASSERT(wp->mw_type == MAN_WORK_CLOSE_STREAM);
154103831d35Sstevel 	ASSERT(mdp != NULL);
154203831d35Sstevel 	wp->mw_arg.a_mdp = mdp;
154303831d35Sstevel 	wp->mw_arg.a_ndests = MAN_MAX_DESTS;
154403831d35Sstevel 	wp->mw_arg.a_pg_id = -1;	/* Don't care */
154503831d35Sstevel 
154603831d35Sstevel 	mutex_enter(&man_lock);
154703831d35Sstevel 	man_work_add(man_bwork_q, wp);
154803831d35Sstevel 	msp->ms_manp->man_refcnt--;
154903831d35Sstevel 	mutex_exit(&man_lock);
155003831d35Sstevel 
155103831d35Sstevel 	msp->ms_manp = NULL;
155203831d35Sstevel 
155303831d35Sstevel }
155403831d35Sstevel 
155503831d35Sstevel 
155603831d35Sstevel /*
155703831d35Sstevel  * man_uwput - handle DLPI messages issued from upstream, the write
155803831d35Sstevel  * side of the upper half of multiplexor. Called with shared access to
155903831d35Sstevel  * the inner perimeter.
156003831d35Sstevel  *
156103831d35Sstevel  *	wq - upper write queue of mxx
156203831d35Sstevel  *	mp - mblk ptr to DLPI request
156303831d35Sstevel  */
156403831d35Sstevel static int
man_uwput(register queue_t * wq,register mblk_t * mp)156503831d35Sstevel man_uwput(register queue_t *wq, register mblk_t *mp)
156603831d35Sstevel {
156703831d35Sstevel 	register manstr_t	*msp;		/* per stream data */
156803831d35Sstevel 	register man_t		*manp;		/* per instance data */
156903831d35Sstevel 
157003831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
157103831d35Sstevel 
157203831d35Sstevel 	MAN_DBG(MAN_UWPUT, ("man_uwput: wq(0x%p) mp(0x%p) db_type(0x%x)"
157303831d35Sstevel 	    " msp(0x%p)\n",
157403831d35Sstevel 	    (void *)wq, (void *)mp, DB_TYPE(mp), (void *)msp));
157503831d35Sstevel #if DEBUG
157603831d35Sstevel 	if (man_debug & MAN_UWPUT) {
157703831d35Sstevel 		if (DB_TYPE(mp) == M_IOCTL) {
157803831d35Sstevel 			struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
157903831d35Sstevel 			MAN_DBG(MAN_UWPUT,
158003831d35Sstevel 			    ("man_uwput: M_IOCTL ioc_cmd(0x%x)\n",
158103831d35Sstevel 			    iocp->ioc_cmd));
158203831d35Sstevel 		} else if (DB_TYPE(mp) == M_CTL) {
158303831d35Sstevel 			struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
158403831d35Sstevel 			MAN_DBG(MAN_UWPUT,
158503831d35Sstevel 			    ("man_uwput: M_CTL ioc_cmd(0x%x)\n",
158603831d35Sstevel 			    iocp->ioc_cmd));
158703831d35Sstevel 		}
158803831d35Sstevel 	}
158903831d35Sstevel #endif	/* DEBUG */
159003831d35Sstevel 
159103831d35Sstevel 
159203831d35Sstevel 	switch (DB_TYPE(mp)) {
159303831d35Sstevel 	case M_DATA:
159403831d35Sstevel 		manp = msp->ms_manp;
159503831d35Sstevel 
159603831d35Sstevel 		if (((msp->ms_flags & (MAN_SFLAG_FAST | MAN_SFLAG_RAW)) == 0) ||
159703831d35Sstevel 		    (msp->ms_dlpistate != DL_IDLE) ||
159803831d35Sstevel 		    (manp == NULL)) {
159903831d35Sstevel 
160003831d35Sstevel 			merror(wq, mp, EPROTO);
160103831d35Sstevel 			break;
160203831d35Sstevel 		}
160303831d35Sstevel 
160403831d35Sstevel 		if (wq->q_first) {
160503831d35Sstevel 			(void) putq(wq, mp);
160603831d35Sstevel 			qenable(wq);
160703831d35Sstevel 		} else {
160803831d35Sstevel 			ehdr_t	*ep = (ehdr_t *)mp->b_rptr;
160903831d35Sstevel 
161003831d35Sstevel 			(void) man_start(wq, mp, &ep->ether_dhost);
161103831d35Sstevel 		}
161203831d35Sstevel 		break;
161303831d35Sstevel 
161403831d35Sstevel 	case M_PROTO:
161503831d35Sstevel 	case M_PCPROTO:
161603831d35Sstevel 		if ((DL_PRIM(mp) == DL_UNITDATA_IND) && !wq->q_first) {
161703831d35Sstevel 			(void) man_udreq(wq, mp);
161803831d35Sstevel 		} else {
161903831d35Sstevel 			(void) putq(wq, mp);
162003831d35Sstevel 			qenable(wq);
162103831d35Sstevel 		}
162203831d35Sstevel 		break;
162303831d35Sstevel 
162403831d35Sstevel 	case M_IOCTL:
162503831d35Sstevel 	case M_IOCDATA:
162603831d35Sstevel 		qwriter(wq, mp, man_ioctl, PERIM_INNER);
162703831d35Sstevel 		break;
162803831d35Sstevel 
162903831d35Sstevel 	case M_CTL:
163003831d35Sstevel 		freemsg(mp);
163103831d35Sstevel 		break;
163203831d35Sstevel 
163303831d35Sstevel 	case M_FLUSH:
163403831d35Sstevel 		MAN_DBG(MAN_UWPUT, ("man_wput: M_FLUSH\n"));
163503831d35Sstevel 		if (*mp->b_rptr & FLUSHW)
163603831d35Sstevel 			flushq(wq, FLUSHDATA);
163703831d35Sstevel 		if (*mp->b_rptr & FLUSHR) {
163803831d35Sstevel 			flushq(RD(wq), FLUSHDATA);
163903831d35Sstevel 			*mp->b_rptr &= ~FLUSHW;
164003831d35Sstevel 			qreply(wq, mp);
164103831d35Sstevel 		} else {
164203831d35Sstevel 			freemsg(mp);
164303831d35Sstevel 		}
164403831d35Sstevel 		break;
164503831d35Sstevel 
164603831d35Sstevel 	default:
164703831d35Sstevel 		MAN_DBG(MAN_WARN,
164803831d35Sstevel 		    ("man_uwput: illegal mblk(0x%p) type(0x%x)\n",
164903831d35Sstevel 		    (void *)mp, DB_TYPE(mp)));
165003831d35Sstevel 		freemsg(mp);
165103831d35Sstevel 		break;
165203831d35Sstevel 	} /* End switch */
165303831d35Sstevel 
165403831d35Sstevel 	MAN_DBG(MAN_UWPUT, ("man_uwput: exit wq(0x%p) mp(0x%p)\n",
165503831d35Sstevel 	    (void *)wq, (void *)mp));
165603831d35Sstevel 
165703831d35Sstevel 	return (0);
165803831d35Sstevel }
165903831d35Sstevel 
166003831d35Sstevel /*
166103831d35Sstevel  * man_start - handle data messages issued from upstream.  Send down
166203831d35Sstevel  * to particular man_dest based on ether_addr, otherwise send out to all
166303831d35Sstevel  * valid man_dests.
166403831d35Sstevel  *
166503831d35Sstevel  *	wq - upper write queue of mxx
166603831d35Sstevel  *	mp - mblk ptr to DLPI request
166703831d35Sstevel  * 	caller - Caller ID for decision making on canput failure
166803831d35Sstevel  *
166903831d35Sstevel  * Returns:
167003831d35Sstevel  *	0	- Data xmitted or No flow control situation detected.
167103831d35Sstevel  *	1	- Flow control situation detected.
167203831d35Sstevel  *
167303831d35Sstevel  * STREAMS Flow Control: can be used if there is only one destination
167403831d35Sstevel  * for a stream (1 to 1 multiplexor). In this case, we will use the upper
167503831d35Sstevel  * write queue to store mblks when in flow control. If there are multiple
167603831d35Sstevel  * destinations, we cannot use the STREAMs based flow control (1 to many
167703831d35Sstevel  * multiplexor). In this case, we will use the lower write queue to store
167803831d35Sstevel  * mblks when in flow control. Since destinations come and go, we may
167903831d35Sstevel  * transition between 1-to-1 and 1-to-m. So it may be the case that we have
168003831d35Sstevel  * some mblks stored on the upper queue, and some on the lower queue. However,
168103831d35Sstevel  * we will never send mblks out of order. See man_uwput and man_start_lower().
168203831d35Sstevel  *
168303831d35Sstevel  * A simple flow control mechanism is implemented for the deferred mblk list,
168403831d35Sstevel  * as this list is expected to be used temporarily for a very short
168503831d35Sstevel  * period required for switching paths. This flow control mechanism is
168603831d35Sstevel  * used only as a defensive approach to avoid infinite growth of this list.
168703831d35Sstevel  */
168803831d35Sstevel static int
man_start(register queue_t * wq,register mblk_t * mp,eaddr_t * eap)168903831d35Sstevel man_start(register queue_t *wq, register mblk_t *mp, eaddr_t *eap)
169003831d35Sstevel {
169103831d35Sstevel 	register manstr_t	*msp;		/* per stream data */
169203831d35Sstevel 	register man_dest_t	*mdp = NULL;	/* destination */
169303831d35Sstevel 	mblk_t			*tmp;
169403831d35Sstevel 	int			i;
169503831d35Sstevel 	int			status = 0;
169603831d35Sstevel 
169703831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
169803831d35Sstevel 
169903831d35Sstevel 	MAN_DBG(MAN_DATA, ("man_start: msp(0x%p) ether_addr(%s)\n",
170003831d35Sstevel 	    (void *)msp, ether_sprintf(eap)));
170103831d35Sstevel 
170203831d35Sstevel 	if (msp->ms_dests == NULL) {
170303831d35Sstevel 		cmn_err(CE_WARN, "man_start: no destinations");
170403831d35Sstevel 		freemsg(mp);
170503831d35Sstevel 		return (0);
170603831d35Sstevel 	}
170703831d35Sstevel 
170803831d35Sstevel 	/*
170903831d35Sstevel 	 * Optimization if only one valid destination.
171003831d35Sstevel 	 */
171103831d35Sstevel 	mdp = msp->ms_destp;
171203831d35Sstevel 
171303831d35Sstevel 	if (IS_UNICAST(eap)) {
171403831d35Sstevel 		queue_t			*flow_wq = NULL;
171503831d35Sstevel 
171603831d35Sstevel 		if (mdp == NULL) {
171703831d35Sstevel 			/*
171803831d35Sstevel 			 * TDB - This needs to be optimized (some bits in
171903831d35Sstevel 			 * ehp->dhost will act as an index.
172003831d35Sstevel 			 */
172103831d35Sstevel 			for (i = 0; i < MAN_MAX_DESTS; i++) {
172203831d35Sstevel 
172303831d35Sstevel 				mdp = &msp->ms_dests[i];
172403831d35Sstevel 
172503831d35Sstevel 				if ((mdp->md_state == MAN_DSTATE_READY) &&
172603831d35Sstevel 				    (ether_cmp(eap, &mdp->md_dst_eaddr) == 0))
172703831d35Sstevel 					break;
172803831d35Sstevel 				mdp = NULL;
172903831d35Sstevel 			}
173003831d35Sstevel 		} else {
173103831d35Sstevel 			/*
173203831d35Sstevel 			 * 1 to 1 multiplexing, use upper wq for flow control.
173303831d35Sstevel 			 */
173403831d35Sstevel 			flow_wq = wq;
173503831d35Sstevel 		}
173603831d35Sstevel 
173703831d35Sstevel 		if (mdp != NULL) {
173803831d35Sstevel 			/*
173903831d35Sstevel 			 * Its going somewhere specific
174003831d35Sstevel 			 */
174103831d35Sstevel 			status =  man_start_lower(mdp, mp, flow_wq, MAN_UPPER);
174203831d35Sstevel 
174303831d35Sstevel 		} else {
174403831d35Sstevel 			MAN_DBG(MAN_DATA, ("man_start: no destination"
174503831d35Sstevel 			    " for eaddr %s\n", ether_sprintf(eap)));
174603831d35Sstevel 			freemsg(mp);
174703831d35Sstevel 		}
174803831d35Sstevel 	} else {
174903831d35Sstevel 		/*
175003831d35Sstevel 		 * Broadcast or multicast - send everone a copy.
175103831d35Sstevel 		 */
175203831d35Sstevel 		if (mdp == NULL) {
175303831d35Sstevel 			for (i = 0; i < MAN_MAX_DESTS; i++) {
175403831d35Sstevel 				mdp = &msp->ms_dests[i];
175503831d35Sstevel 
175603831d35Sstevel 				if (mdp->md_state != MAN_DSTATE_READY)
175703831d35Sstevel 					continue;
175803831d35Sstevel 
175903831d35Sstevel 				if ((tmp = copymsg(mp)) != NULL) {
176003831d35Sstevel 					(void) man_start_lower(mdp, tmp,
176103831d35Sstevel 					    NULL, MAN_UPPER);
176203831d35Sstevel 				} else {
176303831d35Sstevel 					MAN_DBG(MAN_DATA, ("man_start: copymsg"
176403831d35Sstevel 					    " failed!"));
176503831d35Sstevel 				}
176603831d35Sstevel 			}
176703831d35Sstevel 			freemsg(mp);
176803831d35Sstevel 		} else {
176903831d35Sstevel 			if (mdp->md_state == MAN_DSTATE_READY)
177003831d35Sstevel 				status =  man_start_lower(mdp, mp, wq,
177103831d35Sstevel 				    MAN_UPPER);
177203831d35Sstevel 			else
177303831d35Sstevel 				freemsg(mp);
177403831d35Sstevel 		}
177503831d35Sstevel 	}
177603831d35Sstevel 	return (status);
177703831d35Sstevel }
177803831d35Sstevel 
177903831d35Sstevel /*
178003831d35Sstevel  * Send a DL_UNITDATA or M_DATA fastpath data mblk to a particular
178103831d35Sstevel  * destination. Others mblk types sent down via * man_dlpi_senddown().
178203831d35Sstevel  *
178303831d35Sstevel  * Returns:
178403831d35Sstevel  *	0	- Data xmitted
178503831d35Sstevel  *	1	- Data not xmitted due to flow control.
178603831d35Sstevel  */
178703831d35Sstevel static int
man_start_lower(man_dest_t * mdp,mblk_t * mp,queue_t * flow_wq,int caller)178803831d35Sstevel man_start_lower(man_dest_t *mdp, mblk_t *mp, queue_t *flow_wq, int caller)
178903831d35Sstevel {
179003831d35Sstevel 	queue_t		*wq = mdp->md_wq;
179103831d35Sstevel 	int		status = 0;
179203831d35Sstevel 
179303831d35Sstevel 	/*
179403831d35Sstevel 	 * Lower stream ready for data transmit.
179503831d35Sstevel 	 */
179603831d35Sstevel 	if (mdp->md_state == MAN_DSTATE_READY &&
179703831d35Sstevel 	    mdp->md_dlpistate == DL_IDLE) {
179803831d35Sstevel 
179903831d35Sstevel 		ASSERT(mdp->md_wq != NULL);
180003831d35Sstevel 
180103831d35Sstevel 		if (caller == MAN_UPPER) {
180203831d35Sstevel 			/*
180303831d35Sstevel 			 * Check for flow control conditions for lower
180403831d35Sstevel 			 * stream.
180503831d35Sstevel 			 */
180603831d35Sstevel 			if (mdp->md_dmp_head == NULL &&
180703831d35Sstevel 			    wq->q_first == NULL && canputnext(wq)) {
180803831d35Sstevel 
180903831d35Sstevel 				(void) putnext(wq, mp);
181003831d35Sstevel 
181103831d35Sstevel 			} else {
181203831d35Sstevel 				mutex_enter(&mdp->md_lock);
181303831d35Sstevel 				if (mdp->md_dmp_head != NULL) {
181403831d35Sstevel 					/*
181503831d35Sstevel 					 * A simple flow control mechanism.
181603831d35Sstevel 					 */
181703831d35Sstevel 					if (mdp->md_dmp_count >= MAN_HIWAT) {
181803831d35Sstevel 						freemsg(mp);
181903831d35Sstevel 					} else {
182003831d35Sstevel 						/*
182103831d35Sstevel 						 * Add 'mp' to the deferred
182203831d35Sstevel 						 * msg list.
182303831d35Sstevel 						 */
182403831d35Sstevel 						mdp->md_dmp_tail->b_next = mp;
182503831d35Sstevel 						mdp->md_dmp_tail = mp;
182603831d35Sstevel 						mdp->md_dmp_count +=
182703831d35Sstevel 						    msgsize(mp);
182803831d35Sstevel 					}
182903831d35Sstevel 					mutex_exit(&mdp->md_lock);
183003831d35Sstevel 					/*
183103831d35Sstevel 					 * Inform flow control situation
183203831d35Sstevel 					 * to the caller.
183303831d35Sstevel 					 */
183403831d35Sstevel 					status = 1;
183503831d35Sstevel 					qenable(wq);
183603831d35Sstevel 					goto exit;
183703831d35Sstevel 				}
183803831d35Sstevel 				mutex_exit(&mdp->md_lock);
183903831d35Sstevel 				/*
184003831d35Sstevel 				 * If 1 to 1 mux, use upper write queue for
184103831d35Sstevel 				 * flow control.
184203831d35Sstevel 				 */
184303831d35Sstevel 				if (flow_wq != NULL) {
184403831d35Sstevel 					/*
184503831d35Sstevel 					 * putbq() message and indicate
184603831d35Sstevel 					 * flow control situation to the
184703831d35Sstevel 					 * caller.
184803831d35Sstevel 					 */
1849*07d06da5SSurya Prakki 					(void) putbq(flow_wq, mp);
185003831d35Sstevel 					qenable(flow_wq);
185103831d35Sstevel 					status = 1;
185203831d35Sstevel 					goto exit;
185303831d35Sstevel 				}
185403831d35Sstevel 				/*
185503831d35Sstevel 				 * 1 to many mux, use lower write queue for
185603831d35Sstevel 				 * flow control. Be mindful not to overflow
185703831d35Sstevel 				 * the lower MAN STREAM q.
185803831d35Sstevel 				 */
185903831d35Sstevel 				if (canput(wq)) {
186003831d35Sstevel 					(void) putq(wq, mp);
186103831d35Sstevel 					qenable(wq);
186203831d35Sstevel 				} else {
186303831d35Sstevel 					MAN_DBG(MAN_DATA, ("man_start_lower:"
186403831d35Sstevel 					    " lower q flow controlled -"
186503831d35Sstevel 					    " discarding packet"));
186603831d35Sstevel 					freemsg(mp);
186703831d35Sstevel 					goto exit;
186803831d35Sstevel 				}
186903831d35Sstevel 			}
187003831d35Sstevel 
187103831d35Sstevel 		} else {
187203831d35Sstevel 			/*
187303831d35Sstevel 			 * man_lwsrv  is draining flow controlled mblks.
187403831d35Sstevel 			 */
187503831d35Sstevel 			if (canputnext(wq))
187603831d35Sstevel 				(void) putnext(wq, mp);
187703831d35Sstevel 			else
187803831d35Sstevel 				status = 1;
187903831d35Sstevel 		}
188003831d35Sstevel 		goto exit;
188103831d35Sstevel 	}
188203831d35Sstevel 
188303831d35Sstevel 	/*
188403831d35Sstevel 	 * Lower stream in transition, do flow control.
188503831d35Sstevel 	 */
188603831d35Sstevel 	status = 1;
188703831d35Sstevel 
188803831d35Sstevel 	if (mdp->md_state == MAN_DSTATE_NOTPRESENT) {
188903831d35Sstevel nodest:
189003831d35Sstevel 		cmn_err(CE_WARN,
189103831d35Sstevel 		    "man_start_lower: no dest for mdp(0x%p), caller(%d)!",
189203831d35Sstevel 		    (void *)mdp, caller);
189303831d35Sstevel 		if (caller == MAN_UPPER)
189403831d35Sstevel 			freemsg(mp);
189503831d35Sstevel 		goto exit;
189603831d35Sstevel 	}
189703831d35Sstevel 
189803831d35Sstevel 	if (mdp->md_state & MAN_DSTATE_CLOSING) {
189903831d35Sstevel 		MAN_DBG(MAN_DATA, ("man_start_lower: mdp(0x%p) closing",
190003831d35Sstevel 		    (void *)mdp));
190103831d35Sstevel 		if (caller == MAN_UPPER)
190203831d35Sstevel 			freemsg(mp);
190303831d35Sstevel 		goto exit;
190403831d35Sstevel 	}
190503831d35Sstevel 
190603831d35Sstevel 	if ((mdp->md_state & MAN_DSTATE_PLUMBING) ||
190703831d35Sstevel 	    (mdp->md_state == MAN_DSTATE_INITIALIZING) ||
190803831d35Sstevel 	    (mdp->md_dlpistate != DL_IDLE)) {
190903831d35Sstevel 		/*
191003831d35Sstevel 		 * Defer until PLUMBED and DL_IDLE. See man_lwsrv().
191103831d35Sstevel 		 */
191203831d35Sstevel 		if (caller == MAN_UPPER) {
191303831d35Sstevel 			/*
191403831d35Sstevel 			 * Upper stream sending data down, add to defered mblk
191503831d35Sstevel 			 * list for stream.
191603831d35Sstevel 			 */
191703831d35Sstevel 			mutex_enter(&mdp->md_lock);
191803831d35Sstevel 			if (mdp->md_dmp_count >= MAN_HIWAT) {
191903831d35Sstevel 				freemsg(mp);
192003831d35Sstevel 			} else {
192103831d35Sstevel 				if (mdp->md_dmp_head == NULL) {
192203831d35Sstevel 					ASSERT(mdp->md_dmp_tail == NULL);
192303831d35Sstevel 					mdp->md_dmp_head = mp;
192403831d35Sstevel 					mdp->md_dmp_tail = mp;
192503831d35Sstevel 				} else {
192603831d35Sstevel 					mdp->md_dmp_tail->b_next = mp;
192703831d35Sstevel 					mdp->md_dmp_tail = mp;
192803831d35Sstevel 				}
192903831d35Sstevel 				mdp->md_dmp_count += msgsize(mp);
193003831d35Sstevel 			}
193103831d35Sstevel 			mutex_exit(&mdp->md_lock);
193203831d35Sstevel 		}
193303831d35Sstevel 
193403831d35Sstevel 		goto exit;
193503831d35Sstevel 	}
193603831d35Sstevel 
193703831d35Sstevel exit:
193803831d35Sstevel 	return (status);
193903831d35Sstevel }
194003831d35Sstevel 
194103831d35Sstevel /*
194203831d35Sstevel  * man_ioctl - handle ioctl requests for this driver (I_PLINK/I_PUNLINK)
194303831d35Sstevel  * or pass thru to the physical driver below.  Note that most M_IOCTLs we
194403831d35Sstevel  * care about come down the control msp, but the IOC ones come down the IP.
194503831d35Sstevel  * Called with exclusive inner perimeter.
194603831d35Sstevel  *
194703831d35Sstevel  *	wq - upper write queue of mxx
194803831d35Sstevel  *	mp - mblk ptr to DLPI ioctl request
194903831d35Sstevel  */
195003831d35Sstevel static void
man_ioctl(register queue_t * wq,register mblk_t * mp)195103831d35Sstevel man_ioctl(register queue_t *wq, register mblk_t *mp)
195203831d35Sstevel {
195303831d35Sstevel 	manstr_t		*msp;
195403831d35Sstevel 	struct iocblk		*iocp;
195503831d35Sstevel 
195603831d35Sstevel 	iocp = (struct iocblk *)mp->b_rptr;
195703831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
195803831d35Sstevel 
195903831d35Sstevel #ifdef DEBUG
196003831d35Sstevel 	{
196103831d35Sstevel 		char			ioc_cmd[30];
196203831d35Sstevel 
1963*07d06da5SSurya Prakki 		(void) sprintf(ioc_cmd, "not handled IOCTL 0x%x",
1964*07d06da5SSurya Prakki 		    iocp->ioc_cmd);
196503831d35Sstevel 		MAN_DBG((MAN_SWITCH | MAN_PATH | MAN_DLPI),
196603831d35Sstevel 		    ("man_ioctl: wq(0x%p) mp(0x%p) cmd(%s)\n",
196703831d35Sstevel 		    (void *)wq, (void *)mp,
196803831d35Sstevel 		    (iocp->ioc_cmd == I_PLINK) ? "I_PLINK" :
196903831d35Sstevel 		    (iocp->ioc_cmd == I_PUNLINK) ? "I_PUNLINK" :
197003831d35Sstevel 		    (iocp->ioc_cmd == MAN_SETPATH) ? "MAN_SETPATH" :
197103831d35Sstevel 		    (iocp->ioc_cmd == DL_IOC_HDR_INFO) ? "DL_IOC_HDR_INFO" :
197203831d35Sstevel 		    (iocp->ioc_cmd == DLIOCRAW) ? "DLIOCRAW" : ioc_cmd));
197303831d35Sstevel 	}
197403831d35Sstevel #endif /* DEBUG */
197503831d35Sstevel 
197603831d35Sstevel 
197703831d35Sstevel 	/*
197803831d35Sstevel 	 *  Handle the requests...
197903831d35Sstevel 	 */
198003831d35Sstevel 	switch ((unsigned int)iocp->ioc_cmd) {
198103831d35Sstevel 
198203831d35Sstevel 	case I_PLINK:
198303831d35Sstevel 		man_plink(wq, mp);
198403831d35Sstevel 		break;
198503831d35Sstevel 
198603831d35Sstevel 	case I_PUNLINK:
198703831d35Sstevel 		man_unplink(wq, mp);
198803831d35Sstevel 		break;
198903831d35Sstevel 
199003831d35Sstevel 	case MAN_SETPATH:
199103831d35Sstevel 		man_setpath(wq, mp);
199203831d35Sstevel 		break;
199303831d35Sstevel 
199403831d35Sstevel 	case MAN_GETEADDR:
199503831d35Sstevel 		man_geteaddr(wq, mp);
199603831d35Sstevel 		break;
199703831d35Sstevel 
199803831d35Sstevel 	case MAN_SET_LINKCHECK_TIME:
199903831d35Sstevel 		man_set_linkcheck_time(wq, mp);
200003831d35Sstevel 		break;
200103831d35Sstevel 
200203831d35Sstevel 	case MAN_SET_SC_IPADDRS:
200303831d35Sstevel 		man_set_sc_ipaddrs(wq, mp);
200403831d35Sstevel 		break;
200503831d35Sstevel 
200603831d35Sstevel 	case MAN_SET_SC_IP6ADDRS:
200703831d35Sstevel 		man_set_sc_ip6addrs(wq, mp);
200803831d35Sstevel 		break;
200903831d35Sstevel 
201003831d35Sstevel 	case DLIOCRAW:
201103831d35Sstevel 		if (man_dlioc(msp, mp))
201203831d35Sstevel 			miocnak(wq, mp, 0, ENOMEM);
201303831d35Sstevel 		else {
201403831d35Sstevel 			msp->ms_flags |= MAN_SFLAG_RAW;
201503831d35Sstevel 			miocack(wq, mp, 0, 0);
201603831d35Sstevel 		}
201703831d35Sstevel 		break;
201803831d35Sstevel 
201903831d35Sstevel 	case DL_IOC_HDR_INFO:
202003831d35Sstevel 		man_dl_ioc_hdr_info(wq, mp);
202103831d35Sstevel 		break;
202203831d35Sstevel 
202303831d35Sstevel 	case MAN_ND_GET:
202403831d35Sstevel 	case MAN_ND_SET:
202503831d35Sstevel 		man_nd_getset(wq, mp);
202603831d35Sstevel 		break;
202703831d35Sstevel 
202803831d35Sstevel 	default:
202903831d35Sstevel 		MAN_DBG(MAN_DDI, ("man_ioctl: unknown ioc_cmd %d\n",
203003831d35Sstevel 		    (unsigned int)iocp->ioc_cmd));
203103831d35Sstevel 		miocnak(wq, mp, 0, EINVAL);
203203831d35Sstevel 		break;
203303831d35Sstevel 	}
203403831d35Sstevel exit:
203503831d35Sstevel 	MAN_DBG((MAN_SWITCH | MAN_PATH | MAN_DLPI), ("man_ioctl: exit\n"));
203603831d35Sstevel 
203703831d35Sstevel }
203803831d35Sstevel 
203903831d35Sstevel /*
204003831d35Sstevel  * man_plink: handle I_PLINK requests on the control stream
204103831d35Sstevel  */
204203831d35Sstevel void
man_plink(queue_t * wq,mblk_t * mp)204303831d35Sstevel man_plink(queue_t *wq, mblk_t *mp)
204403831d35Sstevel {
204503831d35Sstevel 	struct linkblk	*linkp;
204603831d35Sstevel 	man_linkrec_t	*lrp;
204703831d35Sstevel 	int		status = 0;
204803831d35Sstevel 
204903831d35Sstevel 	linkp = (struct linkblk *)mp->b_cont->b_rptr;
205003831d35Sstevel 
205103831d35Sstevel 	/*
205203831d35Sstevel 	 * Create a record to hold lower stream info. man_plumb will
205303831d35Sstevel 	 * retrieve it after calling ldi_ioctl(I_PLINK)
205403831d35Sstevel 	 */
205503831d35Sstevel 	lrp = man_kzalloc(sizeof (man_linkrec_t), KM_NOSLEEP);
205603831d35Sstevel 	if (lrp == NULL) {
205703831d35Sstevel 		status = ENOMEM;
205803831d35Sstevel 		goto exit;
205903831d35Sstevel 	}
206003831d35Sstevel 
206103831d35Sstevel 	lrp->l_muxid = linkp->l_index;
206203831d35Sstevel 	lrp->l_wq = linkp->l_qbot;
206303831d35Sstevel 	lrp->l_rq = RD(linkp->l_qbot);
206403831d35Sstevel 
206503831d35Sstevel 	man_linkrec_insert(lrp);
206603831d35Sstevel 
206703831d35Sstevel exit:
206803831d35Sstevel 	if (status)
206903831d35Sstevel 		miocnak(wq, mp, 0, status);
207003831d35Sstevel 	else
207103831d35Sstevel 		miocack(wq, mp, 0, 0);
207203831d35Sstevel 
207303831d35Sstevel }
207403831d35Sstevel 
207503831d35Sstevel /*
207603831d35Sstevel  * man_unplink - handle I_PUNLINK requests on the control stream
207703831d35Sstevel  */
207803831d35Sstevel void
man_unplink(queue_t * wq,mblk_t * mp)207903831d35Sstevel man_unplink(queue_t *wq, mblk_t *mp)
208003831d35Sstevel {
208103831d35Sstevel 	struct linkblk	*linkp;
208203831d35Sstevel 
208303831d35Sstevel 	linkp = (struct linkblk *)mp->b_cont->b_rptr;
208403831d35Sstevel 	RD(linkp->l_qbot)->q_ptr = NULL;
208503831d35Sstevel 	WR(linkp->l_qbot)->q_ptr = NULL;
208603831d35Sstevel 	miocack(wq, mp, 0, 0);
208703831d35Sstevel }
208803831d35Sstevel 
208903831d35Sstevel void
man_linkrec_insert(man_linkrec_t * lrp)209003831d35Sstevel man_linkrec_insert(man_linkrec_t *lrp)
209103831d35Sstevel {
209203831d35Sstevel 	mutex_enter(&man_lock);
209303831d35Sstevel 
209403831d35Sstevel 	lrp->l_next = man_linkrec_head;
209503831d35Sstevel 	man_linkrec_head = lrp;
209603831d35Sstevel 
209703831d35Sstevel 	mutex_exit(&man_lock);
209803831d35Sstevel 
209903831d35Sstevel }
210003831d35Sstevel 
210103831d35Sstevel static queue_t *
man_linkrec_find(int muxid)210203831d35Sstevel man_linkrec_find(int muxid)
210303831d35Sstevel {
210403831d35Sstevel 	man_linkrec_t	*lpp;
210503831d35Sstevel 	man_linkrec_t	*lp;
210603831d35Sstevel 	queue_t		*wq = NULL;
210703831d35Sstevel 
210803831d35Sstevel 	mutex_enter(&man_lock);
210903831d35Sstevel 
211003831d35Sstevel 	if (man_linkrec_head == NULL)
211103831d35Sstevel 		goto exit;
211203831d35Sstevel 
211303831d35Sstevel 	lp = lpp = man_linkrec_head;
211403831d35Sstevel 	if (lpp->l_muxid == muxid) {
211503831d35Sstevel 		man_linkrec_head = lpp->l_next;
211603831d35Sstevel 	} else {
211703831d35Sstevel 		for (lp = lpp->l_next; lp; lp = lp->l_next) {
211803831d35Sstevel 			if (lp->l_muxid == muxid)
211903831d35Sstevel 				break;
212003831d35Sstevel 			lpp = lp;
212103831d35Sstevel 		}
212203831d35Sstevel 	}
212303831d35Sstevel 
212403831d35Sstevel 	if (lp == NULL)
212503831d35Sstevel 		goto exit;
212603831d35Sstevel 
212703831d35Sstevel 	wq = lp->l_wq;
212803831d35Sstevel 	ASSERT(wq != NULL);
212903831d35Sstevel 
213003831d35Sstevel 	lpp->l_next = lp->l_next;
213103831d35Sstevel 	man_kfree(lp, sizeof (man_linkrec_t));
213203831d35Sstevel 
213303831d35Sstevel exit:
213403831d35Sstevel 	mutex_exit(&man_lock);
213503831d35Sstevel 
213603831d35Sstevel 	return (wq);
213703831d35Sstevel }
213803831d35Sstevel 
213903831d35Sstevel /*
214003831d35Sstevel  * Set instance linkcheck timer value.
214103831d35Sstevel  */
214203831d35Sstevel static void
man_set_linkcheck_time(queue_t * wq,mblk_t * mp)214303831d35Sstevel man_set_linkcheck_time(queue_t *wq, mblk_t *mp)
214403831d35Sstevel {
214503831d35Sstevel 	mi_time_t	*mtp;
214603831d35Sstevel 	int		error;
214703831d35Sstevel 	man_t		*manp;
214803831d35Sstevel 
214903831d35Sstevel 	MAN_DBG(MAN_LINK, ("man_set_linkcheck_time: enter"));
215003831d35Sstevel 
215103831d35Sstevel 	error = miocpullup(mp, sizeof (mi_time_t));
215203831d35Sstevel 	if (error != 0)
215303831d35Sstevel 		goto exit;
215403831d35Sstevel 
215503831d35Sstevel 	mtp = (mi_time_t *)mp->b_cont->b_rptr;
215603831d35Sstevel 
215703831d35Sstevel 	MAN_DBG(MAN_LINK, ("man_set_linkcheck_time: mtp"));
215803831d35Sstevel 	MAN_DBGCALL(MAN_LINK, man_print_mtp(mtp));
215903831d35Sstevel 
216003831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, mtp->mtp_man_ppa);
216103831d35Sstevel 	if (manp == NULL) {
216203831d35Sstevel 		error = ENODEV;
216303831d35Sstevel 		goto exit;
216403831d35Sstevel 	}
216503831d35Sstevel 
216603831d35Sstevel 	manp->man_linkcheck_time = mtp->mtp_time;
216703831d35Sstevel exit:
216803831d35Sstevel 	if (error)
216903831d35Sstevel 		miocnak(wq, mp, 0, error);
217003831d35Sstevel 	else
217103831d35Sstevel 		miocack(wq, mp, sizeof (mi_time_t), 0);
217203831d35Sstevel }
217303831d35Sstevel 
217403831d35Sstevel /*
217503831d35Sstevel  * Man path ioctl processing. Should only happen on the SSC. Called
217603831d35Sstevel  * with exclusive inner perimeter.
217703831d35Sstevel  */
217803831d35Sstevel static void
man_setpath(queue_t * wq,mblk_t * mp)217903831d35Sstevel man_setpath(queue_t *wq, mblk_t *mp)
218003831d35Sstevel {
218103831d35Sstevel 	mi_path_t		*mip;
218203831d35Sstevel 	int			error;
218303831d35Sstevel 
218403831d35Sstevel 	error = miocpullup(mp, sizeof (mi_path_t));
218503831d35Sstevel 	if (error != 0)
218603831d35Sstevel 		goto exit;
218703831d35Sstevel 
218803831d35Sstevel 	mip = (mi_path_t *)mp->b_cont->b_rptr;
218903831d35Sstevel 	mutex_enter(&man_lock);
219003831d35Sstevel 	error = man_pg_cmd(mip, NULL);
219103831d35Sstevel 	mutex_exit(&man_lock);
219203831d35Sstevel 
219303831d35Sstevel exit:
219403831d35Sstevel 	if (error)
219503831d35Sstevel 		miocnak(wq, mp, 0, error);
219603831d35Sstevel 	else
219703831d35Sstevel 		miocack(wq, mp, sizeof (mi_path_t), 0);
219803831d35Sstevel }
219903831d35Sstevel 
220003831d35Sstevel /*
220103831d35Sstevel  * Get the local ethernet address of this machine.
220203831d35Sstevel  */
220303831d35Sstevel static void
man_geteaddr(queue_t * wq,mblk_t * mp)220403831d35Sstevel man_geteaddr(queue_t *wq, mblk_t *mp)
220503831d35Sstevel {
220603831d35Sstevel 	eaddr_t			*eap;
220703831d35Sstevel 	int			error;
220803831d35Sstevel 
220903831d35Sstevel 	error = miocpullup(mp, sizeof (eaddr_t));
221003831d35Sstevel 	if (error != 0) {
221103831d35Sstevel 		miocnak(wq, mp, 0, error);
221203831d35Sstevel 		return;
221303831d35Sstevel 	}
221403831d35Sstevel 
221503831d35Sstevel 	eap = (eaddr_t *)mp->b_cont->b_rptr;
221603831d35Sstevel 	(void) localetheraddr(NULL, eap);
221703831d35Sstevel 	miocack(wq, mp, sizeof (eaddr_t), 0);
221803831d35Sstevel }
221903831d35Sstevel 
222003831d35Sstevel /*
222103831d35Sstevel  * Set my SC and other SC IPv4 addresses for use in man_pinger routine.
222203831d35Sstevel  */
222303831d35Sstevel static void
man_set_sc_ipaddrs(queue_t * wq,mblk_t * mp)222403831d35Sstevel man_set_sc_ipaddrs(queue_t *wq, mblk_t *mp)
222503831d35Sstevel {
222603831d35Sstevel 	int			error;
222703831d35Sstevel 
222803831d35Sstevel 	error = miocpullup(mp, sizeof (man_sc_ipaddrs_t));
222903831d35Sstevel 	if (error != 0)
223003831d35Sstevel 		goto exit;
223103831d35Sstevel 
223203831d35Sstevel 	man_sc_ipaddrs = *(man_sc_ipaddrs_t *)mp->b_cont->b_rptr;
223303831d35Sstevel 
223403831d35Sstevel #ifdef DEBUG
223503831d35Sstevel 	{
223603831d35Sstevel 		char	buf[INET_ADDRSTRLEN];
223703831d35Sstevel 
223803831d35Sstevel 		(void) inet_ntop(AF_INET,
223903831d35Sstevel 		    (void *) &man_sc_ipaddrs.ip_other_sc_ipaddr,
224003831d35Sstevel 		    buf, INET_ADDRSTRLEN);
224103831d35Sstevel 		MAN_DBG(MAN_CONFIG, ("ip_other_sc_ipaddr = %s", buf));
224203831d35Sstevel 		(void) inet_ntop(AF_INET,
224303831d35Sstevel 		    (void *) &man_sc_ipaddrs.ip_my_sc_ipaddr,
224403831d35Sstevel 		    buf, INET_ADDRSTRLEN);
224503831d35Sstevel 		MAN_DBG(MAN_CONFIG, ("ip_my_sc_ipaddr = %s", buf));
224603831d35Sstevel 	}
224703831d35Sstevel #endif /* DEBUG */
224803831d35Sstevel exit:
224903831d35Sstevel 	if (error)
225003831d35Sstevel 		miocnak(wq, mp, 0, error);
225103831d35Sstevel 	else
225203831d35Sstevel 		miocack(wq, mp, sizeof (man_sc_ipaddrs_t), 0);
225303831d35Sstevel }
225403831d35Sstevel 
225503831d35Sstevel /*
225603831d35Sstevel  * Set my SC and other SC IPv6 addresses for use in man_pinger routine.
225703831d35Sstevel  */
225803831d35Sstevel static void
man_set_sc_ip6addrs(queue_t * wq,mblk_t * mp)225903831d35Sstevel man_set_sc_ip6addrs(queue_t *wq, mblk_t *mp)
226003831d35Sstevel {
226103831d35Sstevel 	int			error;
226203831d35Sstevel 
226303831d35Sstevel 	error = miocpullup(mp, sizeof (man_sc_ip6addrs_t));
226403831d35Sstevel 	if (error != 0)
226503831d35Sstevel 		goto exit;
226603831d35Sstevel 
226703831d35Sstevel 	man_sc_ip6addrs = *(man_sc_ip6addrs_t *)mp->b_cont->b_rptr;
226803831d35Sstevel 
226903831d35Sstevel #ifdef DEBUG
227003831d35Sstevel 	{
227103831d35Sstevel 		char	buf[INET6_ADDRSTRLEN];
227203831d35Sstevel 
227303831d35Sstevel 		(void) inet_ntop(AF_INET6,
227403831d35Sstevel 		    (void *) &man_sc_ip6addrs.ip6_other_sc_ipaddr,
227503831d35Sstevel 		    buf, INET6_ADDRSTRLEN);
227603831d35Sstevel 		MAN_DBG(MAN_CONFIG, ("ip6_other_sc_ipaddr = %s", buf));
227703831d35Sstevel 		(void) inet_ntop(AF_INET6,
227803831d35Sstevel 		    (void *) &man_sc_ip6addrs.ip6_my_sc_ipaddr,
227903831d35Sstevel 		    buf, INET6_ADDRSTRLEN);
228003831d35Sstevel 		MAN_DBG(MAN_CONFIG, ("ip6_my_sc_ipaddr = %s", buf));
228103831d35Sstevel 	}
228203831d35Sstevel #endif /* DEBUG */
228303831d35Sstevel exit:
228403831d35Sstevel 	if (error)
228503831d35Sstevel 		miocnak(wq, mp, 0, error);
228603831d35Sstevel 	else
228703831d35Sstevel 		miocack(wq, mp, sizeof (man_sc_ip6addrs_t), 0);
228803831d35Sstevel }
228903831d35Sstevel 
229003831d35Sstevel /*
229103831d35Sstevel  * M_DATA fastpath info request.
229203831d35Sstevel  */
229303831d35Sstevel static void
man_dl_ioc_hdr_info(queue_t * wq,mblk_t * mp)229403831d35Sstevel man_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
229503831d35Sstevel {
229603831d35Sstevel 	manstr_t		*msp;
229703831d35Sstevel 	man_t			*manp;
229803831d35Sstevel 	mblk_t			*nmp;
229903831d35Sstevel 	man_dladdr_t		*dlap;
230003831d35Sstevel 	dl_unitdata_req_t	*dludp;
230103831d35Sstevel 	struct	ether_header	*headerp;
230203831d35Sstevel 	t_uscalar_t		off, len;
230303831d35Sstevel 	int			status = 0;
230403831d35Sstevel 
230503831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_dl_ioc_hdr_info: enter"));
230603831d35Sstevel 
230703831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
230803831d35Sstevel 	manp = msp->ms_manp;
230903831d35Sstevel 	if (manp == NULL) {
231003831d35Sstevel 		status = EINVAL;
231103831d35Sstevel 		goto exit;
231203831d35Sstevel 	}
231303831d35Sstevel 
231403831d35Sstevel 	status = miocpullup(mp, sizeof (dl_unitdata_req_t) + MAN_ADDRL);
231503831d35Sstevel 	if (status != 0)
231603831d35Sstevel 		goto exit;
231703831d35Sstevel 
231803831d35Sstevel 	/*
231903831d35Sstevel 	 * Sanity check the DL_UNITDATA_REQ destination address
232003831d35Sstevel 	 * offset and length values.
232103831d35Sstevel 	 */
232203831d35Sstevel 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
232303831d35Sstevel 	off = dludp->dl_dest_addr_offset;
232403831d35Sstevel 	len = dludp->dl_dest_addr_length;
232503831d35Sstevel 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
232603831d35Sstevel 	    !MBLKIN(mp->b_cont, off, len) || len != MAN_ADDRL) {
232703831d35Sstevel 		status = EINVAL;
232803831d35Sstevel 		goto exit;
232903831d35Sstevel 	}
233003831d35Sstevel 
233103831d35Sstevel 	dlap = (man_dladdr_t  *)(mp->b_cont->b_rptr + off);
233203831d35Sstevel 
233303831d35Sstevel 	/*
233403831d35Sstevel 	 * Allocate a new mblk to hold the ether header.
233503831d35Sstevel 	 */
233603831d35Sstevel 	if ((nmp = allocb(ETHERHEADER_SIZE, BPRI_MED)) == NULL) {
233703831d35Sstevel 		status = ENOMEM;
233803831d35Sstevel 		goto exit;
233903831d35Sstevel 	}
234003831d35Sstevel 
234103831d35Sstevel 	/* We only need one dl_ioc_hdr mblk for replay */
234203831d35Sstevel 	if (!(msp->ms_flags & MAN_SFLAG_FAST))
234303831d35Sstevel 		status = man_dl_catch(&msp->ms_dlioc_mp, mp);
234403831d35Sstevel 
234503831d35Sstevel 	/* Forward the packet to all lower destinations. */
234603831d35Sstevel 	if ((status != 0) || ((status = man_dlpi_senddown(msp, mp)) != 0)) {
234703831d35Sstevel 		freemsg(nmp);
234803831d35Sstevel 		goto exit;
234903831d35Sstevel 	}
235003831d35Sstevel 
235103831d35Sstevel 	nmp->b_wptr += ETHERHEADER_SIZE;
235203831d35Sstevel 
235303831d35Sstevel 	/*
235403831d35Sstevel 	 * Fill in the ether header.
235503831d35Sstevel 	 */
235603831d35Sstevel 	headerp = (struct ether_header *)nmp->b_rptr;
235703831d35Sstevel 	ether_copy(&dlap->dl_phys, &headerp->ether_dhost);
235803831d35Sstevel 	ether_copy(&manp->man_eaddr, &headerp->ether_shost);
235903831d35Sstevel 	put_ether_type(headerp, dlap->dl_sap);
236003831d35Sstevel 
236103831d35Sstevel 	/*
236203831d35Sstevel 	 * Link new mblk in after the "request" mblks.
236303831d35Sstevel 	 */
236403831d35Sstevel 	linkb(mp, nmp);
236503831d35Sstevel 
236603831d35Sstevel exit:
236703831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_dl_ioc_hdr_info: returns, status = %d",
236803831d35Sstevel 	    status));
236903831d35Sstevel 
237003831d35Sstevel 	if (status) {
237103831d35Sstevel 		miocnak(wq, mp, 0, status);
237203831d35Sstevel 	} else {
237303831d35Sstevel 		msp = (manstr_t *)wq->q_ptr;
237403831d35Sstevel 		msp->ms_flags |= MAN_SFLAG_FAST;
237503831d35Sstevel 		miocack(wq, mp, msgsize(mp->b_cont), 0);
237603831d35Sstevel 	}
237703831d35Sstevel 
237803831d35Sstevel }
237903831d35Sstevel 
238003831d35Sstevel /*
238103831d35Sstevel  * man_uwsrv - Upper write queue service routine to handle deferred
238203831d35Sstevel  * DLPI messages issued from upstream, the write side of the upper half
238303831d35Sstevel  * of multiplexor. It is also used by man_bwork to switch the lower
238403831d35Sstevel  * multiplexor.
238503831d35Sstevel  *
238603831d35Sstevel  *	wq - upper write queue of mxx
238703831d35Sstevel  */
238803831d35Sstevel static int
man_uwsrv(queue_t * wq)238903831d35Sstevel man_uwsrv(queue_t *wq)
239003831d35Sstevel {
239103831d35Sstevel 	register mblk_t		*mp;
239203831d35Sstevel 	manstr_t		*msp;		/* per stream data */
239303831d35Sstevel 	man_t			*manp;		/* per instance data */
239403831d35Sstevel 	ehdr_t			*ep;
239503831d35Sstevel 	int			status;
239603831d35Sstevel 
239703831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
239803831d35Sstevel 
239903831d35Sstevel 	MAN_DBG(MAN_UWSRV, ("man_uwsrv: wq(0x%p) msp", (void *)wq));
240003831d35Sstevel 	MAN_DBGCALL(MAN_UWSRV, man_print_msp(msp));
240103831d35Sstevel 
240203831d35Sstevel 	if (msp == NULL)
240303831d35Sstevel 		goto done;
240403831d35Sstevel 
240503831d35Sstevel 	manp = msp->ms_manp;
240603831d35Sstevel 
240703831d35Sstevel 	while (mp = getq(wq)) {
240803831d35Sstevel 
240903831d35Sstevel 		switch (DB_TYPE(mp)) {
241003831d35Sstevel 		/*
241103831d35Sstevel 		 * Can probably remove this as I never put data messages
241203831d35Sstevel 		 * here.
241303831d35Sstevel 		 */
241403831d35Sstevel 		case M_DATA:
241503831d35Sstevel 			if (manp) {
241603831d35Sstevel 				ep = (ehdr_t *)mp->b_rptr;
241703831d35Sstevel 				status = man_start(wq, mp, &ep->ether_dhost);
241803831d35Sstevel 				if (status) {
241903831d35Sstevel 					/*
242003831d35Sstevel 					 * man_start() indicated flow control
242103831d35Sstevel 					 * situation, stop processing now.
242203831d35Sstevel 					 */
242303831d35Sstevel 					goto break_loop;
242403831d35Sstevel 				}
242503831d35Sstevel 			} else
242603831d35Sstevel 				freemsg(mp);
242703831d35Sstevel 			break;
242803831d35Sstevel 
242903831d35Sstevel 		case M_PROTO:
243003831d35Sstevel 		case M_PCPROTO:
243103831d35Sstevel 			status = man_proto(wq, mp);
243203831d35Sstevel 			if (status) {
243303831d35Sstevel 				/*
243403831d35Sstevel 				 * man_proto() indicated flow control
243503831d35Sstevel 				 * situation detected by man_start(),
243603831d35Sstevel 				 * stop processing now.
243703831d35Sstevel 				 */
243803831d35Sstevel 				goto break_loop;
243903831d35Sstevel 			}
244003831d35Sstevel 			break;
244103831d35Sstevel 
244203831d35Sstevel 		default:
244303831d35Sstevel 			MAN_DBG(MAN_UWSRV, ("man_uwsrv: discarding mp(0x%p)",
244403831d35Sstevel 			    (void *)mp));
244503831d35Sstevel 			freemsg(mp);
244603831d35Sstevel 			break;
244703831d35Sstevel 		}
244803831d35Sstevel 	}
244903831d35Sstevel 
245003831d35Sstevel break_loop:
245103831d35Sstevel 	/*
245203831d35Sstevel 	 * Check to see if bgthread wants us to do something inside the
245303831d35Sstevel 	 * perimeter.
245403831d35Sstevel 	 */
245503831d35Sstevel 	if ((msp->ms_flags & MAN_SFLAG_CONTROL) &&
245603831d35Sstevel 	    man_iwork_q->q_work != NULL) {
245703831d35Sstevel 
245803831d35Sstevel 		man_iwork();
245903831d35Sstevel 	}
246003831d35Sstevel 
246103831d35Sstevel done:
246203831d35Sstevel 
246303831d35Sstevel 	MAN_DBG(MAN_UWSRV, ("man_uwsrv: returns"));
246403831d35Sstevel 
246503831d35Sstevel 	return (0);
246603831d35Sstevel }
246703831d35Sstevel 
246803831d35Sstevel 
246903831d35Sstevel /*
247003831d35Sstevel  * man_proto - handle DLPI protocol requests issued from upstream.
247103831d35Sstevel  * Called by man_uwsrv().  We disassociate upper and lower multiplexor
247203831d35Sstevel  * DLPI state transitions. The upper stream here (manstr_t) transitions
247303831d35Sstevel  * appropriately, saves the DLPI requests via man_dlpi(), and then
247403831d35Sstevel  * arranges for the DLPI request to be sent down via man_dlpi_senddown() if
247503831d35Sstevel  * appropriate.
247603831d35Sstevel  *
247703831d35Sstevel  *	wq - upper write queue of mxx
247803831d35Sstevel  *	mp - mbl ptr to protocol request
247903831d35Sstevel  */
248003831d35Sstevel static int
man_proto(queue_t * wq,mblk_t * mp)248103831d35Sstevel man_proto(queue_t *wq, mblk_t *mp)
248203831d35Sstevel {
248303831d35Sstevel 	union DL_primitives	*dlp;
248403831d35Sstevel 	int			flow_status = 0;
248503831d35Sstevel 
248603831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
248703831d35Sstevel 
248803831d35Sstevel 	MAN_DBG((MAN_UWSRV | MAN_DLPI),
248903831d35Sstevel 	    ("man_proto: mp(0x%p) prim(%s)\n", (void *)mp,
249003831d35Sstevel 	    dps[dlp->dl_primitive]));
249103831d35Sstevel 
249203831d35Sstevel 	switch (dlp->dl_primitive) {
249303831d35Sstevel 	case DL_UNITDATA_REQ:
249403831d35Sstevel 		flow_status = man_udreq(wq, mp);
249503831d35Sstevel 		break;
249603831d35Sstevel 
249703831d35Sstevel 	case DL_ATTACH_REQ:
249803831d35Sstevel 		man_areq(wq, mp);
249903831d35Sstevel 		break;
250003831d35Sstevel 
250103831d35Sstevel 	case DL_DETACH_REQ:
250203831d35Sstevel 		man_dreq(wq, mp);
250303831d35Sstevel 		break;
250403831d35Sstevel 
250503831d35Sstevel 	case DL_BIND_REQ:
250603831d35Sstevel 		man_breq(wq, mp);
250703831d35Sstevel 		break;
250803831d35Sstevel 
250903831d35Sstevel 	case DL_UNBIND_REQ:
251003831d35Sstevel 		man_ubreq(wq, mp);
251103831d35Sstevel 		break;
251203831d35Sstevel 
251303831d35Sstevel 	case DL_INFO_REQ:
251403831d35Sstevel 		man_ireq(wq, mp);
251503831d35Sstevel 		break;
251603831d35Sstevel 
251703831d35Sstevel 	case DL_PROMISCON_REQ:
251803831d35Sstevel 		man_ponreq(wq, mp);
251903831d35Sstevel 		break;
252003831d35Sstevel 
252103831d35Sstevel 	case DL_PROMISCOFF_REQ:
252203831d35Sstevel 		man_poffreq(wq, mp);
252303831d35Sstevel 		break;
252403831d35Sstevel 
252503831d35Sstevel 	case DL_ENABMULTI_REQ:
252603831d35Sstevel 		man_emreq(wq, mp);
252703831d35Sstevel 		break;
252803831d35Sstevel 
252903831d35Sstevel 	case DL_DISABMULTI_REQ:
253003831d35Sstevel 		man_dmreq(wq, mp);
253103831d35Sstevel 		break;
253203831d35Sstevel 
253303831d35Sstevel 	case DL_PHYS_ADDR_REQ:
253403831d35Sstevel 		man_pareq(wq, mp);
253503831d35Sstevel 		break;
253603831d35Sstevel 
253703831d35Sstevel 	case DL_SET_PHYS_ADDR_REQ:
253803831d35Sstevel 		man_spareq(wq, mp);
253903831d35Sstevel 		break;
254003831d35Sstevel 
254103831d35Sstevel 	default:
254203831d35Sstevel 		MAN_DBG((MAN_UWSRV | MAN_DLPI), ("man_proto: prim(%d)\n",
254303831d35Sstevel 		    dlp->dl_primitive));
254403831d35Sstevel 		dlerrorack(wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
254503831d35Sstevel 		break;
254603831d35Sstevel 
254703831d35Sstevel 	} /* End switch */
254803831d35Sstevel 
254903831d35Sstevel 	MAN_DBG((MAN_UWSRV | MAN_DLPI), ("man_proto: exit\n"));
255003831d35Sstevel 	return (flow_status);
255103831d35Sstevel 
255203831d35Sstevel }
255303831d35Sstevel 
255403831d35Sstevel static int
man_udreq(queue_t * wq,mblk_t * mp)255503831d35Sstevel man_udreq(queue_t *wq, mblk_t *mp)
255603831d35Sstevel {
255703831d35Sstevel 	manstr_t		*msp;
255803831d35Sstevel 	dl_unitdata_req_t	*dludp;
255903831d35Sstevel 	mblk_t	*nmp;
256003831d35Sstevel 	man_dladdr_t		*dlap;
256103831d35Sstevel 	t_uscalar_t 		off, len;
256203831d35Sstevel 	int 			flow_status = 0;
256303831d35Sstevel 
256403831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
256503831d35Sstevel 
256603831d35Sstevel 
256703831d35Sstevel 	if (msp->ms_dlpistate != DL_IDLE) {
256803831d35Sstevel 		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
256903831d35Sstevel 		return (flow_status);
257003831d35Sstevel 	}
257103831d35Sstevel 	dludp = (dl_unitdata_req_t *)mp->b_rptr;
257203831d35Sstevel 	off = dludp->dl_dest_addr_offset;
257303831d35Sstevel 	len = dludp->dl_dest_addr_length;
257403831d35Sstevel 
257503831d35Sstevel 	/*
257603831d35Sstevel 	 * Validate destination address format.
257703831d35Sstevel 	 */
257803831d35Sstevel 	if (!MBLKIN(mp, off, len) || (len != MAN_ADDRL)) {
257903831d35Sstevel 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADADDR, 0);
258003831d35Sstevel 		return (flow_status);
258103831d35Sstevel 	}
258203831d35Sstevel 
258303831d35Sstevel 	/*
258403831d35Sstevel 	 * Error if no M_DATA follows.
258503831d35Sstevel 	 */
258603831d35Sstevel 	nmp = mp->b_cont;
258703831d35Sstevel 	if (nmp == NULL) {
258803831d35Sstevel 		dluderrorind(wq, mp, mp->b_rptr + off, len, DL_BADDATA, 0);
258903831d35Sstevel 		return (flow_status);
259003831d35Sstevel 	}
259103831d35Sstevel 
259203831d35Sstevel 	dlap = (man_dladdr_t *)(mp->b_rptr + off);
259303831d35Sstevel 
259403831d35Sstevel 	flow_status = man_start(wq, mp, &dlap->dl_phys);
259503831d35Sstevel 	return (flow_status);
259603831d35Sstevel }
259703831d35Sstevel 
259803831d35Sstevel /*
259903831d35Sstevel  * Handle DL_ATTACH_REQ.
260003831d35Sstevel  */
260103831d35Sstevel static void
man_areq(queue_t * wq,mblk_t * mp)260203831d35Sstevel man_areq(queue_t *wq, mblk_t *mp)
260303831d35Sstevel {
260403831d35Sstevel 	man_t			*manp;	/* per instance data */
260503831d35Sstevel 	manstr_t		*msp;	/* per stream data */
260603831d35Sstevel 	short			ppa;
260703831d35Sstevel 	union DL_primitives	*dlp;
260803831d35Sstevel 	mblk_t			*preq = NULL;
260903831d35Sstevel 	int			did_refcnt = FALSE;
261003831d35Sstevel 	int			dlerror = 0;
261103831d35Sstevel 	int			status = 0;
261203831d35Sstevel 
261303831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
261403831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
261503831d35Sstevel 
261603831d35Sstevel 	/*
261703831d35Sstevel 	 * Attach us to MAN PPA (device instance).
261803831d35Sstevel 	 */
261903831d35Sstevel 	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
262003831d35Sstevel 		dlerror = DL_BADPRIM;
262103831d35Sstevel 		goto exit;
262203831d35Sstevel 	}
262303831d35Sstevel 
262403831d35Sstevel 	if (msp->ms_dlpistate != DL_UNATTACHED) {
262503831d35Sstevel 		dlerror = DL_OUTSTATE;
262603831d35Sstevel 		goto exit;
262703831d35Sstevel 	}
262803831d35Sstevel 
262903831d35Sstevel 	ppa = dlp->attach_req.dl_ppa;
263003831d35Sstevel 	if (ppa == -1 || qassociate(wq, ppa) != 0) {
263103831d35Sstevel 		dlerror = DL_BADPPA;
263203831d35Sstevel 		MAN_DBG(MAN_WARN, ("man_areq: bad PPA %d", ppa));
263303831d35Sstevel 		goto exit;
263403831d35Sstevel 	}
263503831d35Sstevel 
263603831d35Sstevel 	mutex_enter(&man_lock);
263703831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, ppa);
263803831d35Sstevel 	ASSERT(manp != NULL);	/* qassociate() succeeded */
263903831d35Sstevel 
264003831d35Sstevel 	manp->man_refcnt++;
264103831d35Sstevel 	did_refcnt = TRUE;
264203831d35Sstevel 	mutex_exit(&man_lock);
264303831d35Sstevel 
264403831d35Sstevel 	/*
264503831d35Sstevel 	 * Create a DL replay list for the lower stream. These wont
264603831d35Sstevel 	 * actually be sent down until the lower streams are made active
264703831d35Sstevel 	 * (sometime after the call to man_init_dests below).
264803831d35Sstevel 	 */
264903831d35Sstevel 	preq = man_alloc_physreq_mp(&manp->man_eaddr);
265003831d35Sstevel 	if (preq == NULL) {
265103831d35Sstevel 		dlerror = DL_SYSERR;
265203831d35Sstevel 		status = ENOMEM;
265303831d35Sstevel 		goto exit;
265403831d35Sstevel 	}
265503831d35Sstevel 
265603831d35Sstevel 	/*
265703831d35Sstevel 	 * Make copy for dlpi resync of upper and lower streams.
265803831d35Sstevel 	 */
265903831d35Sstevel 	if (man_dlpi(msp, mp)) {
266003831d35Sstevel 		dlerror = DL_SYSERR;
266103831d35Sstevel 		status = ENOMEM;
266203831d35Sstevel 		goto exit;
266303831d35Sstevel 	}
266403831d35Sstevel 
266503831d35Sstevel 	/* TBD - need to clean off ATTACH req on failure here. */
266603831d35Sstevel 	if (man_dlpi(msp, preq)) {
266703831d35Sstevel 		dlerror = DL_SYSERR;
266803831d35Sstevel 		status = ENOMEM;
266903831d35Sstevel 		goto exit;
267003831d35Sstevel 	}
267103831d35Sstevel 
267203831d35Sstevel 	/*
267303831d35Sstevel 	 * man_init_dests/man_start_dest needs these set before call.
267403831d35Sstevel 	 */
267503831d35Sstevel 	msp->ms_manp = manp;
267603831d35Sstevel 	msp->ms_meta_ppa = ppa;
267703831d35Sstevel 
267803831d35Sstevel 	/*
267903831d35Sstevel 	 *  Allocate and init lower destination structures.
268003831d35Sstevel 	 */
268103831d35Sstevel 	ASSERT(msp->ms_dests == NULL);
268203831d35Sstevel 	if (man_init_dests(manp, msp)) {
268303831d35Sstevel 		mblk_t	 *tmp;
268403831d35Sstevel 
268503831d35Sstevel 		/*
268603831d35Sstevel 		 * If we cant get the lower streams ready, then
268703831d35Sstevel 		 * remove the messages from the DL replay list and
268803831d35Sstevel 		 * fail attach.
268903831d35Sstevel 		 */
269003831d35Sstevel 		while ((tmp = msp->ms_dl_mp) != NULL) {
269103831d35Sstevel 			msp->ms_dl_mp = msp->ms_dl_mp->b_next;
269203831d35Sstevel 			tmp->b_next = tmp->b_prev = NULL;
269303831d35Sstevel 			freemsg(tmp);
269403831d35Sstevel 		}
269503831d35Sstevel 
269603831d35Sstevel 		msp->ms_manp = NULL;
269703831d35Sstevel 		msp->ms_meta_ppa = -1;
269803831d35Sstevel 
269903831d35Sstevel 		dlerror = DL_SYSERR;
270003831d35Sstevel 		status = ENOMEM;
270103831d35Sstevel 		goto exit;
270203831d35Sstevel 	}
270303831d35Sstevel 
270403831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_areq: ppa 0x%x man_refcnt: %d\n",
270503831d35Sstevel 	    ppa, manp->man_refcnt));
270603831d35Sstevel 
270703831d35Sstevel 	SETSTATE(msp, DL_UNBOUND);
270803831d35Sstevel 
270903831d35Sstevel exit:
271003831d35Sstevel 	if (dlerror == 0) {
271103831d35Sstevel 		dlokack(wq, mp, DL_ATTACH_REQ);
271203831d35Sstevel 	} else {
271303831d35Sstevel 		if (did_refcnt) {
271403831d35Sstevel 			mutex_enter(&man_lock);
271503831d35Sstevel 			manp->man_refcnt--;
271603831d35Sstevel 			mutex_exit(&man_lock);
271703831d35Sstevel 		}
271803831d35Sstevel 		dlerrorack(wq, mp, DL_ATTACH_REQ, dlerror, status);
271903831d35Sstevel 		(void) qassociate(wq, -1);
272003831d35Sstevel 	}
272103831d35Sstevel 	if (preq != NULL)
272203831d35Sstevel 		freemsg(preq);
272303831d35Sstevel 
272403831d35Sstevel }
272503831d35Sstevel 
272603831d35Sstevel /*
272703831d35Sstevel  * Called at DL_ATTACH time.
272803831d35Sstevel  * Man_lock is held to protect pathgroup list(man_pg).
272903831d35Sstevel  */
273003831d35Sstevel static int
man_init_dests(man_t * manp,manstr_t * msp)273103831d35Sstevel man_init_dests(man_t *manp, manstr_t *msp)
273203831d35Sstevel {
273303831d35Sstevel 	man_dest_t	*mdp;
273403831d35Sstevel 	man_pg_t	*mpg;
273503831d35Sstevel 	int		i;
273603831d35Sstevel 
273703831d35Sstevel 	mdp = man_kzalloc(MAN_DEST_ARRAY_SIZE, KM_NOSLEEP);
273803831d35Sstevel 	if (mdp == NULL)
273903831d35Sstevel 		return (ENOMEM);
274003831d35Sstevel 
274103831d35Sstevel 	msp->ms_dests = mdp;
274203831d35Sstevel 
274303831d35Sstevel 	mutex_enter(&man_lock);
274403831d35Sstevel 	for (i = 0; i < MAN_MAX_DESTS; i++) {
274503831d35Sstevel 
274603831d35Sstevel 		mdp[i].md_muxid = -1;	/* muxid 0 is valid */
274703831d35Sstevel 		mutex_init(&mdp->md_lock, NULL, MUTEX_DRIVER, NULL);
274803831d35Sstevel 
274903831d35Sstevel 		mpg = man_find_pg_by_id(manp->man_pg, i);
275003831d35Sstevel 
275103831d35Sstevel 		if (mpg && man_find_active_path(mpg->mpg_pathp))
275203831d35Sstevel 			man_start_dest(&mdp[i], msp, mpg);
275303831d35Sstevel 	}
275403831d35Sstevel 	mutex_exit(&man_lock);
275503831d35Sstevel 
275603831d35Sstevel 	return (0);
275703831d35Sstevel }
275803831d35Sstevel 
275903831d35Sstevel /*
276003831d35Sstevel  * Get a destination ready for use.
276103831d35Sstevel  */
276203831d35Sstevel static void
man_start_dest(man_dest_t * mdp,manstr_t * msp,man_pg_t * mpg)276303831d35Sstevel man_start_dest(man_dest_t *mdp, manstr_t *msp, man_pg_t *mpg)
276403831d35Sstevel {
276503831d35Sstevel 	man_path_t	*ap;
276603831d35Sstevel 
276703831d35Sstevel 	mdp->md_muxid = -1;
276803831d35Sstevel 	mdp->md_dlpistate = DL_UNATTACHED;
276903831d35Sstevel 	mdp->md_msp = msp;
277003831d35Sstevel 	mdp->md_rq = msp->ms_rq;
277103831d35Sstevel 	mdp->md_pg_id = mpg->mpg_pg_id;
277203831d35Sstevel 
277303831d35Sstevel 	ASSERT(msp->ms_manp);
277403831d35Sstevel 
277503831d35Sstevel 	ether_copy(&msp->ms_manp->man_eaddr, &mdp->md_src_eaddr);
277603831d35Sstevel 	ether_copy(&mpg->mpg_dst_eaddr, &mdp->md_dst_eaddr);
277703831d35Sstevel 
277803831d35Sstevel 	ap = man_find_active_path(mpg->mpg_pathp);
277903831d35Sstevel 	ASSERT(ap);
278003831d35Sstevel 	mdp->md_device = ap->mp_device;
278103831d35Sstevel 
278203831d35Sstevel 	/*
278303831d35Sstevel 	 * Set up linktimers so that first time through, we will do
278403831d35Sstevel 	 * a failover.
278503831d35Sstevel 	 */
278603831d35Sstevel 	mdp->md_linkstate = MAN_LINKFAIL;
278703831d35Sstevel 	mdp->md_state = MAN_DSTATE_INITIALIZING;
278803831d35Sstevel 	mdp->md_lc_timer_id = qtimeout(man_ctl_wq, man_linkcheck_timer,
278903831d35Sstevel 	    (void *)mdp, man_gettimer(MAN_TIMER_INIT, mdp));
279003831d35Sstevel 
279103831d35Sstevel 	/*
279203831d35Sstevel 	 * As an optimization, if there is only one destination,
279303831d35Sstevel 	 * remember the destination pointer. Used by man_start().
279403831d35Sstevel 	 */
279503831d35Sstevel 	man_set_optimized_dest(msp);
279603831d35Sstevel 
279703831d35Sstevel 	MAN_DBG(MAN_DEST, ("man_start_dest: mdp"));
279803831d35Sstevel 	MAN_DBGCALL(MAN_DEST, man_print_mdp(mdp));
279903831d35Sstevel }
280003831d35Sstevel 
280103831d35Sstevel static void
man_set_optimized_dest(manstr_t * msp)280203831d35Sstevel man_set_optimized_dest(manstr_t *msp)
280303831d35Sstevel {
280403831d35Sstevel 	int		count = 0;
280503831d35Sstevel 	int		i;
280603831d35Sstevel 	man_dest_t	*mdp = NULL;
280703831d35Sstevel 
280803831d35Sstevel 	for (i = 0; i < MAN_MAX_DESTS; i++) {
280903831d35Sstevel 		if (msp->ms_dests[i].md_msp != NULL) {
281003831d35Sstevel 			count++;
281103831d35Sstevel 			mdp = &msp->ms_dests[i];
281203831d35Sstevel 		}
281303831d35Sstevel 	}
281403831d35Sstevel 
281503831d35Sstevel 	if (count == 1)
281603831d35Sstevel 		msp->ms_destp = mdp;
281703831d35Sstevel 	else
281803831d35Sstevel 		msp->ms_destp = NULL;
281903831d35Sstevel 
282003831d35Sstevel }
282103831d35Sstevel 
282203831d35Sstevel /*
282303831d35Sstevel  * Catch dlpi message for replaying, and arrange to send it down
282403831d35Sstevel  * to any destinations not PLUMBING. See man_dlpi_replay().
282503831d35Sstevel  */
282603831d35Sstevel static int
man_dlpi(manstr_t * msp,mblk_t * mp)282703831d35Sstevel man_dlpi(manstr_t *msp, mblk_t *mp)
282803831d35Sstevel {
282903831d35Sstevel 	int	status;
283003831d35Sstevel 
283103831d35Sstevel 	status = man_dl_catch(&msp->ms_dl_mp, mp);
283203831d35Sstevel 	if (status == 0)
283303831d35Sstevel 		status = man_dlpi_senddown(msp, mp);
283403831d35Sstevel 
283503831d35Sstevel 	return (status);
283603831d35Sstevel }
283703831d35Sstevel 
283803831d35Sstevel /*
283903831d35Sstevel  * Catch IOCTL type DL_ messages.
284003831d35Sstevel  */
284103831d35Sstevel static int
man_dlioc(manstr_t * msp,mblk_t * mp)284203831d35Sstevel man_dlioc(manstr_t *msp, mblk_t *mp)
284303831d35Sstevel {
284403831d35Sstevel 	int status;
284503831d35Sstevel 
284603831d35Sstevel 	status = man_dl_catch(&msp->ms_dlioc_mp, mp);
284703831d35Sstevel 	if (status == 0)
284803831d35Sstevel 		status = man_dlpi_senddown(msp, mp);
284903831d35Sstevel 
285003831d35Sstevel 	return (status);
285103831d35Sstevel }
285203831d35Sstevel 
285303831d35Sstevel /*
285403831d35Sstevel  * We catch all DLPI messages that we have to resend to a new AP'ed
285503831d35Sstevel  * device to put him in the right state.  We link these messages together
285603831d35Sstevel  * w/ their b_next fields and hang it off of msp->ms_dl_mp.  We
285703831d35Sstevel  * must be careful to restore b_next fields before doing dupmsg/freemsg!
285803831d35Sstevel  *
285903831d35Sstevel  *	msp - pointer of stream struct to process
286003831d35Sstevel  *	mblk - pointer to DLPI request to catch
286103831d35Sstevel  */
286203831d35Sstevel static int
man_dl_catch(mblk_t ** mplist,mblk_t * mp)286303831d35Sstevel man_dl_catch(mblk_t **mplist, mblk_t *mp)
286403831d35Sstevel {
286503831d35Sstevel 	mblk_t			*dupmp;
286603831d35Sstevel 	mblk_t			*tmp;
286703831d35Sstevel 	unsigned		prim;
286803831d35Sstevel 	int			status = 0;
286903831d35Sstevel 
287003831d35Sstevel 	dupmp = copymsg(mp);
287103831d35Sstevel 	if (dupmp == NULL) {
287203831d35Sstevel 		status = ENOMEM;
287303831d35Sstevel 		goto exit;
287403831d35Sstevel 	}
287503831d35Sstevel 
287603831d35Sstevel 
287703831d35Sstevel 	if (*mplist == NULL)
287803831d35Sstevel 		*mplist = dupmp;
287903831d35Sstevel 	else {
288003831d35Sstevel 		for (tmp = *mplist; tmp->b_next; )
288103831d35Sstevel 			tmp = tmp->b_next;
288203831d35Sstevel 
288303831d35Sstevel 		tmp->b_next = dupmp;
288403831d35Sstevel 	}
288503831d35Sstevel 
288603831d35Sstevel 	prim = DL_PRIM(mp);
288703831d35Sstevel 	MAN_DBG(MAN_DLPI,
288803831d35Sstevel 	    ("man_dl_catch: adding %s\n",
288903831d35Sstevel 	    (prim == DL_IOC_HDR_INFO) ? "DL_IOC_HDR_INFO" :
289003831d35Sstevel 	    (prim == DLIOCRAW) ? "DLIOCRAW" :
289103831d35Sstevel 	    (prim == DL_PROMISCON_REQ) ? promisc[DL_PROMISCON_TYPE(mp)] :
289203831d35Sstevel 	    dps[prim]));
289303831d35Sstevel 
289403831d35Sstevel exit:
289503831d35Sstevel 
289603831d35Sstevel 	return (status);
289703831d35Sstevel }
289803831d35Sstevel 
289903831d35Sstevel /*
290003831d35Sstevel  * Send down a single DLPI M_[PC]PROTO to all currently valid dests.
290103831d35Sstevel  *
290203831d35Sstevel  *	msp - ptr to NDM stream structure DL_ messages was received on.
290303831d35Sstevel  *	mp - ptr to mblk containing DL_ request.
290403831d35Sstevel  */
290503831d35Sstevel static int
man_dlpi_senddown(manstr_t * msp,mblk_t * mp)290603831d35Sstevel man_dlpi_senddown(manstr_t *msp, mblk_t *mp)
290703831d35Sstevel {
290803831d35Sstevel 	man_dest_t	*mdp;
290903831d35Sstevel 	int		i;
291003831d35Sstevel 	mblk_t		*rmp[MAN_MAX_DESTS];	/* Copy to replay */
291103831d35Sstevel 	int		dstate[MAN_MAX_DESTS];
291203831d35Sstevel 	int		no_dests = TRUE;
291303831d35Sstevel 	int		status = 0;
291403831d35Sstevel 
291503831d35Sstevel 	if (msp->ms_dests == NULL)
291603831d35Sstevel 		goto exit;
291703831d35Sstevel 
291803831d35Sstevel 	for (i = 0; i < MAN_MAX_DESTS; i++) {
291903831d35Sstevel 		mdp = &msp->ms_dests[i];
292003831d35Sstevel 		if (mdp->md_state == MAN_DSTATE_READY) {
292103831d35Sstevel 			dstate[i] = TRUE;
292203831d35Sstevel 			no_dests = FALSE;
292303831d35Sstevel 		} else {
292403831d35Sstevel 			dstate[i] = FALSE;
292503831d35Sstevel 		}
292603831d35Sstevel 		rmp[i] = NULL;
292703831d35Sstevel 	}
292803831d35Sstevel 
292903831d35Sstevel 	if (no_dests)
293003831d35Sstevel 		goto exit;
293103831d35Sstevel 
293203831d35Sstevel 	/*
293303831d35Sstevel 	 * Build replay and duplicate list for all possible destinations.
293403831d35Sstevel 	 */
293503831d35Sstevel 	for (i = 0; i < MAN_MAX_DESTS; i++) {
293603831d35Sstevel 		if (dstate[i]) {
293703831d35Sstevel 			rmp[i] = copymsg(mp);
293803831d35Sstevel 			if (rmp[i] == NULL) {
293903831d35Sstevel 				status = ENOMEM;
294003831d35Sstevel 				break;
294103831d35Sstevel 			}
294203831d35Sstevel 		}
294303831d35Sstevel 	}
294403831d35Sstevel 
294503831d35Sstevel 	if (status == 0) {
294603831d35Sstevel 		for (i = 0; i < MAN_MAX_DESTS; i++)
294703831d35Sstevel 			if (dstate[i]) {
294803831d35Sstevel 				mdp = &msp->ms_dests[i];
294903831d35Sstevel 
295003831d35Sstevel 				ASSERT(mdp->md_wq != NULL);
295103831d35Sstevel 				ASSERT(mp->b_next == NULL);
295203831d35Sstevel 				ASSERT(mp->b_prev == NULL);
295303831d35Sstevel 
295403831d35Sstevel 				man_dlpi_replay(mdp, rmp[i]);
295503831d35Sstevel 			}
295603831d35Sstevel 	} else {
295703831d35Sstevel 		for (; i >= 0; i--)
295803831d35Sstevel 			if (dstate[i] && rmp[i])
295903831d35Sstevel 				freemsg(rmp[i]);
296003831d35Sstevel 	}
296103831d35Sstevel 
296203831d35Sstevel exit:
296303831d35Sstevel 	return (status);
296403831d35Sstevel }
296503831d35Sstevel 
296603831d35Sstevel /*
296703831d35Sstevel  * man_dlpi_replay - traverse the list of DLPI requests and reapply them to
296803831d35Sstevel  * get the upper and lower streams into the same state. Called holding inner
296903831d35Sstevel  * perimeter lock exclusive. Note thet we defer M_IOCTL type dlpi messages
297003831d35Sstevel  * until we get an OK_ACK to our ATTACH (see man_lrsrv and
297103831d35Sstevel  * man_dlioc_replay).
297203831d35Sstevel  *
297303831d35Sstevel  * 	mdp - pointer to lower queue (destination)
297403831d35Sstevel  *	rmp - list of mblks to send down stream.
297503831d35Sstevel  */
297603831d35Sstevel static void
man_dlpi_replay(man_dest_t * mdp,mblk_t * rmp)297703831d35Sstevel man_dlpi_replay(man_dest_t *mdp, mblk_t *rmp)
297803831d35Sstevel {
297903831d35Sstevel 	mblk_t			*mp;
298003831d35Sstevel 	union DL_primitives	*dlp = NULL;
298103831d35Sstevel 
298203831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_dlpi_replay: mdp(0x%p)", (void *)mdp));
298303831d35Sstevel 
298403831d35Sstevel 	while (rmp) {
298503831d35Sstevel 		mp = rmp;
298603831d35Sstevel 		rmp = rmp->b_next;
298703831d35Sstevel 		mp->b_prev = mp->b_next = NULL;
298803831d35Sstevel 
298903831d35Sstevel 		dlp = (union DL_primitives *)mp->b_rptr;
299003831d35Sstevel 		MAN_DBG(MAN_DLPI,
299103831d35Sstevel 		    ("man_dlpi_replay: mdp(0x%p) sending %s\n",
299203831d35Sstevel 		    (void *)mdp,
299303831d35Sstevel 		    (dlp->dl_primitive == DL_IOC_HDR_INFO) ?
299403831d35Sstevel 		    "DL_IOC_HDR_INFO" : (dlp->dl_primitive == DLIOCRAW) ?
299503831d35Sstevel 		    "DLIOCRAW" : dps[(unsigned)(dlp->dl_primitive)]));
299603831d35Sstevel 
299703831d35Sstevel 		if (dlp->dl_primitive == DL_ATTACH_REQ) {
299803831d35Sstevel 			/*
299903831d35Sstevel 			 * insert the lower devices ppa.
300003831d35Sstevel 			 */
300103831d35Sstevel 			dlp->attach_req.dl_ppa = mdp->md_device.mdev_ppa;
300203831d35Sstevel 		}
300303831d35Sstevel 
300403831d35Sstevel 		(void) putnext(mdp->md_wq, mp);
300503831d35Sstevel 	}
300603831d35Sstevel 
300703831d35Sstevel }
300803831d35Sstevel 
300903831d35Sstevel static void
man_dreq(queue_t * wq,mblk_t * mp)301003831d35Sstevel man_dreq(queue_t *wq, mblk_t *mp)
301103831d35Sstevel {
301203831d35Sstevel 	manstr_t	*msp;	/* per stream data */
301303831d35Sstevel 	man_work_t	*wp;
301403831d35Sstevel 
301503831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
301603831d35Sstevel 
301703831d35Sstevel 	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
301803831d35Sstevel 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
301903831d35Sstevel 		return;
302003831d35Sstevel 	}
302103831d35Sstevel 
302203831d35Sstevel 	if (msp->ms_dlpistate != DL_UNBOUND) {
302303831d35Sstevel 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
302403831d35Sstevel 		return;
302503831d35Sstevel 	}
302603831d35Sstevel 
302703831d35Sstevel 	ASSERT(msp->ms_dests != NULL);
302803831d35Sstevel 
302903831d35Sstevel 	wp = man_work_alloc(MAN_WORK_CLOSE_STREAM, KM_NOSLEEP);
303003831d35Sstevel 	if (wp == NULL) {
303103831d35Sstevel 		dlerrorack(wq, mp, DL_DETACH_REQ, DL_SYSERR, ENOMEM);
303203831d35Sstevel 		return;
303303831d35Sstevel 	}
303403831d35Sstevel 	man_dodetach(msp, wp);
303503831d35Sstevel 	(void) qassociate(wq, -1);
303603831d35Sstevel 
303703831d35Sstevel 	SETSTATE(msp, DL_UNATTACHED);
303803831d35Sstevel 
303903831d35Sstevel 	dlokack(wq, mp, DL_DETACH_REQ);
304003831d35Sstevel }
304103831d35Sstevel 
304203831d35Sstevel static void
man_dl_clean(mblk_t ** mplist)304303831d35Sstevel man_dl_clean(mblk_t **mplist)
304403831d35Sstevel {
304503831d35Sstevel 	mblk_t	*tmp;
304603831d35Sstevel 
304703831d35Sstevel 	/*
304803831d35Sstevel 	 * Toss everything.
304903831d35Sstevel 	 */
305003831d35Sstevel 	while ((tmp = *mplist) != NULL) {
305103831d35Sstevel 		*mplist = (*mplist)->b_next;
305203831d35Sstevel 		tmp->b_next = tmp->b_prev = NULL;
305303831d35Sstevel 		freemsg(tmp);
305403831d35Sstevel 	}
305503831d35Sstevel 
305603831d35Sstevel }
305703831d35Sstevel 
305803831d35Sstevel /*
305903831d35Sstevel  * man_dl_release - Remove the corresponding DLPI request from the
306003831d35Sstevel  * catch list. Walk thru the catch list looking for the other half of
306103831d35Sstevel  * the pair and delete it.  If we are detaching, delete the entire list.
306203831d35Sstevel  *
306303831d35Sstevel  *	msp - pointer of stream struct to process
306403831d35Sstevel  *	mp  - pointer to mblk to first half of pair.  We will delete other
306503831d35Sstevel  * 		half of pair based on this.
306603831d35Sstevel  */
306703831d35Sstevel static void
man_dl_release(mblk_t ** mplist,mblk_t * mp)306803831d35Sstevel man_dl_release(mblk_t **mplist, mblk_t *mp)
306903831d35Sstevel {
307003831d35Sstevel 	uchar_t			match_dbtype;
307103831d35Sstevel 	mblk_t			*tmp;
307203831d35Sstevel 	mblk_t			*tmpp;
307303831d35Sstevel 	int			matched = FALSE;
307403831d35Sstevel 
307503831d35Sstevel 	if (*mplist == NULL)
307603831d35Sstevel 		goto exit;
307703831d35Sstevel 
307803831d35Sstevel 	match_dbtype = DB_TYPE(mp);
307903831d35Sstevel 
308003831d35Sstevel 	/*
308103831d35Sstevel 	 * Currently we only clean DL_ PROTO type messages. There is
308203831d35Sstevel 	 * no way to turn off M_CTL or DL_IOC stuff other than sending
308303831d35Sstevel 	 * down a DL_DETACH, which resets everything.
308403831d35Sstevel 	 */
308503831d35Sstevel 	if (match_dbtype != M_PROTO && match_dbtype != M_PCPROTO) {
308603831d35Sstevel 		goto exit;
308703831d35Sstevel 	}
308803831d35Sstevel 
308903831d35Sstevel 	/*
309003831d35Sstevel 	 * Selectively find a caught mblk that matches this one and
309103831d35Sstevel 	 * remove it from the list
309203831d35Sstevel 	 */
309303831d35Sstevel 	tmp = tmpp = *mplist;
309403831d35Sstevel 	matched = man_match_proto(mp, tmp);
309503831d35Sstevel 	if (matched) {
309603831d35Sstevel 		*mplist = tmp->b_next;
309703831d35Sstevel 		tmp->b_next = tmp->b_prev = NULL;
309803831d35Sstevel 	} else {
309903831d35Sstevel 		for (tmp = tmp->b_next; tmp != NULL; tmp = tmp->b_next) {
310003831d35Sstevel 			if (matched = man_match_proto(mp, tmp))
310103831d35Sstevel 				break;
310203831d35Sstevel 			tmpp = tmp;
310303831d35Sstevel 		}
310403831d35Sstevel 
310503831d35Sstevel 		if (matched) {
310603831d35Sstevel 			tmpp->b_next = tmp->b_next;
310703831d35Sstevel 			tmp->b_next = tmp->b_prev = NULL;
310803831d35Sstevel 		}
310903831d35Sstevel 	}
311003831d35Sstevel 
311103831d35Sstevel exit:
311203831d35Sstevel 	if (matched) {
311303831d35Sstevel 
311403831d35Sstevel 		MAN_DBG(MAN_DLPI, ("man_dl_release: release %s",
311503831d35Sstevel 		    (DL_PRIM(mp) == DL_IOC_HDR_INFO) ? "DL_IOC_HDR_INFO" :
311603831d35Sstevel 		    (DL_PRIM(mp) == DLIOCRAW) ? "DLIOCRAW" :
311703831d35Sstevel 		    dps[(int)DL_PRIM(mp)]));
311803831d35Sstevel 
311903831d35Sstevel 		freemsg(tmp);
312003831d35Sstevel 	}
312103831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_dl_release: returns"));
312203831d35Sstevel 
312303831d35Sstevel }
312403831d35Sstevel 
312503831d35Sstevel /*
312603831d35Sstevel  * Compare two DL_ messages. If they are complimentary (e.g. DL_UNBIND
312703831d35Sstevel  * compliments DL_BIND), return true.
312803831d35Sstevel  */
312903831d35Sstevel static int
man_match_proto(mblk_t * mp1,mblk_t * mp2)313003831d35Sstevel man_match_proto(mblk_t *mp1, mblk_t *mp2)
313103831d35Sstevel {
313203831d35Sstevel 	t_uscalar_t	prim1;
313303831d35Sstevel 	t_uscalar_t	prim2;
313403831d35Sstevel 	int		matched = FALSE;
313503831d35Sstevel 
313603831d35Sstevel 	/*
313703831d35Sstevel 	 * Primitive to clean off list.
313803831d35Sstevel 	 */
313903831d35Sstevel 	prim1 = DL_PRIM(mp1);
314003831d35Sstevel 	prim2 = DL_PRIM(mp2);
314103831d35Sstevel 
314203831d35Sstevel 	switch (prim1) {
314303831d35Sstevel 	case DL_UNBIND_REQ:
314403831d35Sstevel 		if (prim2 == DL_BIND_REQ)
314503831d35Sstevel 			matched = TRUE;
314603831d35Sstevel 		break;
314703831d35Sstevel 
314803831d35Sstevel 	case DL_PROMISCOFF_REQ:
314903831d35Sstevel 		if (prim2 == DL_PROMISCON_REQ) {
315003831d35Sstevel 			dl_promiscoff_req_t	*poff1;
315103831d35Sstevel 			dl_promiscoff_req_t	*poff2;
315203831d35Sstevel 
315303831d35Sstevel 			poff1 = (dl_promiscoff_req_t *)mp1->b_rptr;
315403831d35Sstevel 			poff2 = (dl_promiscoff_req_t *)mp2->b_rptr;
315503831d35Sstevel 
315603831d35Sstevel 			if (poff1->dl_level == poff2->dl_level)
315703831d35Sstevel 				matched = TRUE;
315803831d35Sstevel 		}
315903831d35Sstevel 		break;
316003831d35Sstevel 
316103831d35Sstevel 	case DL_DISABMULTI_REQ:
316203831d35Sstevel 		if (prim2 == DL_ENABMULTI_REQ) {
316303831d35Sstevel 			union DL_primitives	*dlp;
316403831d35Sstevel 			t_uscalar_t		off;
316503831d35Sstevel 			eaddr_t			*addrp1;
316603831d35Sstevel 			eaddr_t			*addrp2;
316703831d35Sstevel 
316803831d35Sstevel 			dlp = (union DL_primitives *)mp1->b_rptr;
316903831d35Sstevel 			off = dlp->disabmulti_req.dl_addr_offset;
317003831d35Sstevel 			addrp1 = (eaddr_t *)(mp1->b_rptr + off);
317103831d35Sstevel 
317203831d35Sstevel 			dlp = (union DL_primitives *)mp2->b_rptr;
317303831d35Sstevel 			off = dlp->disabmulti_req.dl_addr_offset;
317403831d35Sstevel 			addrp2 = (eaddr_t *)(mp2->b_rptr + off);
317503831d35Sstevel 
317603831d35Sstevel 			if (ether_cmp(addrp1, addrp2) == 0)
317703831d35Sstevel 				matched = 1;
317803831d35Sstevel 		}
317903831d35Sstevel 		break;
318003831d35Sstevel 
318103831d35Sstevel 	default:
318203831d35Sstevel 		break;
318303831d35Sstevel 	}
318403831d35Sstevel 
318503831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_match_proto returns %d", matched));
318603831d35Sstevel 
318703831d35Sstevel 	return (matched);
318803831d35Sstevel }
318903831d35Sstevel 
319003831d35Sstevel /*
319103831d35Sstevel  * Bind upper stream to a particular SAP. Called with exclusive innerperim
319203831d35Sstevel  * QPAIR, shared outerperim.
319303831d35Sstevel  */
319403831d35Sstevel static void
man_breq(queue_t * wq,mblk_t * mp)319503831d35Sstevel man_breq(queue_t *wq, mblk_t *mp)
319603831d35Sstevel {
319703831d35Sstevel 	man_t			*manp;	/* per instance data */
319803831d35Sstevel 	manstr_t		*msp;	/* per stream data */
319903831d35Sstevel 	union DL_primitives	*dlp;
320003831d35Sstevel 	man_dladdr_t		man_addr;
320103831d35Sstevel 	t_uscalar_t		sap;
320203831d35Sstevel 	t_uscalar_t		xidtest;
320303831d35Sstevel 
320403831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
320503831d35Sstevel 
320603831d35Sstevel 	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
320703831d35Sstevel 		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
320803831d35Sstevel 		return;
320903831d35Sstevel 	}
321003831d35Sstevel 
321103831d35Sstevel 	if (msp->ms_dlpistate != DL_UNBOUND) {
321203831d35Sstevel 		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
321303831d35Sstevel 		return;
321403831d35Sstevel 	}
321503831d35Sstevel 
321603831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
321703831d35Sstevel 	manp = msp->ms_manp;			/* valid after attach */
321803831d35Sstevel 	sap = dlp->bind_req.dl_sap;
321903831d35Sstevel 	xidtest = dlp->bind_req.dl_xidtest_flg;
322003831d35Sstevel 
322103831d35Sstevel 	ASSERT(manp);
322203831d35Sstevel 
322303831d35Sstevel 	if (xidtest) {
322403831d35Sstevel 		dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
322503831d35Sstevel 		return;
322603831d35Sstevel 	}
322703831d35Sstevel 
322803831d35Sstevel 	if (sap > ETHERTYPE_MAX) {
322903831d35Sstevel 		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADSAP, 0);
323003831d35Sstevel 		return;
323103831d35Sstevel 	}
323203831d35Sstevel 
323303831d35Sstevel 	if (man_dlpi(msp, mp)) {
323403831d35Sstevel 		dlerrorack(wq, mp, DL_BIND_REQ, DL_SYSERR, ENOMEM);
323503831d35Sstevel 		return;
323603831d35Sstevel 	}
323703831d35Sstevel 
323803831d35Sstevel 	msp->ms_sap = sap;
323903831d35Sstevel 
324003831d35Sstevel 	SETSTATE(msp, DL_IDLE);
324103831d35Sstevel 
324203831d35Sstevel 	man_addr.dl_sap = msp->ms_sap;
324303831d35Sstevel 	ether_copy(&msp->ms_manp->man_eaddr, &man_addr.dl_phys);
324403831d35Sstevel 
324503831d35Sstevel 	dlbindack(wq, mp, msp->ms_sap, &man_addr, MAN_ADDRL, 0, 0);
324603831d35Sstevel 
324703831d35Sstevel }
324803831d35Sstevel 
324903831d35Sstevel static void
man_ubreq(queue_t * wq,mblk_t * mp)325003831d35Sstevel man_ubreq(queue_t *wq, mblk_t *mp)
325103831d35Sstevel {
325203831d35Sstevel 	manstr_t		*msp;	/* per stream data */
325303831d35Sstevel 
325403831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
325503831d35Sstevel 
325603831d35Sstevel 	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
325703831d35Sstevel 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
325803831d35Sstevel 		return;
325903831d35Sstevel 	}
326003831d35Sstevel 
326103831d35Sstevel 	if (msp->ms_dlpistate != DL_IDLE) {
326203831d35Sstevel 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
326303831d35Sstevel 		return;
326403831d35Sstevel 	}
326503831d35Sstevel 
326603831d35Sstevel 	if (man_dlpi_senddown(msp, mp)) {
326703831d35Sstevel 		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_SYSERR, ENOMEM);
326803831d35Sstevel 		return;
326903831d35Sstevel 	}
327003831d35Sstevel 
327103831d35Sstevel 	man_dl_release(&msp->ms_dl_mp, mp);
327203831d35Sstevel 
327303831d35Sstevel 	SETSTATE(msp, DL_UNBOUND);
327403831d35Sstevel 
327503831d35Sstevel 	dlokack(wq, mp, DL_UNBIND_REQ);
327603831d35Sstevel 
327703831d35Sstevel }
327803831d35Sstevel 
327903831d35Sstevel static void
man_ireq(queue_t * wq,mblk_t * mp)328003831d35Sstevel man_ireq(queue_t *wq, mblk_t *mp)
328103831d35Sstevel {
328203831d35Sstevel 	manstr_t	*msp;
328303831d35Sstevel 	dl_info_ack_t	*dlip;
328403831d35Sstevel 	man_dladdr_t	*dlap;
328503831d35Sstevel 	eaddr_t		*ep;
328603831d35Sstevel 	size_t	size;
328703831d35Sstevel 
328803831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
328903831d35Sstevel 
329003831d35Sstevel 	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
329103831d35Sstevel 		dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
329203831d35Sstevel 		return;
329303831d35Sstevel 	}
329403831d35Sstevel 
329503831d35Sstevel 	/* Exchange current msg for a DL_INFO_ACK. */
329603831d35Sstevel 	size = sizeof (dl_info_ack_t) + MAN_ADDRL + ETHERADDRL;
329703831d35Sstevel 	mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK);
329803831d35Sstevel 	if (mp == NULL) {
329903831d35Sstevel 		MAN_DBG(MAN_DLPI, ("man_ireq: man_ireq: mp == NULL."));
330003831d35Sstevel 		return;
330103831d35Sstevel 	}
330203831d35Sstevel 
330303831d35Sstevel 	/* Fill in the DL_INFO_ACK fields and reply. */
330403831d35Sstevel 	dlip = (dl_info_ack_t *)mp->b_rptr;
330503831d35Sstevel 	*dlip = man_infoack;
330603831d35Sstevel 	dlip->dl_current_state = msp->ms_dlpistate;
330703831d35Sstevel 	dlap = (man_dladdr_t *)(mp->b_rptr + dlip->dl_addr_offset);
330803831d35Sstevel 	dlap->dl_sap = msp->ms_sap;
330903831d35Sstevel 
331003831d35Sstevel 	/*
331103831d35Sstevel 	 * If attached, return physical address.
331203831d35Sstevel 	 */
331303831d35Sstevel 	if (msp->ms_manp != NULL) {
331403831d35Sstevel 		ether_copy(&msp->ms_manp->man_eaddr, &dlap->dl_phys);
331503831d35Sstevel 	} else {
331603831d35Sstevel 		bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);
331703831d35Sstevel 	}
331803831d35Sstevel 
331903831d35Sstevel 	ep = (struct ether_addr *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
332003831d35Sstevel 	ether_copy(&etherbroadcast, ep);
332103831d35Sstevel 
332203831d35Sstevel 	qreply(wq, mp);
332303831d35Sstevel 
332403831d35Sstevel }
332503831d35Sstevel 
332603831d35Sstevel 
332703831d35Sstevel static void
man_ponreq(queue_t * wq,mblk_t * mp)332803831d35Sstevel man_ponreq(queue_t *wq, mblk_t *mp)
332903831d35Sstevel {
333003831d35Sstevel 	manstr_t	*msp;
333103831d35Sstevel 	int		flag;
333203831d35Sstevel 
333303831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
333403831d35Sstevel 
333503831d35Sstevel 	if (MBLKL(mp) < DL_PROMISCON_REQ_SIZE) {
333603831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCON_REQ, DL_BADPRIM, 0);
333703831d35Sstevel 		return;
333803831d35Sstevel 	}
333903831d35Sstevel 
334003831d35Sstevel 	switch (((dl_promiscon_req_t *)mp->b_rptr)->dl_level) {
334103831d35Sstevel 	case DL_PROMISC_PHYS:
334203831d35Sstevel 		flag = MAN_SFLAG_ALLPHYS;
334303831d35Sstevel 		break;
334403831d35Sstevel 
334503831d35Sstevel 	case DL_PROMISC_SAP:
334603831d35Sstevel 		flag = MAN_SFLAG_ALLSAP;
334703831d35Sstevel 		break;
334803831d35Sstevel 
334903831d35Sstevel 	case DL_PROMISC_MULTI:
335003831d35Sstevel 		flag = MAN_SFLAG_ALLMULTI;
335103831d35Sstevel 		break;
335203831d35Sstevel 
335303831d35Sstevel 	default:
335403831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCON_REQ, DL_NOTSUPPORTED, 0);
335503831d35Sstevel 		return;
335603831d35Sstevel 	}
335703831d35Sstevel 
335803831d35Sstevel 	/*
335903831d35Sstevel 	 * Catch request for replay, and forward down to any lower
336003831d35Sstevel 	 * lower stream.
336103831d35Sstevel 	 */
336203831d35Sstevel 	if (man_dlpi(msp, mp)) {
336303831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCON_REQ, DL_SYSERR, ENOMEM);
336403831d35Sstevel 		return;
336503831d35Sstevel 	}
336603831d35Sstevel 
336703831d35Sstevel 	msp->ms_flags |= flag;
336803831d35Sstevel 
336903831d35Sstevel 	dlokack(wq, mp, DL_PROMISCON_REQ);
337003831d35Sstevel 
337103831d35Sstevel }
337203831d35Sstevel 
337303831d35Sstevel static void
man_poffreq(queue_t * wq,mblk_t * mp)337403831d35Sstevel man_poffreq(queue_t *wq, mblk_t *mp)
337503831d35Sstevel {
337603831d35Sstevel 	manstr_t		*msp;
337703831d35Sstevel 	int			flag;
337803831d35Sstevel 
337903831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
338003831d35Sstevel 
338103831d35Sstevel 	if (MBLKL(mp) < DL_PROMISCOFF_REQ_SIZE) {
338203831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCOFF_REQ, DL_BADPRIM, 0);
338303831d35Sstevel 		return;
338403831d35Sstevel 	}
338503831d35Sstevel 
338603831d35Sstevel 	switch (((dl_promiscoff_req_t *)mp->b_rptr)->dl_level) {
338703831d35Sstevel 	case DL_PROMISC_PHYS:
338803831d35Sstevel 		flag = MAN_SFLAG_ALLPHYS;
338903831d35Sstevel 		break;
339003831d35Sstevel 
339103831d35Sstevel 	case DL_PROMISC_SAP:
339203831d35Sstevel 		flag = MAN_SFLAG_ALLSAP;
339303831d35Sstevel 		break;
339403831d35Sstevel 
339503831d35Sstevel 	case DL_PROMISC_MULTI:
339603831d35Sstevel 		flag = MAN_SFLAG_ALLMULTI;
339703831d35Sstevel 		break;
339803831d35Sstevel 
339903831d35Sstevel 	default:
340003831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCOFF_REQ, DL_NOTSUPPORTED, 0);
340103831d35Sstevel 		return;
340203831d35Sstevel 	}
340303831d35Sstevel 
340403831d35Sstevel 	if ((msp->ms_flags & flag) == 0) {
340503831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCOFF_REQ, DL_NOTENAB, 0);
340603831d35Sstevel 		return;
340703831d35Sstevel 	}
340803831d35Sstevel 
340903831d35Sstevel 	if (man_dlpi_senddown(msp, mp)) {
341003831d35Sstevel 		dlerrorack(wq, mp, DL_PROMISCOFF_REQ, DL_SYSERR, ENOMEM);
341103831d35Sstevel 		return;
341203831d35Sstevel 	}
341303831d35Sstevel 
341403831d35Sstevel 	man_dl_release(&msp->ms_dl_mp, mp);
341503831d35Sstevel 
341603831d35Sstevel 	msp->ms_flags &= ~flag;
341703831d35Sstevel 
341803831d35Sstevel 	dlokack(wq, mp, DL_PROMISCOFF_REQ);
341903831d35Sstevel 
342003831d35Sstevel }
342103831d35Sstevel 
342203831d35Sstevel /*
342303831d35Sstevel  * Enable multicast requests. We might need to track addresses instead of
342403831d35Sstevel  * just passing things through (see eri_dmreq) - TBD.
342503831d35Sstevel  */
342603831d35Sstevel static void
man_emreq(queue_t * wq,mblk_t * mp)342703831d35Sstevel man_emreq(queue_t *wq, mblk_t *mp)
342803831d35Sstevel {
342903831d35Sstevel 	manstr_t		*msp;
343003831d35Sstevel 	union DL_primitives	*dlp;
343103831d35Sstevel 	eaddr_t			*addrp;
343203831d35Sstevel 	t_uscalar_t		off;
343303831d35Sstevel 	t_uscalar_t		len;
343403831d35Sstevel 
343503831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
343603831d35Sstevel 
343703831d35Sstevel 	if (MBLKL(mp) < DL_ENABMULTI_REQ_SIZE) {
343803831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_BADPRIM, 0);
343903831d35Sstevel 		return;
344003831d35Sstevel 	}
344103831d35Sstevel 
344203831d35Sstevel 	if (msp->ms_dlpistate == DL_UNATTACHED) {
344303831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_OUTSTATE, 0);
344403831d35Sstevel 		return;
344503831d35Sstevel 	}
344603831d35Sstevel 
344703831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
344803831d35Sstevel 	len = dlp->enabmulti_req.dl_addr_length;
344903831d35Sstevel 	off = dlp->enabmulti_req.dl_addr_offset;
345003831d35Sstevel 	addrp = (struct ether_addr *)(mp->b_rptr + off);
345103831d35Sstevel 
345203831d35Sstevel 	if ((len != ETHERADDRL) ||
345303831d35Sstevel 	    !MBLKIN(mp, off, len) ||
345403831d35Sstevel 	    ((addrp->ether_addr_octet[0] & 01) == 0)) {
345503831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_BADADDR, 0);
345603831d35Sstevel 		return;
345703831d35Sstevel 	}
345803831d35Sstevel 
345903831d35Sstevel 	/*
346003831d35Sstevel 	 * Catch request for replay, and forward down to any lower
346103831d35Sstevel 	 * lower stream.
346203831d35Sstevel 	 */
346303831d35Sstevel 	if (man_dlpi(msp, mp)) {
346403831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOMEM);
346503831d35Sstevel 		return;
346603831d35Sstevel 	}
346703831d35Sstevel 
346803831d35Sstevel 	dlokack(wq, mp, DL_ENABMULTI_REQ);
346903831d35Sstevel 
347003831d35Sstevel }
347103831d35Sstevel 
347203831d35Sstevel static void
man_dmreq(queue_t * wq,mblk_t * mp)347303831d35Sstevel man_dmreq(queue_t *wq, mblk_t *mp)
347403831d35Sstevel {
347503831d35Sstevel 	manstr_t		*msp;
347603831d35Sstevel 	union DL_primitives	*dlp;
347703831d35Sstevel 	eaddr_t			*addrp;
347803831d35Sstevel 	t_uscalar_t		off;
347903831d35Sstevel 	t_uscalar_t		len;
348003831d35Sstevel 
348103831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
348203831d35Sstevel 
348303831d35Sstevel 	if (MBLKL(mp) < DL_DISABMULTI_REQ_SIZE) {
348403831d35Sstevel 		dlerrorack(wq, mp, DL_DISABMULTI_REQ, DL_BADPRIM, 0);
348503831d35Sstevel 		return;
348603831d35Sstevel 	}
348703831d35Sstevel 
348803831d35Sstevel 	if (msp->ms_dlpistate == DL_UNATTACHED) {
348903831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_OUTSTATE, 0);
349003831d35Sstevel 		return;
349103831d35Sstevel 	}
349203831d35Sstevel 
349303831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
349403831d35Sstevel 	len = dlp->enabmulti_req.dl_addr_length;
349503831d35Sstevel 	off = dlp->enabmulti_req.dl_addr_offset;
349603831d35Sstevel 	addrp = (struct ether_addr *)(mp->b_rptr + off);
349703831d35Sstevel 
349803831d35Sstevel 	if ((len != ETHERADDRL) ||
349903831d35Sstevel 	    !MBLKIN(mp, off, len) ||
350003831d35Sstevel 	    ((addrp->ether_addr_octet[0] & 01) == 0)) {
350103831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_BADADDR, 0);
350203831d35Sstevel 		return;
350303831d35Sstevel 	}
350403831d35Sstevel 
350503831d35Sstevel 	if (man_dlpi_senddown(msp, mp)) {
350603831d35Sstevel 		dlerrorack(wq, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOMEM);
350703831d35Sstevel 		return;
350803831d35Sstevel 	}
350903831d35Sstevel 
351003831d35Sstevel 	man_dl_release(&msp->ms_dl_mp, mp);
351103831d35Sstevel 
351203831d35Sstevel 	dlokack(wq, mp, DL_DISABMULTI_REQ);
351303831d35Sstevel 
351403831d35Sstevel }
351503831d35Sstevel 
351603831d35Sstevel static void
man_pareq(queue_t * wq,mblk_t * mp)351703831d35Sstevel man_pareq(queue_t *wq, mblk_t *mp)
351803831d35Sstevel {
351903831d35Sstevel 	manstr_t		*msp;
352003831d35Sstevel 	union	DL_primitives	*dlp;
352103831d35Sstevel 	uint32_t		type;
352203831d35Sstevel 	struct	ether_addr	addr;
352303831d35Sstevel 
352403831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
352503831d35Sstevel 
352603831d35Sstevel 	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
352703831d35Sstevel 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
352803831d35Sstevel 		return;
352903831d35Sstevel 	}
353003831d35Sstevel 
353103831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
353203831d35Sstevel 	type = dlp->physaddr_req.dl_addr_type;
353303831d35Sstevel 	if (msp->ms_manp == NULL) {
353403831d35Sstevel 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
353503831d35Sstevel 		return;
353603831d35Sstevel 	}
353703831d35Sstevel 
353803831d35Sstevel 	switch (type) {
353903831d35Sstevel 	case	DL_FACT_PHYS_ADDR:
354003831d35Sstevel 		(void) localetheraddr((struct ether_addr *)NULL, &addr);
354103831d35Sstevel 		break;
354203831d35Sstevel 
354303831d35Sstevel 	case	DL_CURR_PHYS_ADDR:
354403831d35Sstevel 		ether_bcopy(&msp->ms_manp->man_eaddr, &addr);
354503831d35Sstevel 		break;
354603831d35Sstevel 
354703831d35Sstevel 	default:
354803831d35Sstevel 		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
354903831d35Sstevel 		return;
355003831d35Sstevel 	}
355103831d35Sstevel 
355203831d35Sstevel 	dlphysaddrack(wq, mp, &addr, ETHERADDRL);
355303831d35Sstevel }
355403831d35Sstevel 
355503831d35Sstevel /*
355603831d35Sstevel  * TBD - this routine probably should be protected w/ an ndd
355703831d35Sstevel  * tuneable, or a man.conf parameter.
355803831d35Sstevel  */
355903831d35Sstevel static void
man_spareq(queue_t * wq,mblk_t * mp)356003831d35Sstevel man_spareq(queue_t *wq, mblk_t *mp)
356103831d35Sstevel {
356203831d35Sstevel 	manstr_t		*msp;
356303831d35Sstevel 	union DL_primitives	*dlp;
356403831d35Sstevel 	t_uscalar_t		off;
356503831d35Sstevel 	t_uscalar_t		len;
356603831d35Sstevel 	eaddr_t			*addrp;
356703831d35Sstevel 
356803831d35Sstevel 	msp = (manstr_t *)wq->q_ptr;
356903831d35Sstevel 
357003831d35Sstevel 	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
357103831d35Sstevel 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
357203831d35Sstevel 		return;
357303831d35Sstevel 	}
357403831d35Sstevel 
357503831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
357603831d35Sstevel 	len = dlp->set_physaddr_req.dl_addr_length;
357703831d35Sstevel 	off = dlp->set_physaddr_req.dl_addr_offset;
357803831d35Sstevel 
357903831d35Sstevel 	if (!MBLKIN(mp, off, len)) {
358003831d35Sstevel 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
358103831d35Sstevel 		return;
358203831d35Sstevel 	}
358303831d35Sstevel 
358403831d35Sstevel 	addrp = (struct ether_addr *)(mp->b_rptr + off);
358503831d35Sstevel 
358603831d35Sstevel 	/*
358703831d35Sstevel 	 * Error if length of address isn't right or the address
358803831d35Sstevel 	 * specified is a multicast or broadcast address.
358903831d35Sstevel 	 */
359003831d35Sstevel 	if ((len != ETHERADDRL) ||
359103831d35Sstevel 	    ((addrp->ether_addr_octet[0] & 01) == 1) ||
359203831d35Sstevel 	    (ether_cmp(addrp, &etherbroadcast) == 0)) {
359303831d35Sstevel 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
359403831d35Sstevel 		return;
359503831d35Sstevel 	}
359603831d35Sstevel 	/*
359703831d35Sstevel 	 * Error if this stream is not attached to a device.
359803831d35Sstevel 	 */
359903831d35Sstevel 	if (msp->ms_manp == NULL) {
360003831d35Sstevel 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
360103831d35Sstevel 		return;
360203831d35Sstevel 	}
360303831d35Sstevel 
360403831d35Sstevel 	/*
360503831d35Sstevel 	 * We will also resend DL_SET_PHYS_ADDR_REQ for each dest
360603831d35Sstevel 	 * when it is linked under us.
360703831d35Sstevel 	 */
360803831d35Sstevel 	if (man_dlpi_senddown(msp, mp)) {
360903831d35Sstevel 		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOMEM);
361003831d35Sstevel 		return;
361103831d35Sstevel 	}
361203831d35Sstevel 
361303831d35Sstevel 	ether_copy(addrp, msp->ms_manp->man_eaddr.ether_addr_octet);
361403831d35Sstevel 
361503831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_sareq: snagged %s\n",
361603831d35Sstevel 	    ether_sprintf(&msp->ms_manp->man_eaddr)));
361703831d35Sstevel 
361803831d35Sstevel 	dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
361903831d35Sstevel 
362003831d35Sstevel }
362103831d35Sstevel 
362203831d35Sstevel /*
362303831d35Sstevel  * These routines make up the lower part of the MAN streams framework.
362403831d35Sstevel  */
362503831d35Sstevel 
362603831d35Sstevel /*
362703831d35Sstevel  * man_lwsrv - Deferred mblks for down stream. We end up here when
362803831d35Sstevel  * the destination is not DL_IDLE when traffic comes downstream.
362903831d35Sstevel  *
363003831d35Sstevel  *	wq - lower write queue of mxx
363103831d35Sstevel  */
363203831d35Sstevel static int
man_lwsrv(queue_t * wq)363303831d35Sstevel man_lwsrv(queue_t *wq)
363403831d35Sstevel {
363503831d35Sstevel 	mblk_t		*mp;
363603831d35Sstevel 	mblk_t		*mlistp;
363703831d35Sstevel 	man_dest_t	*mdp;
363803831d35Sstevel 	size_t		count;
363903831d35Sstevel 
364003831d35Sstevel 	mdp = (man_dest_t *)wq->q_ptr;
364103831d35Sstevel 
364203831d35Sstevel 	MAN_DBG(MAN_LWSRV, ("man_lwsrv: wq(0x%p) mdp(0x%p)"
364303831d35Sstevel 	    " md_rq(0x%p)\n", (void *)wq, (void *)mdp,
364403831d35Sstevel 	    mdp ? (void *)mdp->md_rq : NULL));
364503831d35Sstevel 
364603831d35Sstevel 	if (mdp == NULL)
364703831d35Sstevel 		goto exit;
364803831d35Sstevel 
364903831d35Sstevel 	if (mdp->md_state & MAN_DSTATE_CLOSING) {
365003831d35Sstevel 			flushq(wq, FLUSHDATA);
365103831d35Sstevel 			flushq(RD(wq), FLUSHDATA);
365203831d35Sstevel 			goto exit;
365303831d35Sstevel 	}
365403831d35Sstevel 
365503831d35Sstevel 	/*
365603831d35Sstevel 	 * Arrange to send deferred mp's first, then mblks on the
365703831d35Sstevel 	 * service queue. Since we are exclusive in the inner perimeter,
365803831d35Sstevel 	 * we dont have to worry about md_lock, like the put procedures,
365903831d35Sstevel 	 * which are MTPUTSHARED.
366003831d35Sstevel 	 */
366103831d35Sstevel 	mutex_enter(&mdp->md_lock);
366203831d35Sstevel 	mlistp = mdp->md_dmp_head;
366303831d35Sstevel 	mdp->md_dmp_head = NULL;
366403831d35Sstevel 	count = mdp->md_dmp_count;
366503831d35Sstevel 	mdp->md_dmp_count = 0;
366603831d35Sstevel 	mutex_exit(&mdp->md_lock);
366703831d35Sstevel 
366803831d35Sstevel 	while (mlistp != NULL) {
366903831d35Sstevel 		mp = mlistp;
367003831d35Sstevel 		mlistp = mp->b_next;
367103831d35Sstevel 		mp->b_next = NULL;
367203831d35Sstevel 		count -= msgsize(mp);
367303831d35Sstevel 		if (man_start_lower(mdp, mp, NULL, MAN_LOWER)) {
367403831d35Sstevel 
367503831d35Sstevel 			mutex_enter(&mdp->md_lock);
367603831d35Sstevel 			mdp->md_dmp_count += count + msgsize(mp);
367703831d35Sstevel 			mp->b_next = mlistp;
367803831d35Sstevel 			mdp->md_dmp_head = mp;
367903831d35Sstevel 			mutex_exit(&mdp->md_lock);
368003831d35Sstevel 			goto exit;
368103831d35Sstevel 		}
368203831d35Sstevel 	}
368303831d35Sstevel 	mdp->md_dmp_tail = NULL;
368403831d35Sstevel 
368503831d35Sstevel 	while (mp = getq(wq)) {
368603831d35Sstevel 		if (man_start_lower(mdp, mp, NULL, MAN_LOWER)) {
368703831d35Sstevel 			/*
368803831d35Sstevel 			 * Put it back on queue, making sure to avoid
368903831d35Sstevel 			 * infinite loop mentioned in putbq(9F)
369003831d35Sstevel 			 */
369103831d35Sstevel 			noenable(wq);
3692*07d06da5SSurya Prakki 			(void) putbq(wq, mp);
369303831d35Sstevel 			enableok(wq);
369403831d35Sstevel 
369503831d35Sstevel 			break;
369603831d35Sstevel 		}
369703831d35Sstevel 	}
369803831d35Sstevel 
369903831d35Sstevel exit:
370003831d35Sstevel 
370103831d35Sstevel 	return (0);
370203831d35Sstevel }
370303831d35Sstevel 
370403831d35Sstevel /*
370503831d35Sstevel  * man_lrput - handle DLPI messages issued from downstream.
370603831d35Sstevel  *
370703831d35Sstevel  *	rq - lower read queue of mxx
370803831d35Sstevel  *	mp - mblk ptr to DLPI request
370903831d35Sstevel  *
371003831d35Sstevel  *	returns 0
371103831d35Sstevel  */
371203831d35Sstevel static int
man_lrput(queue_t * rq,mblk_t * mp)371303831d35Sstevel man_lrput(queue_t *rq, mblk_t *mp)
371403831d35Sstevel {
371503831d35Sstevel 	man_dest_t	*mdp;
371603831d35Sstevel 	manstr_t	*msp;
371703831d35Sstevel 
371803831d35Sstevel #if defined(DEBUG)
371903831d35Sstevel 	union DL_primitives	*dlp;
372003831d35Sstevel 	t_uscalar_t		prim = MAN_DLPI_MAX_PRIM + 1;
372103831d35Sstevel 	char			*prim_str;
372203831d35Sstevel #endif  /* DEBUG */
372303831d35Sstevel 
372403831d35Sstevel 	mdp = (man_dest_t *)rq->q_ptr;
372503831d35Sstevel 
372603831d35Sstevel #if defined(DEBUG)
372703831d35Sstevel 	if (DB_TYPE(mp) == M_PROTO) {
372803831d35Sstevel 		dlp = (union DL_primitives *)mp->b_rptr;
372903831d35Sstevel 		prim = dlp->dl_primitive;
373003831d35Sstevel 	}
373103831d35Sstevel 
373203831d35Sstevel 	prim_str = (prim > MAN_DLPI_MAX_PRIM) ? "NON DLPI" :
373303831d35Sstevel 	    (prim == DL_IOC_HDR_INFO) ? "DL_IOC_HDR_INFO" :
373403831d35Sstevel 	    (prim == DLIOCRAW) ? "DLIOCRAW" :
373503831d35Sstevel 	    dps[(unsigned int)prim];
373603831d35Sstevel 	MAN_DBG(MAN_LRPUT, ("man_lrput: rq(0x%p) mp(0x%p) mdp(0x%p)"
373703831d35Sstevel 	    " db_type(0x%x) dl_prim %s", (void *)rq,
373803831d35Sstevel 	    (void *)mp, (void *)mdp, DB_TYPE(mp), prim_str));
373903831d35Sstevel 	MAN_DBGCALL(MAN_LRPUT2, man_print_mdp(mdp));
374003831d35Sstevel #endif  /* DEBUG */
374103831d35Sstevel 
374203831d35Sstevel 	if (DB_TYPE(mp) == M_FLUSH) {
374303831d35Sstevel 		/* Turn around */
374403831d35Sstevel 		if (*mp->b_rptr & FLUSHW) {
374503831d35Sstevel 			*mp->b_rptr &= ~FLUSHR;
374603831d35Sstevel 			qreply(rq, mp);
374703831d35Sstevel 		} else
374803831d35Sstevel 			freemsg(mp);
374903831d35Sstevel 		return (0);
375003831d35Sstevel 	}
375103831d35Sstevel 
375203831d35Sstevel 	if (mdp == NULL || mdp->md_state != MAN_DSTATE_READY) {
375303831d35Sstevel 
375403831d35Sstevel 		MAN_DBG(MAN_LRPUT, ("man_lrput: not ready mdp(0x%p),"
375503831d35Sstevel 		    " state(%d)", (void *)mdp, mdp ? mdp->md_state : -1));
375603831d35Sstevel 		freemsg(mp);
375703831d35Sstevel 		return (0);
375803831d35Sstevel 	}
375903831d35Sstevel 
376003831d35Sstevel 	/*
376103831d35Sstevel 	 * If we have a destination in the right state, forward on datagrams.
376203831d35Sstevel 	 */
376303831d35Sstevel 	if (MAN_IS_DATA(mp)) {
376403831d35Sstevel 		if (mdp->md_dlpistate == DL_IDLE && canputnext(mdp->md_rq)) {
376503831d35Sstevel 
376603831d35Sstevel 			msp = mdp->md_msp;
376703831d35Sstevel 			if (!(msp->ms_flags & MAN_SFLAG_PROMISC))
376803831d35Sstevel 				mdp->md_rcvcnt++; /* Count for failover */
376903831d35Sstevel 			/*
377003831d35Sstevel 			 * go put mblk_t directly up to next queue.
377103831d35Sstevel 			 */
377203831d35Sstevel 			MAN_DBG(MAN_LRPUT, ("man_lrput: putnext to rq(0x%p)",
377303831d35Sstevel 			    (void *)mdp->md_rq));
377403831d35Sstevel 			(void) putnext(mdp->md_rq, mp);
377503831d35Sstevel 		} else {
377603831d35Sstevel 			freemsg(mp);
377703831d35Sstevel 		}
377803831d35Sstevel 	} else {
377903831d35Sstevel 		/*
378003831d35Sstevel 		 * Handle in man_lrsrv with exclusive inner perimeter lock.
378103831d35Sstevel 		 */
3782*07d06da5SSurya Prakki 		(void) putq(rq, mp);
378303831d35Sstevel 	}
378403831d35Sstevel 
378503831d35Sstevel 	return (0);
378603831d35Sstevel }
378703831d35Sstevel 
378803831d35Sstevel /*
378903831d35Sstevel  * Either this is a response from our attempt to sync the upper and lower
379003831d35Sstevel  * stream states, or its data. If its not data. Do DL_* response processing
379103831d35Sstevel  * and transition md_dlpistate accordingly. If its data, toss it.
379203831d35Sstevel  */
379303831d35Sstevel static int
man_lrsrv(queue_t * rq)379403831d35Sstevel man_lrsrv(queue_t *rq)
379503831d35Sstevel {
379603831d35Sstevel 	man_dest_t		*mdp;
379703831d35Sstevel 	mblk_t			*mp;
379803831d35Sstevel 	union DL_primitives	*dlp;
379903831d35Sstevel 	ulong_t			prim;
380003831d35Sstevel 	ulong_t			cprim;
380103831d35Sstevel 	int			need_dl_reset = FALSE;
380203831d35Sstevel 
380303831d35Sstevel #if defined(DEBUG)
380403831d35Sstevel 		struct iocblk	*iocp;
380503831d35Sstevel 		char		ioc_cmd[256];
380603831d35Sstevel #endif  /* DEBUG */
380703831d35Sstevel 
380803831d35Sstevel 	MAN_DBG(MAN_LRSRV, ("man_lrsrv: rq(0x%p)", (void *)rq));
380903831d35Sstevel 
381003831d35Sstevel 	mdp = (man_dest_t *)rq->q_ptr;
381103831d35Sstevel 
381203831d35Sstevel 	if ((mdp == NULL) || (mdp->md_state & MAN_DSTATE_CLOSING)) {
381303831d35Sstevel 			flushq(rq, FLUSHDATA);
381403831d35Sstevel 			flushq(WR(rq), FLUSHDATA);
381503831d35Sstevel 			goto exit;
381603831d35Sstevel 	}
381703831d35Sstevel 
381803831d35Sstevel 	while (mp = getq(rq)) {
381903831d35Sstevel 
382003831d35Sstevel 
382103831d35Sstevel 	/*
382203831d35Sstevel 	 * If we're not connected, or its a datagram, toss it.
382303831d35Sstevel 	 */
382403831d35Sstevel 	if (MAN_IS_DATA(mp) || mdp->md_state != MAN_DSTATE_READY) {
382503831d35Sstevel 
382603831d35Sstevel 		MAN_DBG(MAN_LRSRV, ("man_lrsrv: dropping mblk mdp(0x%p)"
382703831d35Sstevel 		    " is_data(%d)", (void *)mdp, MAN_IS_DATA(mp)));
382803831d35Sstevel 		freemsg(mp);
382903831d35Sstevel 		continue;
383003831d35Sstevel 	}
383103831d35Sstevel 
383203831d35Sstevel 	/*
383303831d35Sstevel 	 * Should be response to man_dlpi_replay. Discard unless there
383403831d35Sstevel 	 * is a failure we care about.
383503831d35Sstevel 	 */
383603831d35Sstevel 
383703831d35Sstevel 	switch (DB_TYPE(mp)) {
383803831d35Sstevel 	case M_PROTO:
383903831d35Sstevel 	case M_PCPROTO:
384003831d35Sstevel 		/* Do proto processing below. */
384103831d35Sstevel 		break;
384203831d35Sstevel 
384303831d35Sstevel 	case M_IOCNAK:
384403831d35Sstevel 		/*
384503831d35Sstevel 		 * DL_IOC* failed for some reason.
384603831d35Sstevel 		 */
384703831d35Sstevel 		need_dl_reset = TRUE;
384803831d35Sstevel 
384903831d35Sstevel #if defined(DEBUG)
385003831d35Sstevel 		iocp = (struct iocblk *)mp->b_rptr;
385103831d35Sstevel 
3852*07d06da5SSurya Prakki 		(void) sprintf(ioc_cmd, "0x%x", iocp->ioc_cmd);
385303831d35Sstevel 		MAN_DBG(MAN_LRSRV, ("man_lrsrv: M_IOCNAK err %d for cmd(%s)\n",
385403831d35Sstevel 		    iocp->ioc_error,
385503831d35Sstevel 		    (iocp->ioc_cmd == DL_IOC_HDR_INFO) ? "DL_IOC_HDR_INFO" :
385603831d35Sstevel 		    (iocp->ioc_cmd == DLIOCRAW) ? "DLIOCRAW" : ioc_cmd));
385703831d35Sstevel #endif  /* DEBUG */
385803831d35Sstevel 
385903831d35Sstevel 		/* FALLTHRU */
386003831d35Sstevel 
386103831d35Sstevel 	case M_IOCACK:
386203831d35Sstevel 	case M_CTL:
386303831d35Sstevel 		/*
386403831d35Sstevel 		 * OK response from DL_IOC*, ignore.
386503831d35Sstevel 		 */
386603831d35Sstevel 		goto dl_reset;
386703831d35Sstevel 	}
386803831d35Sstevel 
386903831d35Sstevel 	dlp = (union DL_primitives *)mp->b_rptr;
387003831d35Sstevel 	prim = dlp->dl_primitive;
387103831d35Sstevel 
387203831d35Sstevel 	MAN_DBG(MAN_LRSRV, ("man_lrsrv: prim %s", dps[(int)prim]));
387303831d35Sstevel 
387403831d35Sstevel 	/*
387503831d35Sstevel 	 * DLPI state processing big theory: We do not rigorously check
387603831d35Sstevel 	 * DLPI states (e.g. PENDING stuff). Simple rules:
387703831d35Sstevel 	 *
387803831d35Sstevel 	 * 	1) If we see an OK_ACK to an ATTACH_REQ, dlpistate = DL_UNBOUND.
387903831d35Sstevel 	 *	2) If we see an BIND_ACK to a BIND_REQ, dlpistate = DL_IDLE.
388003831d35Sstevel 	 *	3) If we see a OK_ACK response to an UNBIND_REQ
388103831d35Sstevel 	 *	   dlpistate = DL_UNBOUND.
388203831d35Sstevel 	 *	4) If we see a OK_ACK response to a DETACH_REQ,
388303831d35Sstevel 	 *	   dlpistate = DL_UNATTACHED.
388403831d35Sstevel 	 *
388503831d35Sstevel 	 * Everything that isn't handle by 1-4 above is handled by 5)
388603831d35Sstevel 	 *
388703831d35Sstevel 	 *	5) A NAK to any DL_* messages we care about causes
388803831d35Sstevel 	 *	   dlpistate = DL_UNATTACHED and man_reset_dlpi to run
388903831d35Sstevel 	 *
389003831d35Sstevel 	 * TBD - need a reset counter so we can try a switch if it gets
389103831d35Sstevel 	 * too high.
389203831d35Sstevel 	 */
389303831d35Sstevel 
389403831d35Sstevel 	switch (prim) {
389503831d35Sstevel 	case DL_OK_ACK:
389603831d35Sstevel 		cprim = dlp->ok_ack.dl_correct_primitive;
389703831d35Sstevel 
389803831d35Sstevel 		switch (cprim) {
389903831d35Sstevel 		case DL_ATTACH_REQ:
390003831d35Sstevel 			if (man_dlioc_replay(mdp)) {
390103831d35Sstevel 				D_SETSTATE(mdp, DL_UNBOUND);
390203831d35Sstevel 			} else {
390303831d35Sstevel 				need_dl_reset = TRUE;
390403831d35Sstevel 				break;
390503831d35Sstevel 			}
390603831d35Sstevel 			break;
390703831d35Sstevel 
390803831d35Sstevel 		case DL_DETACH_REQ:
390903831d35Sstevel 			D_SETSTATE(mdp, DL_UNATTACHED);
391003831d35Sstevel 			break;
391103831d35Sstevel 
391203831d35Sstevel 		case DL_UNBIND_REQ:
391303831d35Sstevel 			/*
391403831d35Sstevel 			 * Cancel timer and set md_dlpistate.
391503831d35Sstevel 			 */
391603831d35Sstevel 			D_SETSTATE(mdp, DL_UNBOUND);
391703831d35Sstevel 
391803831d35Sstevel 			ASSERT(mdp->md_bc_id == 0);
391903831d35Sstevel 			if (mdp->md_lc_timer_id != 0) {
392003831d35Sstevel 				(void) quntimeout(man_ctl_wq,
392103831d35Sstevel 				    mdp->md_lc_timer_id);
392203831d35Sstevel 				mdp->md_lc_timer_id = 0;
392303831d35Sstevel 			}
392403831d35Sstevel 		}
392503831d35Sstevel 		MAN_DBG(MAN_DLPI,
392603831d35Sstevel 		    ("		cprim %s", dps[(int)cprim]));
392703831d35Sstevel 		break;
392803831d35Sstevel 
392903831d35Sstevel 	case DL_BIND_ACK:
393003831d35Sstevel 		/*
393103831d35Sstevel 		 * We're ready for data. Get man_lwsrv to run to
393203831d35Sstevel 		 * process any defered data and start linkcheck timer.
393303831d35Sstevel 		 */
393403831d35Sstevel 		D_SETSTATE(mdp, DL_IDLE);
393503831d35Sstevel 		qenable(mdp->md_wq);
393603831d35Sstevel 		mdp->md_linkstate = MAN_LINKGOOD;
393703831d35Sstevel 		if (man_needs_linkcheck(mdp)) {
393803831d35Sstevel 			mdp->md_lc_timer_id = qtimeout(man_ctl_wq,
393903831d35Sstevel 			    man_linkcheck_timer, (void *)mdp,
394003831d35Sstevel 			    man_gettimer(MAN_TIMER_LINKCHECK, mdp));
394103831d35Sstevel 		}
394203831d35Sstevel 
394303831d35Sstevel 		break;
394403831d35Sstevel 
394503831d35Sstevel 	case DL_ERROR_ACK:
394603831d35Sstevel 		cprim = dlp->error_ack.dl_error_primitive;
394703831d35Sstevel 		switch (cprim) {
394803831d35Sstevel 		case DL_ATTACH_REQ:
394903831d35Sstevel 		case DL_BIND_REQ:
395003831d35Sstevel 		case DL_DISABMULTI_REQ:
395103831d35Sstevel 		case DL_ENABMULTI_REQ:
395203831d35Sstevel 		case DL_PROMISCON_REQ:
395303831d35Sstevel 		case DL_PROMISCOFF_REQ:
395403831d35Sstevel 		case DL_SET_PHYS_ADDR_REQ:
395503831d35Sstevel 			need_dl_reset = TRUE;
395603831d35Sstevel 			break;
395703831d35Sstevel 
395803831d35Sstevel 		/*
395903831d35Sstevel 		 * ignore error TBD (better comment)
396003831d35Sstevel 		 */
396103831d35Sstevel 		case DL_UNBIND_REQ:
396203831d35Sstevel 		case DL_DETACH_REQ:
396303831d35Sstevel 			break;
396403831d35Sstevel 		}
396503831d35Sstevel 
396603831d35Sstevel 		MAN_DBG(MAN_DLPI,
396703831d35Sstevel 		    ("\tdl_errno %d dl_unix_errno %d cprim %s",
396803831d35Sstevel 		    dlp->error_ack.dl_errno, dlp->error_ack.dl_unix_errno,
396903831d35Sstevel 		    dps[(int)cprim]));
397003831d35Sstevel 		break;
397103831d35Sstevel 
397203831d35Sstevel 	case DL_UDERROR_IND:
397303831d35Sstevel 		MAN_DBG(MAN_DLPI,
397403831d35Sstevel 		    ("\tdl_errno %d unix_errno %d",
397503831d35Sstevel 		    dlp->uderror_ind.dl_errno,
397603831d35Sstevel 		    dlp->uderror_ind.dl_unix_errno));
397703831d35Sstevel 		break;
397803831d35Sstevel 
397903831d35Sstevel 	case DL_INFO_ACK:
398003831d35Sstevel 		break;
398103831d35Sstevel 
398203831d35Sstevel 	default:
398303831d35Sstevel 		/*
398403831d35Sstevel 		 * We should not get here.
398503831d35Sstevel 		 */
398603831d35Sstevel 		cmn_err(CE_WARN, "man_lrsrv: unexpected DL prim 0x%lx!",
398703831d35Sstevel 		    prim);
398803831d35Sstevel 		need_dl_reset = TRUE;
398903831d35Sstevel 		break;
399003831d35Sstevel 	}
399103831d35Sstevel 
399203831d35Sstevel dl_reset:
399303831d35Sstevel 	freemsg(mp);
399403831d35Sstevel 
399503831d35Sstevel 	if (need_dl_reset) {
399603831d35Sstevel 		man_pg_t	*mpg;
399703831d35Sstevel 		man_path_t	*mp;
399803831d35Sstevel 
399903831d35Sstevel 		if (qsize(rq)) {	/* Dump all messages. */
400003831d35Sstevel 			flushq(rq, FLUSHDATA);
400103831d35Sstevel 			flushq(WR(rq), FLUSHDATA);
400203831d35Sstevel 		}
400303831d35Sstevel 
400403831d35Sstevel 		mdp->md_dlpierrors++;
400503831d35Sstevel 		D_SETSTATE(mdp, DL_UNATTACHED);
400603831d35Sstevel 		if (mdp->md_lc_timer_id != 0) {
400703831d35Sstevel 			(void) quntimeout(man_ctl_wq, mdp->md_lc_timer_id);
400803831d35Sstevel 			mdp->md_lc_timer_id = 0;
400903831d35Sstevel 		}
401003831d35Sstevel 
401103831d35Sstevel 		mutex_enter(&man_lock);
401203831d35Sstevel 		ASSERT(mdp->md_msp != NULL);
401303831d35Sstevel 		ASSERT(mdp->md_msp->ms_manp != NULL);
401403831d35Sstevel 		mpg = man_find_pg_by_id(mdp->md_msp->ms_manp->man_pg,
401503831d35Sstevel 		    mdp->md_pg_id);
401603831d35Sstevel 		ASSERT(mpg != NULL);
401703831d35Sstevel 		mp = man_find_path_by_ppa(mpg->mpg_pathp,
401803831d35Sstevel 		    mdp->md_device.mdev_ppa);
401903831d35Sstevel 		ASSERT(mp != NULL);
402003831d35Sstevel 		mp->mp_device.mdev_state |= MDEV_FAILED;
402103831d35Sstevel 		if ((mdp->md_dlpierrors >= MAN_MAX_DLPIERRORS) &&
402203831d35Sstevel 		    (man_is_on_domain ||
402303831d35Sstevel 		    mdp->md_msp->ms_manp->man_meta_ppa == 1)) {
402403831d35Sstevel 			/*
402503831d35Sstevel 			 * Autoswitching is disabled for instance 0
402603831d35Sstevel 			 * on the SC as we expect the domain to
402703831d35Sstevel 			 * initiate the path switching.
402803831d35Sstevel 			 */
402903831d35Sstevel 			(void) man_do_autoswitch((man_dest_t *)mdp);
403003831d35Sstevel 			MAN_DBG(MAN_WARN, ("man_lrsrv: dlpi failure(%d,%d),"
403103831d35Sstevel 			    " switching path", mdp->md_device.mdev_major,
403203831d35Sstevel 			    mdp->md_device.mdev_ppa));
403303831d35Sstevel 		} else {
403403831d35Sstevel 			mdp->md_lc_timer_id = qtimeout(man_ctl_wq,
403503831d35Sstevel 			    man_reset_dlpi, (void *)mdp,
403603831d35Sstevel 			    man_gettimer(MAN_TIMER_DLPIRESET, mdp));
403703831d35Sstevel 		}
403803831d35Sstevel 		mutex_exit(&man_lock);
403903831d35Sstevel 	}
404003831d35Sstevel 
404103831d35Sstevel 
404203831d35Sstevel 	} /* End while (getq()) */
404303831d35Sstevel 
404403831d35Sstevel exit:
404503831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_lrsrv: returns"));
404603831d35Sstevel 
404703831d35Sstevel 	return (0);
404803831d35Sstevel }
404903831d35Sstevel 
405003831d35Sstevel static int
man_needs_linkcheck(man_dest_t * mdp)405103831d35Sstevel man_needs_linkcheck(man_dest_t *mdp)
405203831d35Sstevel {
405303831d35Sstevel 	/*
405403831d35Sstevel 	 * Not ready for linkcheck.
405503831d35Sstevel 	 */
405603831d35Sstevel 	if (mdp->md_msp == NULL || mdp->md_msp->ms_manp == NULL)
405703831d35Sstevel 		return (0);
405803831d35Sstevel 
405903831d35Sstevel 	/*
406003831d35Sstevel 	 * Linkchecking needs to be done on IP streams. For domain, all
406103831d35Sstevel 	 * driver instances need checking, for SC only instance 1 needs it.
406203831d35Sstevel 	 */
406303831d35Sstevel 	if ((man_is_on_domain || mdp->md_msp->ms_manp->man_meta_ppa == 1) &&
406403831d35Sstevel 	    (mdp->md_msp->ms_sap == ETHERTYPE_IP ||
406503831d35Sstevel 	    mdp->md_msp->ms_sap == ETHERTYPE_IPV6))
406603831d35Sstevel 
406703831d35Sstevel 		return (1);
406803831d35Sstevel 
406903831d35Sstevel 	/*
407003831d35Sstevel 	 * Linkcheck not need on this link.
407103831d35Sstevel 	 */
407203831d35Sstevel 	return (0);
407303831d35Sstevel }
407403831d35Sstevel 
407503831d35Sstevel /*
407603831d35Sstevel  * The following routines process work requests posted to man_iwork_q
407703831d35Sstevel  * from the non-STREAMS half of the driver (see man_bwork.c). The work
407803831d35Sstevel  * requires access to the inner perimeter lock of the driver. This
407903831d35Sstevel  * lock is acquired by man_uwsrv, who calls man_iwork to process the
408003831d35Sstevel  * man_iwork_q->
408103831d35Sstevel  */
408203831d35Sstevel 
408303831d35Sstevel /*
408403831d35Sstevel  * The man_bwork has posted some work for us to do inside the
408503831d35Sstevel  * perimeter. This mainly involves updating lower multiplexor data
408603831d35Sstevel  * structures (non-blocking type stuff). So, we can hold the man_lock
408703831d35Sstevel  * until we are done processing all work items. Note that some of these
408803831d35Sstevel  * routines in turn submit work back to the bgthread, which they can do
408903831d35Sstevel  * since we hold the man_lock.
409003831d35Sstevel  */
409103831d35Sstevel static void
man_iwork()409203831d35Sstevel man_iwork()
409303831d35Sstevel {
409403831d35Sstevel 	man_work_t	*wp;
409503831d35Sstevel 	int		wp_finished;
409603831d35Sstevel 
409703831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_iwork: q_work(0x%p)",
409803831d35Sstevel 	    (void *)man_iwork_q->q_work));
409903831d35Sstevel 
410003831d35Sstevel 	mutex_enter(&man_lock);
410103831d35Sstevel 
410203831d35Sstevel 	while (man_iwork_q->q_work) {
410303831d35Sstevel 
410403831d35Sstevel 		wp = man_iwork_q->q_work;
410503831d35Sstevel 		man_iwork_q->q_work = wp->mw_next;
410603831d35Sstevel 		wp->mw_next = NULL;
410703831d35Sstevel 
410803831d35Sstevel 		mutex_exit(&man_lock);
410903831d35Sstevel 
411003831d35Sstevel 		MAN_DBG(MAN_SWITCH, ("man_iwork: type %s",
411103831d35Sstevel 		    _mw_type[wp->mw_type]));
411203831d35Sstevel 
411303831d35Sstevel 		wp_finished = TRUE;
411403831d35Sstevel 
411503831d35Sstevel 		switch (wp->mw_type) {
411603831d35Sstevel 		case MAN_WORK_DRATTACH:
411703831d35Sstevel 			(void) man_do_dr_attach(wp);
411803831d35Sstevel 			break;
411903831d35Sstevel 
412003831d35Sstevel 		case MAN_WORK_DRSWITCH:
412103831d35Sstevel 			/*
412203831d35Sstevel 			 * Return status to man_dr_detach immediately. If
412303831d35Sstevel 			 * no error submitting SWITCH request, man_iswitch
412403831d35Sstevel 			 * or man_bclose will cv_signal man_dr_detach on
412503831d35Sstevel 			 * completion of SWITCH work request.
412603831d35Sstevel 			 */
412703831d35Sstevel 			if (man_do_dr_switch(wp) == 0)
412803831d35Sstevel 				wp_finished = FALSE;
412903831d35Sstevel 			break;
413003831d35Sstevel 
413103831d35Sstevel 		case MAN_WORK_DRDETACH:
413203831d35Sstevel 			man_do_dr_detach(wp);
413303831d35Sstevel 			break;
413403831d35Sstevel 
413503831d35Sstevel 		case MAN_WORK_SWITCH:
413603831d35Sstevel 			if (man_iswitch(wp))
413703831d35Sstevel 				wp_finished = FALSE;
413803831d35Sstevel 			break;
413903831d35Sstevel 
414003831d35Sstevel 		case MAN_WORK_KSTAT_UPDATE:
414103831d35Sstevel 			man_do_kstats(wp);
414203831d35Sstevel 			break;
414303831d35Sstevel 
414403831d35Sstevel 		default:
414503831d35Sstevel 			cmn_err(CE_WARN, "man_iwork: "
414603831d35Sstevel 			    "illegal work type(%d)", wp->mw_type);
414703831d35Sstevel 			break;
414803831d35Sstevel 		}
414903831d35Sstevel 
415003831d35Sstevel 		mutex_enter(&man_lock);
415103831d35Sstevel 
415203831d35Sstevel 		/*
415303831d35Sstevel 		 * If we've completed the work request, delete, or
415403831d35Sstevel 		 * cv_signal waiter.
415503831d35Sstevel 		 */
415603831d35Sstevel 		if (wp_finished) {
415703831d35Sstevel 			wp->mw_flags |= MAN_WFLAGS_DONE;
415803831d35Sstevel 
415903831d35Sstevel 			if (wp->mw_flags & MAN_WFLAGS_CVWAITER)
416003831d35Sstevel 				cv_signal(&wp->mw_cv);
416103831d35Sstevel 			else
416203831d35Sstevel 				man_work_free(wp);
416303831d35Sstevel 		}
416403831d35Sstevel 	}
416503831d35Sstevel 
416603831d35Sstevel 	mutex_exit(&man_lock);
416703831d35Sstevel }
416803831d35Sstevel 
416903831d35Sstevel /*
417003831d35Sstevel  * man_dr_detach has submitted a request to DRSWITCH a path.
417103831d35Sstevel  * He is in cv_wait_sig(wp->mw_cv). We forward the work request on to
417203831d35Sstevel  * man_bwork as a switch request. It should end up back at
417303831d35Sstevel  * man_iwork, who will cv_signal(wp->mw_cv) man_dr_detach.
417403831d35Sstevel  *
417503831d35Sstevel  * Called holding inner perimeter lock.
417603831d35Sstevel  * man_lock is held to synchronize access to pathgroup list(man_pg).
417703831d35Sstevel  */
417803831d35Sstevel static int
man_do_dr_switch(man_work_t * wp)417903831d35Sstevel man_do_dr_switch(man_work_t *wp)
418003831d35Sstevel {
418103831d35Sstevel 	man_t		*manp;
418203831d35Sstevel 	man_pg_t	*mpg;
418303831d35Sstevel 	man_path_t	*mp;
418403831d35Sstevel 	man_path_t	*ap;
418503831d35Sstevel 	man_adest_t	*adp;
418603831d35Sstevel 	mi_path_t	mpath;
418703831d35Sstevel 	int		status = 0;
418803831d35Sstevel 
418903831d35Sstevel 	adp = &wp->mw_arg;
419003831d35Sstevel 
419103831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_do_dr_switch: pg_id %d work:", adp->a_pg_id));
419203831d35Sstevel 	MAN_DBGCALL(MAN_SWITCH, man_print_work(wp));
419303831d35Sstevel 
419403831d35Sstevel 	mutex_enter(&man_lock);
419503831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, adp->a_man_ppa);
419603831d35Sstevel 	if (manp == NULL || manp->man_pg == NULL) {
419703831d35Sstevel 		status = ENODEV;
419803831d35Sstevel 		goto exit;
419903831d35Sstevel 	}
420003831d35Sstevel 
420103831d35Sstevel 	mpg = man_find_pg_by_id(manp->man_pg, adp->a_pg_id);
420203831d35Sstevel 	if (mpg == NULL) {
420303831d35Sstevel 		status = ENODEV;
420403831d35Sstevel 		goto exit;
420503831d35Sstevel 	}
420603831d35Sstevel 
420703831d35Sstevel 	if (mpg->mpg_flags & MAN_PG_SWITCHING) {
420803831d35Sstevel 		status = EAGAIN;
420903831d35Sstevel 		goto exit;
421003831d35Sstevel 	}
421103831d35Sstevel 
421203831d35Sstevel 	/*
421303831d35Sstevel 	 * Check to see if detaching device is active. If so, activate
421403831d35Sstevel 	 * an alternate.
421503831d35Sstevel 	 */
421603831d35Sstevel 	mp = man_find_active_path(mpg->mpg_pathp);
421703831d35Sstevel 	if (mp && mp->mp_device.mdev_ppa == adp->a_sf_dev.mdev_ppa) {
421803831d35Sstevel 
421903831d35Sstevel 		ap = man_find_alternate_path(mpg->mpg_pathp);
422003831d35Sstevel 		if (ap == NULL) {
422103831d35Sstevel 			status = EBUSY;
422203831d35Sstevel 			goto exit;
422303831d35Sstevel 		}
422403831d35Sstevel 
422503831d35Sstevel 		bzero((char *)&mpath, sizeof (mi_path_t));
422603831d35Sstevel 
422703831d35Sstevel 		mpath.mip_cmd = MI_PATH_ACTIVATE;
422803831d35Sstevel 		mpath.mip_man_ppa = 0;
422903831d35Sstevel 		mpath.mip_pg_id = 0;
423003831d35Sstevel 		mpath.mip_devs[0] = ap->mp_device;
423103831d35Sstevel 		mpath.mip_ndevs = 1;
423203831d35Sstevel 		ether_copy(&manp->man_eaddr, &mpath.mip_eaddr);
423303831d35Sstevel 
423403831d35Sstevel 		/*
423503831d35Sstevel 		 * DR thread is sleeping on wp->mw_cv. We change the work
423603831d35Sstevel 		 * request from DRSWITCH to SWITCH and submit it to
423703831d35Sstevel 		 * for processing by man_bwork (via man_pg_cmd). At
423803831d35Sstevel 		 * completion the SWITCH work request is processed by
423903831d35Sstevel 		 * man_iswitch() or man_bclose and the DR thread will
424003831d35Sstevel 		 * be cv_signal'd.
424103831d35Sstevel 		 */
424203831d35Sstevel 		wp->mw_type = MAN_WORK_SWITCH;
424303831d35Sstevel 		if (status = man_pg_cmd(&mpath, wp))
424403831d35Sstevel 			goto exit;
424503831d35Sstevel 
424603831d35Sstevel 	} else {
424703831d35Sstevel 		/*
424803831d35Sstevel 		 * Tell man_dr_detach that detaching device is not currently
424903831d35Sstevel 		 * in use.
425003831d35Sstevel 		 */
425103831d35Sstevel 		status = ENODEV;
425203831d35Sstevel 	}
425303831d35Sstevel 
425403831d35Sstevel exit:
425503831d35Sstevel 	if (status) {
425603831d35Sstevel 		/*
425703831d35Sstevel 		 * ENODEV is a noop, not really an error.
425803831d35Sstevel 		 */
425903831d35Sstevel 		if (status != ENODEV)
426003831d35Sstevel 			wp->mw_status = status;
426103831d35Sstevel 	}
426203831d35Sstevel 	mutex_exit(&man_lock);
426303831d35Sstevel 
426403831d35Sstevel 	return (status);
426503831d35Sstevel }
426603831d35Sstevel 
426703831d35Sstevel /*
426803831d35Sstevel  * man_dr_attach has submitted a request to DRATTACH a path,
426903831d35Sstevel  * add that path to the path list.
427003831d35Sstevel  *
427103831d35Sstevel  * Called holding perimeter lock.
427203831d35Sstevel  */
427303831d35Sstevel static int
man_do_dr_attach(man_work_t * wp)427403831d35Sstevel man_do_dr_attach(man_work_t *wp)
427503831d35Sstevel {
427603831d35Sstevel 	man_t		*manp;
427703831d35Sstevel 	man_adest_t	*adp;
427803831d35Sstevel 	mi_path_t	mpath;
427903831d35Sstevel 	manc_t		manc;
428003831d35Sstevel 	int		status = 0;
428103831d35Sstevel 
428203831d35Sstevel 	adp = &wp->mw_arg;
428303831d35Sstevel 
428403831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_do_dr_attach: pg_id %d work:", adp->a_pg_id));
428503831d35Sstevel 	MAN_DBGCALL(MAN_SWITCH, man_print_work(wp));
428603831d35Sstevel 
428703831d35Sstevel 	mutex_enter(&man_lock);
428803831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, adp->a_man_ppa);
428903831d35Sstevel 	if (manp == NULL || manp->man_pg == NULL) {
429003831d35Sstevel 		status = ENODEV;
429103831d35Sstevel 		goto exit;
429203831d35Sstevel 	}
429303831d35Sstevel 
429403831d35Sstevel 	if (status = man_get_iosram(&manc)) {
429503831d35Sstevel 		goto exit;
429603831d35Sstevel 	}
429703831d35Sstevel 	/*
429803831d35Sstevel 	 * Extract SC ethernet address from IOSRAM.
429903831d35Sstevel 	 */
430003831d35Sstevel 	ether_copy(&manc.manc_sc_eaddr, &mpath.mip_eaddr);
430103831d35Sstevel 
430203831d35Sstevel 	mpath.mip_pg_id = adp->a_pg_id;
430303831d35Sstevel 	mpath.mip_man_ppa = adp->a_man_ppa;
430403831d35Sstevel 	/*
430503831d35Sstevel 	 * man_dr_attach passes the new device info in a_sf_dev.
430603831d35Sstevel 	 */
430703831d35Sstevel 	MAN_DBG(MAN_DR, ("man_do_dr_attach: "));
430803831d35Sstevel 	MAN_DBGCALL(MAN_DR, man_print_dev(&adp->a_sf_dev));
430903831d35Sstevel 	mpath.mip_devs[0] = adp->a_sf_dev;
431003831d35Sstevel 	mpath.mip_ndevs = 1;
431103831d35Sstevel 	mpath.mip_cmd = MI_PATH_ADD;
431203831d35Sstevel 	status = man_pg_cmd(&mpath, NULL);
431303831d35Sstevel 
431403831d35Sstevel exit:
431503831d35Sstevel 	mutex_exit(&man_lock);
431603831d35Sstevel 	return (status);
431703831d35Sstevel }
431803831d35Sstevel 
431903831d35Sstevel /*
432003831d35Sstevel  * man_dr_detach has submitted a request to DRDETACH a path.
432103831d35Sstevel  * He is in cv_wait_sig(wp->mw_cv). We remove the path and
432203831d35Sstevel  * cv_signal(wp->mw_cv) man_dr_detach.
432303831d35Sstevel  *
432403831d35Sstevel  * Called holding perimeter lock.
432503831d35Sstevel  */
432603831d35Sstevel static void
man_do_dr_detach(man_work_t * wp)432703831d35Sstevel man_do_dr_detach(man_work_t *wp)
432803831d35Sstevel {
432903831d35Sstevel 	man_t		*manp;
433003831d35Sstevel 	man_pg_t	*mpg;
433103831d35Sstevel 	man_path_t	*mp;
433203831d35Sstevel 	man_adest_t	*adp;
433303831d35Sstevel 	manc_t		manc;
433403831d35Sstevel 	mi_path_t	mpath;
433503831d35Sstevel 	int		i;
433603831d35Sstevel 	int		found;
433703831d35Sstevel 	int		status = 0;
433803831d35Sstevel 
433903831d35Sstevel 	adp = &wp->mw_arg;
434003831d35Sstevel 
434103831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_do_dr_detach: pg_id %d work:", adp->a_pg_id));
434203831d35Sstevel 	MAN_DBGCALL(MAN_SWITCH, man_print_work(wp));
434303831d35Sstevel 
434403831d35Sstevel 	mutex_enter(&man_lock);
434503831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, adp->a_man_ppa);
434603831d35Sstevel 	if (manp == NULL || manp->man_pg == NULL) {
434703831d35Sstevel 		status = ENODEV;
434803831d35Sstevel 		goto exit;
434903831d35Sstevel 	}
435003831d35Sstevel 
435103831d35Sstevel 	mpg = man_find_pg_by_id(manp->man_pg, adp->a_pg_id);
435203831d35Sstevel 	if (mpg == NULL) {
435303831d35Sstevel 		status = ENODEV;
435403831d35Sstevel 		goto exit;
435503831d35Sstevel 	}
435603831d35Sstevel 
435703831d35Sstevel 	if (mpg->mpg_flags & MAN_PG_SWITCHING) {
435803831d35Sstevel 		status = EAGAIN;
435903831d35Sstevel 		goto exit;
436003831d35Sstevel 	}
436103831d35Sstevel 
436203831d35Sstevel 	/*
436303831d35Sstevel 	 * We should have switched detaching path if it was active.
436403831d35Sstevel 	 */
436503831d35Sstevel 	mp = man_find_active_path(mpg->mpg_pathp);
436603831d35Sstevel 	if (mp && mp->mp_device.mdev_ppa == adp->a_sf_dev.mdev_ppa) {
436703831d35Sstevel 		status = EAGAIN;
436803831d35Sstevel 		goto exit;
436903831d35Sstevel 	}
437003831d35Sstevel 
437103831d35Sstevel 	/*
437203831d35Sstevel 	 * Submit an ASSIGN command, minus the detaching device.
437303831d35Sstevel 	 */
437403831d35Sstevel 	bzero((char *)&mpath, sizeof (mi_path_t));
437503831d35Sstevel 
437603831d35Sstevel 	if (status = man_get_iosram(&manc)) {
437703831d35Sstevel 		goto exit;
437803831d35Sstevel 	}
437903831d35Sstevel 
438003831d35Sstevel 	mpath.mip_cmd = MI_PATH_ASSIGN;
438103831d35Sstevel 	mpath.mip_man_ppa = 0;
438203831d35Sstevel 	mpath.mip_pg_id = 0;
438303831d35Sstevel 
438403831d35Sstevel 	mp = mpg->mpg_pathp;
438503831d35Sstevel 	i = 0;
438603831d35Sstevel 	found = FALSE;
438703831d35Sstevel 	while (mp != NULL) {
438803831d35Sstevel 		if (mp->mp_device.mdev_ppa != adp->a_sf_dev.mdev_ppa) {
438903831d35Sstevel 			mpath.mip_devs[i] = mp->mp_device;
439003831d35Sstevel 			i++;
439103831d35Sstevel 		} else {
439203831d35Sstevel 			found = TRUE;
439303831d35Sstevel 		}
439403831d35Sstevel 		mp = mp->mp_next;
439503831d35Sstevel 	}
439603831d35Sstevel 
439703831d35Sstevel 	if (found) {
439803831d35Sstevel 		/*
439903831d35Sstevel 		 * Need to include SCs ethernet address in command.
440003831d35Sstevel 		 */
440103831d35Sstevel 		mpath.mip_ndevs = i;
440203831d35Sstevel 		ether_copy(&manc.manc_sc_eaddr, &mpath.mip_eaddr);
440303831d35Sstevel 
440403831d35Sstevel 		status = man_pg_cmd(&mpath, NULL);
440503831d35Sstevel 	}
440603831d35Sstevel 
440703831d35Sstevel 	/*
440803831d35Sstevel 	 * Hand back status to man_dr_detach request.
440903831d35Sstevel 	 */
441003831d35Sstevel exit:
441103831d35Sstevel 	if (status != ENODEV)
441203831d35Sstevel 		wp->mw_status = status;
441303831d35Sstevel 
441403831d35Sstevel 	mutex_exit(&man_lock);
441503831d35Sstevel 
441603831d35Sstevel }
441703831d35Sstevel 
441803831d35Sstevel 
441903831d35Sstevel /*
442003831d35Sstevel  * The background thread has configured new lower multiplexor streams for
442103831d35Sstevel  * the given destinations. Update the appropriate destination data structures
442203831d35Sstevel  * inside the inner perimeter. We must take care to deal with destinations
442303831d35Sstevel  * whose upper stream has closed or detached from lower streams.
442403831d35Sstevel  *
442503831d35Sstevel  * Returns
442603831d35Sstevel  *	0		Done with work request.
442703831d35Sstevel  *	1		Reused work request.
442803831d35Sstevel  */
442903831d35Sstevel static int
man_iswitch(man_work_t * wp)443003831d35Sstevel man_iswitch(man_work_t *wp)
443103831d35Sstevel {
443203831d35Sstevel 	man_adest_t	*adp;
443303831d35Sstevel 	man_t		*manp;
443403831d35Sstevel 	man_pg_t	*mpg;
443503831d35Sstevel 	man_path_t	*mp = NULL;
443603831d35Sstevel 	man_dest_t	*mdp;
443703831d35Sstevel 	man_dest_t	*tdp;
443803831d35Sstevel 	int		i;
443903831d35Sstevel 	int		switch_ok = TRUE;
444003831d35Sstevel 
444103831d35Sstevel 	adp = &wp->mw_arg;
444203831d35Sstevel 
444303831d35Sstevel 	if (wp->mw_status != 0) {
444403831d35Sstevel 		switch_ok = FALSE;	/* Never got things opened */
444503831d35Sstevel 	}
444603831d35Sstevel 
444703831d35Sstevel 	/*
444803831d35Sstevel 	 * Update destination structures as appropriate.
444903831d35Sstevel 	 */
445003831d35Sstevel 	for (i = 0; i < adp->a_ndests; i++) {
445103831d35Sstevel 		man_dest_t	tmp;
445203831d35Sstevel 
445303831d35Sstevel 		/*
445403831d35Sstevel 		 * Check to see if lower stream we just switch is still
445503831d35Sstevel 		 * around.
445603831d35Sstevel 		 */
445703831d35Sstevel 		tdp = &adp->a_mdp[i];
445803831d35Sstevel 		mdp = man_switch_match(tdp, adp->a_pg_id, tdp->md_switch_id);
445903831d35Sstevel 
446003831d35Sstevel 		if (mdp == NULL)
446103831d35Sstevel 			continue;
446203831d35Sstevel 
446303831d35Sstevel 		if (switch_ok == FALSE) {
446403831d35Sstevel 			/*
446503831d35Sstevel 			 * Switch failed for some reason.  Clear
446603831d35Sstevel 			 * PLUMBING flag and retry switch again later.
446703831d35Sstevel 			 */
446803831d35Sstevel 			man_ifail_dest(mdp);
446903831d35Sstevel 			continue;
447003831d35Sstevel 		}
447103831d35Sstevel 
447203831d35Sstevel 		/*
447303831d35Sstevel 		 * Swap new info, for old. We return the old info to
447403831d35Sstevel 		 * man_bwork to close things up below.
447503831d35Sstevel 		 */
447603831d35Sstevel 		bcopy((char *)mdp, (char *)&tmp, sizeof (man_dest_t));
447703831d35Sstevel 
447803831d35Sstevel 		ASSERT(mdp->md_state & MAN_DSTATE_PLUMBING);
447903831d35Sstevel 		ASSERT(mdp->md_state == tdp->md_state);
448003831d35Sstevel 
448103831d35Sstevel 		mdp->md_state = tdp->md_state;
448203831d35Sstevel 
448303831d35Sstevel 		/*
448403831d35Sstevel 		 * save the wq from the destination passed(tdp).
448503831d35Sstevel 		 */
448603831d35Sstevel 		mdp->md_wq = tdp->md_wq;
448703831d35Sstevel 		RD(mdp->md_wq)->q_ptr = (void *)(mdp);
448803831d35Sstevel 		WR(mdp->md_wq)->q_ptr = (void *)(mdp);
448903831d35Sstevel 
449003831d35Sstevel 		mdp->md_state &= ~MAN_DSTATE_INITIALIZING;
449103831d35Sstevel 		mdp->md_state |= MAN_DSTATE_READY;
449203831d35Sstevel 
449303831d35Sstevel 		ASSERT(mdp->md_device.mdev_major == adp->a_sf_dev.mdev_major);
449403831d35Sstevel 
449503831d35Sstevel 		ASSERT(tdp->md_device.mdev_ppa == adp->a_st_dev.mdev_ppa);
449603831d35Sstevel 		ASSERT(tdp->md_device.mdev_major == adp->a_st_dev.mdev_major);
449703831d35Sstevel 
449803831d35Sstevel 		mdp->md_device = tdp->md_device;
449903831d35Sstevel 		mdp->md_muxid = tdp->md_muxid;
450003831d35Sstevel 		mdp->md_linkstate = MAN_LINKUNKNOWN;
450103831d35Sstevel 		(void) drv_getparm(TIME, &mdp->md_lastswitch);
450203831d35Sstevel 		mdp->md_state &= ~MAN_DSTATE_PLUMBING;
450303831d35Sstevel 		mdp->md_switch_id = 0;
450403831d35Sstevel 		mdp->md_switches++;
450503831d35Sstevel 		mdp->md_dlpierrors = 0;
450603831d35Sstevel 		D_SETSTATE(mdp, DL_UNATTACHED);
450703831d35Sstevel 
450803831d35Sstevel 		/*
450903831d35Sstevel 		 * Resync lower w/ upper dlpi state. This will start link
451003831d35Sstevel 		 * timer if/when lower stream goes to DL_IDLE (see man_lrsrv).
451103831d35Sstevel 		 */
451203831d35Sstevel 		man_reset_dlpi((void *)mdp);
451303831d35Sstevel 
451403831d35Sstevel 		bcopy((char *)&tmp, (char *)tdp, sizeof (man_dest_t));
451503831d35Sstevel 	}
451603831d35Sstevel 
451703831d35Sstevel 	if (switch_ok) {
451803831d35Sstevel 		for (i = 0; i < adp->a_ndests; i++) {
451903831d35Sstevel 			tdp = &adp->a_mdp[i];
452003831d35Sstevel 
452103831d35Sstevel 			tdp->md_state &= ~MAN_DSTATE_PLUMBING;
452203831d35Sstevel 			tdp->md_state &= ~MAN_DSTATE_INITIALIZING;
452303831d35Sstevel 			tdp->md_state |= MAN_DSTATE_READY;
452403831d35Sstevel 		}
452503831d35Sstevel 	} else {
452603831d35Sstevel 		/*
452703831d35Sstevel 		 * Never got switch-to destinations open, free them.
452803831d35Sstevel 		 */
452903831d35Sstevel 		man_kfree(adp->a_mdp,
453003831d35Sstevel 		    sizeof (man_dest_t) * adp->a_ndests);
453103831d35Sstevel 	}
453203831d35Sstevel 
453303831d35Sstevel 	/*
453403831d35Sstevel 	 * Clear pathgroup switching flag and update path flags.
453503831d35Sstevel 	 */
453603831d35Sstevel 	mutex_enter(&man_lock);
453703831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, adp->a_man_ppa);
453803831d35Sstevel 
453903831d35Sstevel 	ASSERT(manp != NULL);
454003831d35Sstevel 	ASSERT(manp->man_pg != NULL);
454103831d35Sstevel 
454203831d35Sstevel 	mpg = man_find_pg_by_id(manp->man_pg, adp->a_pg_id);
454303831d35Sstevel 	ASSERT(mpg != NULL);
454403831d35Sstevel 	ASSERT(mpg->mpg_flags & MAN_PG_SWITCHING);
454503831d35Sstevel 	mpg->mpg_flags &= ~MAN_PG_SWITCHING;
454603831d35Sstevel 
454703831d35Sstevel 	/*
454803831d35Sstevel 	 * Switch succeeded, mark path we switched from as failed, and
454903831d35Sstevel 	 * device we switch to as active and clear its failed flag (if set).
455003831d35Sstevel 	 * Sync up kstats.
455103831d35Sstevel 	 */
455203831d35Sstevel 	if (switch_ok) {
455303831d35Sstevel 		mp = man_find_active_path(mpg->mpg_pathp);
455403831d35Sstevel 		if (mp != NULL) {
455503831d35Sstevel 
455603831d35Sstevel 			ASSERT(adp->a_sf_dev.mdev_major != 0);
455703831d35Sstevel 
455803831d35Sstevel 			MAN_DBG(MAN_SWITCH, ("man_iswitch: switch from dev:"));
455903831d35Sstevel 			MAN_DBGCALL(MAN_SWITCH, man_print_dev(&adp->a_sf_dev));
456003831d35Sstevel 
456103831d35Sstevel 			mp->mp_device.mdev_state &= ~MDEV_ACTIVE;
456203831d35Sstevel 		} else
456303831d35Sstevel 			ASSERT(adp->a_sf_dev.mdev_major == 0);
456403831d35Sstevel 
456503831d35Sstevel 		MAN_DBG(MAN_SWITCH, ("man_iswitch: switch to dev:"));
456603831d35Sstevel 		MAN_DBGCALL(MAN_SWITCH, man_print_dev(&adp->a_st_dev));
456703831d35Sstevel 
456803831d35Sstevel 		ASSERT(adp->a_st_dev.mdev_major != 0);
456903831d35Sstevel 
457003831d35Sstevel 		mp = man_find_path_by_ppa(mpg->mpg_pathp,
457103831d35Sstevel 		    adp->a_st_dev.mdev_ppa);
457203831d35Sstevel 
457303831d35Sstevel 		ASSERT(mp != NULL);
457403831d35Sstevel 
457503831d35Sstevel 		mp->mp_device.mdev_state |= MDEV_ACTIVE;
457603831d35Sstevel 	}
457703831d35Sstevel 
457803831d35Sstevel 	/*
457903831d35Sstevel 	 * Decrement manp reference count and hand back work request if
458003831d35Sstevel 	 * needed.
458103831d35Sstevel 	 */
458203831d35Sstevel 	manp->man_refcnt--;
458303831d35Sstevel 
458403831d35Sstevel 	if (switch_ok) {
458503831d35Sstevel 		wp->mw_type = MAN_WORK_CLOSE;
458603831d35Sstevel 		man_work_add(man_bwork_q, wp);
458703831d35Sstevel 	}
458803831d35Sstevel 
458903831d35Sstevel 	mutex_exit(&man_lock);
459003831d35Sstevel 
459103831d35Sstevel 	return (switch_ok);
459203831d35Sstevel }
459303831d35Sstevel 
459403831d35Sstevel /*
459503831d35Sstevel  * Find the destination in the upper stream that we just switched.
459603831d35Sstevel  */
459703831d35Sstevel man_dest_t *
man_switch_match(man_dest_t * sdp,int pg_id,void * sid)459803831d35Sstevel man_switch_match(man_dest_t *sdp, int pg_id, void *sid)
459903831d35Sstevel {
460003831d35Sstevel 	man_dest_t	*mdp = NULL;
460103831d35Sstevel 	manstr_t	*msp;
460203831d35Sstevel 
460303831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
460403831d35Sstevel 		/*
460503831d35Sstevel 		 * Check if upper stream closed, or detached.
460603831d35Sstevel 		 */
460703831d35Sstevel 		if (msp != sdp->md_msp)
460803831d35Sstevel 			continue;
460903831d35Sstevel 
461003831d35Sstevel 		if (msp->ms_dests == NULL)
461103831d35Sstevel 			break;
461203831d35Sstevel 
461303831d35Sstevel 		mdp = &msp->ms_dests[pg_id];
461403831d35Sstevel 
461503831d35Sstevel 		/*
461603831d35Sstevel 		 * Upper stream detached and reattached while we were
461703831d35Sstevel 		 * switching.
461803831d35Sstevel 		 */
461903831d35Sstevel 		if (mdp->md_switch_id != sid) {
462003831d35Sstevel 			mdp = NULL;
462103831d35Sstevel 			break;
462203831d35Sstevel 		}
462303831d35Sstevel 	}
462403831d35Sstevel 
462503831d35Sstevel 	return (mdp);
462603831d35Sstevel }
462703831d35Sstevel 
462803831d35Sstevel /*
462903831d35Sstevel  * bg_thread cant complete the switch for some reason. (Re)start the
463003831d35Sstevel  * linkcheck timer again.
463103831d35Sstevel  */
463203831d35Sstevel static void
man_ifail_dest(man_dest_t * mdp)463303831d35Sstevel man_ifail_dest(man_dest_t *mdp)
463403831d35Sstevel {
463503831d35Sstevel 	ASSERT(mdp->md_lc_timer_id == 0);
463603831d35Sstevel 	ASSERT(mdp->md_bc_id == 0);
463703831d35Sstevel 	ASSERT(mdp->md_state & MAN_DSTATE_PLUMBING);
463803831d35Sstevel 
463903831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_ifail_dest"));
464003831d35Sstevel 	MAN_DBGCALL(MAN_SWITCH, man_print_mdp(mdp));
464103831d35Sstevel 
464203831d35Sstevel 	mdp->md_state &= ~MAN_DSTATE_PLUMBING;
464303831d35Sstevel 	mdp->md_linkstate = MAN_LINKFAIL;
464403831d35Sstevel 
464503831d35Sstevel 	/*
464603831d35Sstevel 	 * If we have not yet initialized link, or the upper stream is
464703831d35Sstevel 	 * DL_IDLE, restart the linktimer.
464803831d35Sstevel 	 */
464903831d35Sstevel 	if ((mdp->md_state & MAN_DSTATE_INITIALIZING) ||
465003831d35Sstevel 	    ((mdp->md_msp->ms_sap == ETHERTYPE_IPV6 ||
465103831d35Sstevel 	    mdp->md_msp->ms_sap == ETHERTYPE_IP) &&
465203831d35Sstevel 	    mdp->md_msp->ms_dlpistate == DL_IDLE)) {
465303831d35Sstevel 
465403831d35Sstevel 		mdp->md_lc_timer_id = qtimeout(man_ctl_wq, man_linkcheck_timer,
465503831d35Sstevel 		    (void *)mdp, man_gettimer(MAN_TIMER_LINKCHECK, mdp));
465603831d35Sstevel 	}
465703831d35Sstevel 
465803831d35Sstevel }
465903831d35Sstevel 
466003831d35Sstevel /*
466103831d35Sstevel  * Arrange to replay all of ms_dl_mp on the new lower stream to get it
466203831d35Sstevel  * in sync with the upper stream. Note that this includes setting the
466303831d35Sstevel  * physical address.
466403831d35Sstevel  *
466503831d35Sstevel  * Called from qtimeout with inner perimeter lock.
466603831d35Sstevel  */
466703831d35Sstevel static void
man_reset_dlpi(void * argp)466803831d35Sstevel man_reset_dlpi(void *argp)
466903831d35Sstevel {
467003831d35Sstevel 	man_dest_t	*mdp = (man_dest_t *)argp;
467103831d35Sstevel 	manstr_t	*msp;
467203831d35Sstevel 	mblk_t		*mp;
467303831d35Sstevel 	mblk_t		*rmp = NULL;
467403831d35Sstevel 	mblk_t		*tmp;
467503831d35Sstevel 
467603831d35Sstevel 	mdp->md_lc_timer_id = 0;
467703831d35Sstevel 
467803831d35Sstevel 	if (mdp->md_state != MAN_DSTATE_READY) {
467903831d35Sstevel 		MAN_DBG(MAN_DLPI, ("man_reset_dlpi: not ready!"));
468003831d35Sstevel 		return;
468103831d35Sstevel 	}
468203831d35Sstevel 
468303831d35Sstevel 	msp = mdp->md_msp;
468403831d35Sstevel 
468503831d35Sstevel 	rmp = man_dup_mplist(msp->ms_dl_mp);
468603831d35Sstevel 	if (rmp == NULL)
468703831d35Sstevel 		goto fail;
468803831d35Sstevel 
468903831d35Sstevel 	/*
469003831d35Sstevel 	 * Send down an unbind and detach request, just to clean things
469103831d35Sstevel 	 * out, we ignore ERROR_ACKs for unbind and detach in man_lrsrv.
469203831d35Sstevel 	 */
469303831d35Sstevel 	tmp = man_alloc_ubreq_dreq();
469403831d35Sstevel 	if (tmp == NULL) {
469503831d35Sstevel 		goto fail;
469603831d35Sstevel 	}
469703831d35Sstevel 	mp = tmp;
469803831d35Sstevel 	while (mp->b_next != NULL)
469903831d35Sstevel 		mp = mp->b_next;
470003831d35Sstevel 	mp->b_next = rmp;
470103831d35Sstevel 	rmp = tmp;
470203831d35Sstevel 
470303831d35Sstevel 	man_dlpi_replay(mdp, rmp);
470403831d35Sstevel 
470503831d35Sstevel 	return;
470603831d35Sstevel 
470703831d35Sstevel fail:
470803831d35Sstevel 
470903831d35Sstevel 	while (rmp) {
471003831d35Sstevel 		mp = rmp;
471103831d35Sstevel 		rmp = rmp->b_next;
471203831d35Sstevel 		mp->b_next = mp->b_prev = NULL;
471303831d35Sstevel 		freemsg(mp);
471403831d35Sstevel 	}
471503831d35Sstevel 
471603831d35Sstevel 	ASSERT(mdp->md_lc_timer_id == 0);
471703831d35Sstevel 	ASSERT(mdp->md_bc_id == 0);
471803831d35Sstevel 
471903831d35Sstevel 	/*
472003831d35Sstevel 	 * If low on memory, try again later. I Could use qbufcall, but that
472103831d35Sstevel 	 * could fail and I would have to try and recover from that w/
472203831d35Sstevel 	 * qtimeout anyway.
472303831d35Sstevel 	 */
472403831d35Sstevel 	mdp->md_lc_timer_id = qtimeout(man_ctl_wq, man_reset_dlpi,
472503831d35Sstevel 	    (void *)mdp, man_gettimer(MAN_TIMER_LINKCHECK, mdp));
472603831d35Sstevel }
472703831d35Sstevel 
472803831d35Sstevel /*
472903831d35Sstevel  * Once we receive acknowledgement that DL_ATTACH_REQ was successful,
473003831d35Sstevel  * we can send down the DL_* related IOCTLs (e.g. DL_IOC_HDR). If we
473103831d35Sstevel  * try and send them downsteam w/o waiting, the ioctl's get processed before
473203831d35Sstevel  * the ATTACH_REQ and they are rejected. TBD - could just do the lower
473303831d35Sstevel  * dlpi state change in lock step. TBD
473403831d35Sstevel  */
473503831d35Sstevel static int
man_dlioc_replay(man_dest_t * mdp)473603831d35Sstevel man_dlioc_replay(man_dest_t *mdp)
473703831d35Sstevel {
473803831d35Sstevel 	mblk_t		*rmp;
473903831d35Sstevel 	int		status = 1;
474003831d35Sstevel 
474103831d35Sstevel 	if (mdp->md_msp->ms_dlioc_mp == NULL)
474203831d35Sstevel 		goto exit;
474303831d35Sstevel 
474403831d35Sstevel 	rmp = man_dup_mplist(mdp->md_msp->ms_dlioc_mp);
474503831d35Sstevel 	if (rmp == NULL) {
474603831d35Sstevel 		status = 0;
474703831d35Sstevel 		goto exit;
474803831d35Sstevel 	}
474903831d35Sstevel 
475003831d35Sstevel 	man_dlpi_replay(mdp, rmp);
475103831d35Sstevel exit:
475203831d35Sstevel 	return (status);
475303831d35Sstevel }
475403831d35Sstevel 
475503831d35Sstevel static mblk_t *
man_alloc_ubreq_dreq()475603831d35Sstevel man_alloc_ubreq_dreq()
475703831d35Sstevel {
475803831d35Sstevel 	mblk_t			*dreq;
475903831d35Sstevel 	mblk_t			*ubreq = NULL;
476003831d35Sstevel 	union DL_primitives	*dlp;
476103831d35Sstevel 
476203831d35Sstevel 	dreq = allocb(DL_DETACH_REQ_SIZE, BPRI_MED);
476303831d35Sstevel 	if (dreq == NULL)
476403831d35Sstevel 		goto exit;
476503831d35Sstevel 
476603831d35Sstevel 	dreq->b_datap->db_type = M_PROTO;
476703831d35Sstevel 	dlp = (union DL_primitives *)dreq->b_rptr;
476803831d35Sstevel 	dlp->dl_primitive = DL_DETACH_REQ;
476903831d35Sstevel 	dreq->b_wptr += DL_DETACH_REQ_SIZE;
477003831d35Sstevel 
477103831d35Sstevel 	ubreq = allocb(DL_UNBIND_REQ_SIZE, BPRI_MED);
477203831d35Sstevel 	if (ubreq == NULL) {
477303831d35Sstevel 		freemsg(dreq);
477403831d35Sstevel 		goto exit;
477503831d35Sstevel 	}
477603831d35Sstevel 
477703831d35Sstevel 	ubreq->b_datap->db_type = M_PROTO;
477803831d35Sstevel 	dlp = (union DL_primitives *)ubreq->b_rptr;
477903831d35Sstevel 	dlp->dl_primitive = DL_UNBIND_REQ;
478003831d35Sstevel 	ubreq->b_wptr += DL_UNBIND_REQ_SIZE;
478103831d35Sstevel 
478203831d35Sstevel 	ubreq->b_next = dreq;
478303831d35Sstevel 
478403831d35Sstevel exit:
478503831d35Sstevel 
478603831d35Sstevel 	return (ubreq);
478703831d35Sstevel }
478803831d35Sstevel 
478903831d35Sstevel static mblk_t *
man_dup_mplist(mblk_t * mp)479003831d35Sstevel man_dup_mplist(mblk_t *mp)
479103831d35Sstevel {
479203831d35Sstevel 	mblk_t	*listp = NULL;
479303831d35Sstevel 	mblk_t	*tailp = NULL;
479403831d35Sstevel 
479503831d35Sstevel 	for (; mp != NULL; mp = mp->b_next) {
479603831d35Sstevel 
479703831d35Sstevel 		mblk_t	*nmp;
479803831d35Sstevel 		mblk_t	*prev;
479903831d35Sstevel 		mblk_t	*next;
480003831d35Sstevel 
480103831d35Sstevel 		prev = mp->b_prev;
480203831d35Sstevel 		next = mp->b_next;
480303831d35Sstevel 		mp->b_prev = mp->b_next = NULL;
480403831d35Sstevel 
480503831d35Sstevel 		nmp = copymsg(mp);
480603831d35Sstevel 
480703831d35Sstevel 		mp->b_prev = prev;
480803831d35Sstevel 		mp->b_next = next;
480903831d35Sstevel 
481003831d35Sstevel 		if (nmp == NULL)
481103831d35Sstevel 			goto nomem;
481203831d35Sstevel 
481303831d35Sstevel 		if (listp == NULL) {
481403831d35Sstevel 			listp = tailp = nmp;
481503831d35Sstevel 		} else {
481603831d35Sstevel 			tailp->b_next = nmp;
481703831d35Sstevel 			tailp = nmp;
481803831d35Sstevel 		}
481903831d35Sstevel 	}
482003831d35Sstevel 
482103831d35Sstevel 	return (listp);
482203831d35Sstevel nomem:
482303831d35Sstevel 
482403831d35Sstevel 	while (listp) {
482503831d35Sstevel 		mp = listp;
482603831d35Sstevel 		listp = mp->b_next;
482703831d35Sstevel 		mp->b_next = mp->b_prev = NULL;
482803831d35Sstevel 		freemsg(mp);
482903831d35Sstevel 	}
483003831d35Sstevel 
483103831d35Sstevel 	return (NULL);
483203831d35Sstevel 
483303831d35Sstevel }
483403831d35Sstevel 
483503831d35Sstevel static mblk_t *
man_alloc_physreq_mp(eaddr_t * man_eap)483603831d35Sstevel man_alloc_physreq_mp(eaddr_t *man_eap)
483703831d35Sstevel {
483803831d35Sstevel 
483903831d35Sstevel 	mblk_t			*mp;
484003831d35Sstevel 	union DL_primitives	*dlp;
484103831d35Sstevel 	t_uscalar_t		off;
484203831d35Sstevel 	eaddr_t			*eap;
484303831d35Sstevel 
484403831d35Sstevel 	mp = allocb(DL_SET_PHYS_ADDR_REQ_SIZE + ETHERADDRL, BPRI_MED);
484503831d35Sstevel 	if (mp == NULL)
484603831d35Sstevel 		goto exit;
484703831d35Sstevel 
484803831d35Sstevel 	mp->b_datap->db_type = M_PROTO;
484903831d35Sstevel 	dlp = (union DL_primitives *)mp->b_wptr;
485003831d35Sstevel 	dlp->set_physaddr_req.dl_primitive = DL_SET_PHYS_ADDR_REQ;
485103831d35Sstevel 	dlp->set_physaddr_req.dl_addr_length = ETHERADDRL;
485203831d35Sstevel 	off = DL_SET_PHYS_ADDR_REQ_SIZE;
485303831d35Sstevel 	dlp->set_physaddr_req.dl_addr_offset =  off;
485403831d35Sstevel 	mp->b_wptr += DL_SET_PHYS_ADDR_REQ_SIZE + ETHERADDRL;
485503831d35Sstevel 
485603831d35Sstevel 	eap = (eaddr_t *)(mp->b_rptr + off);
485703831d35Sstevel 	ether_copy(man_eap, eap);
485803831d35Sstevel 
485903831d35Sstevel exit:
486003831d35Sstevel 	MAN_DBG(MAN_DLPI, ("man_alloc_physreq: physaddr %s\n",
486103831d35Sstevel 	    ether_sprintf(eap)));
486203831d35Sstevel 
486303831d35Sstevel 	return (mp);
486403831d35Sstevel }
486503831d35Sstevel 
486603831d35Sstevel /*
486703831d35Sstevel  * A new path in a pathgroup has become active for the first time. Setup
486803831d35Sstevel  * the lower destinations in prepartion for man_pg_activate to call
486903831d35Sstevel  * man_autoswitch.
487003831d35Sstevel  */
487103831d35Sstevel static void
man_add_dests(man_pg_t * mpg)487203831d35Sstevel man_add_dests(man_pg_t *mpg)
487303831d35Sstevel {
487403831d35Sstevel 	manstr_t	*msp;
487503831d35Sstevel 	man_dest_t	*mdp;
487603831d35Sstevel 
487703831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
487803831d35Sstevel 
487903831d35Sstevel 		if (!man_str_uses_pg(msp, mpg))
488003831d35Sstevel 			continue;
488103831d35Sstevel 
488203831d35Sstevel 		mdp = &msp->ms_dests[mpg->mpg_pg_id];
488303831d35Sstevel 
488403831d35Sstevel /*
488503831d35Sstevel  * TBD - Take out
488603831d35Sstevel  *		ASSERT(mdp->md_device.mdev_state == MDEV_UNASSIGNED);
488703831d35Sstevel  *		ASSERT(mdp->md_state == MAN_DSTATE_NOTPRESENT);
488803831d35Sstevel  */
488903831d35Sstevel 		if (mdp->md_device.mdev_state != MDEV_UNASSIGNED) {
489003831d35Sstevel 			cmn_err(CE_NOTE, "man_add_dests mdev !unassigned");
489103831d35Sstevel 			MAN_DBGCALL(MAN_PATH, man_print_mdp(mdp));
489203831d35Sstevel 		}
489303831d35Sstevel 
489403831d35Sstevel 		man_start_dest(mdp, msp, mpg);
489503831d35Sstevel 	}
489603831d35Sstevel 
489703831d35Sstevel }
489803831d35Sstevel 
489903831d35Sstevel static int
man_remove_dests(man_pg_t * mpg)490003831d35Sstevel man_remove_dests(man_pg_t *mpg)
490103831d35Sstevel {
490203831d35Sstevel 	manstr_t	*msp;
490303831d35Sstevel 	int		close_cnt = 0;
490403831d35Sstevel 	man_dest_t	*cdp;
490503831d35Sstevel 	man_dest_t	*mdp;
490603831d35Sstevel 	man_dest_t	*tdp;
490703831d35Sstevel 	man_work_t	*wp;
490803831d35Sstevel 	mblk_t		*mp;
490903831d35Sstevel 	int		status = 0;
491003831d35Sstevel 
491103831d35Sstevel 	wp = man_work_alloc(MAN_WORK_CLOSE, KM_NOSLEEP);
491203831d35Sstevel 	if (wp == NULL) {
491303831d35Sstevel 		status = ENOMEM;
491403831d35Sstevel 		goto exit;
491503831d35Sstevel 	}
491603831d35Sstevel 
491703831d35Sstevel 	/*
491803831d35Sstevel 	 * Count up number of destinations we need to close.
491903831d35Sstevel 	 */
492003831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
492103831d35Sstevel 		if (!man_str_uses_pg(msp, mpg))
492203831d35Sstevel 			continue;
492303831d35Sstevel 
492403831d35Sstevel 		close_cnt++;
492503831d35Sstevel 	}
492603831d35Sstevel 
492703831d35Sstevel 	if (close_cnt == 0)
492803831d35Sstevel 		goto exit;
492903831d35Sstevel 
493003831d35Sstevel 	cdp = man_kzalloc(sizeof (man_dest_t) * close_cnt, KM_NOSLEEP);
493103831d35Sstevel 	if (cdp == NULL) {
493203831d35Sstevel 		status = ENOMEM;
493303831d35Sstevel 		man_work_free(wp);
493403831d35Sstevel 		goto exit;
493503831d35Sstevel 	}
493603831d35Sstevel 
493703831d35Sstevel 	tdp = cdp;
493803831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
493903831d35Sstevel 		if (!man_str_uses_pg(msp, mpg))
494003831d35Sstevel 			continue;
494103831d35Sstevel 
494203831d35Sstevel 		mdp = &msp->ms_dests[mpg->mpg_pg_id];
494303831d35Sstevel 
494403831d35Sstevel 		mdp->md_state |= MAN_DSTATE_CLOSING;
494503831d35Sstevel 		mdp->md_device.mdev_state = MDEV_UNASSIGNED;
494603831d35Sstevel 		mdp->md_msp = NULL;
494703831d35Sstevel 		mdp->md_rq = NULL;
494803831d35Sstevel 
494903831d35Sstevel 		/*
495003831d35Sstevel 		 * Clean up optimized destination pointer if we are
495103831d35Sstevel 		 * closing it.
495203831d35Sstevel 		 */
495303831d35Sstevel 		man_set_optimized_dest(msp);
495403831d35Sstevel 
495503831d35Sstevel 		if (mdp->md_lc_timer_id != 0) {
495603831d35Sstevel 			(void) quntimeout(man_ctl_wq, mdp->md_lc_timer_id);
495703831d35Sstevel 			mdp->md_lc_timer_id = 0;
495803831d35Sstevel 		}
495903831d35Sstevel 		if (mdp->md_bc_id != 0) {
496003831d35Sstevel 			qunbufcall(man_ctl_wq, mdp->md_bc_id);
496103831d35Sstevel 			mdp->md_bc_id = 0;
496203831d35Sstevel 		}
496303831d35Sstevel 
496403831d35Sstevel 		mutex_enter(&mdp->md_lock);
496503831d35Sstevel 		while ((mp = mdp->md_dmp_head) != NULL) {
496603831d35Sstevel 			mdp->md_dmp_head = mp->b_next;
496703831d35Sstevel 			mp->b_next = NULL;
496803831d35Sstevel 			freemsg(mp);
496903831d35Sstevel 		}
497003831d35Sstevel 		mdp->md_dmp_count = 0;
497103831d35Sstevel 		mdp->md_dmp_tail = NULL;
497203831d35Sstevel 		mutex_exit(&mdp->md_lock);
497303831d35Sstevel 
497403831d35Sstevel 		*tdp++ = *mdp;
497503831d35Sstevel 
497603831d35Sstevel 		mdp->md_state = MAN_DSTATE_NOTPRESENT;
497703831d35Sstevel 		mdp->md_muxid = -1;
497803831d35Sstevel 	}
497903831d35Sstevel 
498003831d35Sstevel 	wp->mw_arg.a_mdp = cdp;
498103831d35Sstevel 	wp->mw_arg.a_ndests = close_cnt;
498203831d35Sstevel 	man_work_add(man_bwork_q, wp);
498303831d35Sstevel 
498403831d35Sstevel exit:
498503831d35Sstevel 	return (status);
498603831d35Sstevel 
498703831d35Sstevel }
498803831d35Sstevel 
498903831d35Sstevel /*
499003831d35Sstevel  * Returns TRUE if stream uses pathgroup, FALSE otherwise.
499103831d35Sstevel  */
499203831d35Sstevel static int
man_str_uses_pg(manstr_t * msp,man_pg_t * mpg)499303831d35Sstevel man_str_uses_pg(manstr_t *msp, man_pg_t *mpg)
499403831d35Sstevel {
499503831d35Sstevel 	int	status;
499603831d35Sstevel 
499703831d35Sstevel 	status = ((msp->ms_flags & MAN_SFLAG_CONTROL)	||
499803831d35Sstevel 	    (msp->ms_dests == NULL)	||
499903831d35Sstevel 	    (msp->ms_manp == NULL)	||
500003831d35Sstevel 	    (msp->ms_manp->man_meta_ppa != mpg->mpg_man_ppa));
500103831d35Sstevel 
500203831d35Sstevel 	return (!status);
500303831d35Sstevel }
500403831d35Sstevel 
500503831d35Sstevel static int
man_gettimer(int timer,man_dest_t * mdp)500603831d35Sstevel man_gettimer(int timer, man_dest_t *mdp)
500703831d35Sstevel {
500803831d35Sstevel 
500903831d35Sstevel 	int attached = TRUE;
501003831d35Sstevel 	int time = 0;
501103831d35Sstevel 
501203831d35Sstevel 	if (mdp == NULL || mdp->md_msp == NULL || mdp->md_msp->ms_manp == NULL)
501303831d35Sstevel 		attached = FALSE;
501403831d35Sstevel 
501503831d35Sstevel 	switch (timer) {
501603831d35Sstevel 	case MAN_TIMER_INIT:
501703831d35Sstevel 		if (attached)
501803831d35Sstevel 			time = mdp->md_msp->ms_manp->man_init_time;
501903831d35Sstevel 		else
502003831d35Sstevel 			time = MAN_INIT_TIME;
502103831d35Sstevel 		break;
502203831d35Sstevel 
502303831d35Sstevel 	case MAN_TIMER_LINKCHECK:
502403831d35Sstevel 		if (attached) {
502503831d35Sstevel 			if (mdp->md_linkstate == MAN_LINKSTALE)
502603831d35Sstevel 				time = mdp->md_msp->ms_manp->man_linkstale_time;
502703831d35Sstevel 			else
502803831d35Sstevel 				time = mdp->md_msp->ms_manp->man_linkcheck_time;
502903831d35Sstevel 		} else
503003831d35Sstevel 			time = MAN_LINKCHECK_TIME;
503103831d35Sstevel 		break;
503203831d35Sstevel 
503303831d35Sstevel 	case MAN_TIMER_DLPIRESET:
503403831d35Sstevel 		if (attached)
503503831d35Sstevel 			time = mdp->md_msp->ms_manp->man_dlpireset_time;
503603831d35Sstevel 		else
503703831d35Sstevel 			time = MAN_DLPIRESET_TIME;
503803831d35Sstevel 		break;
503903831d35Sstevel 
504003831d35Sstevel 	default:
504103831d35Sstevel 		MAN_DBG(MAN_LINK, ("man_gettimer: unknown timer %d", timer));
504203831d35Sstevel 		time = MAN_LINKCHECK_TIME;
504303831d35Sstevel 		break;
504403831d35Sstevel 	}
504503831d35Sstevel 
504603831d35Sstevel 	return (drv_usectohz(time));
504703831d35Sstevel }
504803831d35Sstevel 
504903831d35Sstevel /*
505003831d35Sstevel  * Check the links for each active destination. Called inside inner
505103831d35Sstevel  * perimeter via qtimeout. This timer only runs on the domain side of the
505203831d35Sstevel  * driver. It should never run on the SC side.
505303831d35Sstevel  *
505403831d35Sstevel  * On a MAN_LINKGOOD link, we check/probe the link health every
505503831d35Sstevel  * MAN_LINKCHECK_TIME seconds. If the link goes MAN_LINKSTALE, the we probe
505603831d35Sstevel  * the link every MAN_LINKSTALE_TIME seconds, and fail the link after probing
505703831d35Sstevel  * the link MAN_LINKSTALE_RETRIES times.
505803831d35Sstevel  * The man_lock is held to synchronize access pathgroup list(man_pg).
505903831d35Sstevel  */
506003831d35Sstevel void
man_linkcheck_timer(void * argp)506103831d35Sstevel man_linkcheck_timer(void *argp)
506203831d35Sstevel {
506303831d35Sstevel 	man_dest_t		*mdp = (man_dest_t *)argp;
506403831d35Sstevel 	int			restart_timer = TRUE;
506503831d35Sstevel 	int			send_ping = TRUE;
506603831d35Sstevel 	int			newstate;
506703831d35Sstevel 	int			oldstate;
506803831d35Sstevel 	man_pg_t		*mpg;
506903831d35Sstevel 	man_path_t		*mp;
507003831d35Sstevel 
507103831d35Sstevel 	MAN_DBG(MAN_LINK, ("man_linkcheck_timer: mdp"));
507203831d35Sstevel 	MAN_DBGCALL(MAN_LINK, man_print_mdp(mdp));
507303831d35Sstevel 
507403831d35Sstevel 	/*
507503831d35Sstevel 	 * Clear timeout id and check if someones waiting on us to
507603831d35Sstevel 	 * complete a close.
507703831d35Sstevel 	 */
507803831d35Sstevel 	mdp->md_lc_timer_id = 0;
507903831d35Sstevel 
508003831d35Sstevel 	if (mdp->md_state == MAN_DSTATE_NOTPRESENT ||
508103831d35Sstevel 	    mdp->md_state & MAN_DSTATE_BUSY) {
508203831d35Sstevel 
508303831d35Sstevel 		MAN_DBG(MAN_LINK, ("man_linkcheck_timer: not ready mdp"));
508403831d35Sstevel 		MAN_DBGCALL(MAN_LINK, man_print_mdp(mdp));
508503831d35Sstevel 		goto exit;
508603831d35Sstevel 	}
508703831d35Sstevel 
508803831d35Sstevel 	mutex_enter(&man_lock);
508903831d35Sstevel 	/*
509003831d35Sstevel 	 * If the lower stream needs initializing, just go straight to
509103831d35Sstevel 	 * switch code. As the linkcheck timer is started for all
509203831d35Sstevel 	 * SAPs, do not send ping packets during the initialization.
509303831d35Sstevel 	 */
509403831d35Sstevel 	if (mdp->md_state == MAN_DSTATE_INITIALIZING) {
509503831d35Sstevel 		send_ping = FALSE;
509603831d35Sstevel 		goto do_switch;
509703831d35Sstevel 	}
509803831d35Sstevel 
509903831d35Sstevel 	newstate = oldstate = mdp->md_linkstate;
510003831d35Sstevel 
510103831d35Sstevel 	if (!man_needs_linkcheck(mdp)) {
510203831d35Sstevel 		cmn_err(CE_NOTE,
510303831d35Sstevel 		    "man_linkcheck_timer: unneeded linkcheck on mdp(0x%p)",
510403831d35Sstevel 		    (void *)mdp);
510503831d35Sstevel 		mutex_exit(&man_lock);
510603831d35Sstevel 		return;
510703831d35Sstevel 	}
510803831d35Sstevel 
510903831d35Sstevel 	/*
511003831d35Sstevel 	 * The above call to  man_needs_linkcheck() validates
511103831d35Sstevel 	 * mdp->md_msp and mdp->md_msp->ms_manp pointers.
511203831d35Sstevel 	 */
511303831d35Sstevel 	mpg = man_find_pg_by_id(mdp->md_msp->ms_manp->man_pg, mdp->md_pg_id);
511403831d35Sstevel 	ASSERT(mpg != NULL);
511503831d35Sstevel 	mp = man_find_path_by_ppa(mpg->mpg_pathp, mdp->md_device.mdev_ppa);
511603831d35Sstevel 	ASSERT(mp != NULL);
511703831d35Sstevel 
511803831d35Sstevel 	/*
511903831d35Sstevel 	 * This is the most common case, when traffic is flowing.
512003831d35Sstevel 	 */
512103831d35Sstevel 	if (mdp->md_rcvcnt != mdp->md_lastrcvcnt) {
512203831d35Sstevel 
512303831d35Sstevel 		newstate = MAN_LINKGOOD;
512403831d35Sstevel 		mdp->md_lastrcvcnt = mdp->md_rcvcnt;
512503831d35Sstevel 		send_ping = FALSE;
512603831d35Sstevel 
512703831d35Sstevel 		/*
512803831d35Sstevel 		 * Clear the FAILED flag and update lru.
512903831d35Sstevel 		 */
513003831d35Sstevel 		mp->mp_device.mdev_state &= ~MDEV_FAILED;
513103831d35Sstevel 		(void) drv_getparm(TIME, &mp->mp_lru);
513203831d35Sstevel 
513303831d35Sstevel 		if (mdp->md_link_updown_msg == MAN_LINK_DOWN_MSG) {
513403831d35Sstevel 			man_t *manp = mdp->md_msp->ms_manp;
513503831d35Sstevel 
513603831d35Sstevel 			cmn_err(CE_NOTE, "%s%d Link up",
513703831d35Sstevel 			    ddi_major_to_name(manp->man_meta_major),
513803831d35Sstevel 			    manp->man_meta_ppa);
513903831d35Sstevel 
514003831d35Sstevel 			mdp->md_link_updown_msg = MAN_LINK_UP_MSG;
514103831d35Sstevel 		}
514203831d35Sstevel 
514303831d35Sstevel 		goto done;
514403831d35Sstevel 	}
514503831d35Sstevel 
514603831d35Sstevel 	/*
514703831d35Sstevel 	 * If we're here, it means we have not seen any traffic
514803831d35Sstevel 	 */
514903831d35Sstevel 	switch (oldstate) {
515003831d35Sstevel 	case MAN_LINKINIT:
515103831d35Sstevel 	case MAN_LINKGOOD:
515203831d35Sstevel 		newstate = MAN_LINKSTALE;
515303831d35Sstevel 		mdp->md_linkstales++;
515403831d35Sstevel 		mdp->md_linkstale_retries =
515503831d35Sstevel 		    mdp->md_msp->ms_manp->man_linkstale_retries;
515603831d35Sstevel 		break;
515703831d35Sstevel 
515803831d35Sstevel 	case MAN_LINKSTALE:
515903831d35Sstevel 	case MAN_LINKFAIL:
516003831d35Sstevel 		mdp->md_linkstales++;
516103831d35Sstevel 		mdp->md_linkstale_retries--;
516203831d35Sstevel 		if (mdp->md_linkstale_retries < 0) {
516303831d35Sstevel 			newstate = MAN_LINKFAIL;
516403831d35Sstevel 			mdp->md_linkfails++;
516503831d35Sstevel 			mdp->md_linkstale_retries =
516603831d35Sstevel 			    mdp->md_msp->ms_manp->man_linkstale_retries;
516703831d35Sstevel 			/*
516803831d35Sstevel 			 * Mark the destination as FAILED and
516903831d35Sstevel 			 * update lru.
517003831d35Sstevel 			 */
517103831d35Sstevel 			if (oldstate != MAN_LINKFAIL) {
517203831d35Sstevel 				mp->mp_device.mdev_state |= MDEV_FAILED;
517303831d35Sstevel 				(void) drv_getparm(TIME, &mp->mp_lru);
517403831d35Sstevel 			}
517503831d35Sstevel 		}
517603831d35Sstevel 		break;
517703831d35Sstevel 
517803831d35Sstevel 	default:
517903831d35Sstevel 		cmn_err(CE_WARN, "man_linkcheck_timer: illegal link"
518003831d35Sstevel 		    " state %d", oldstate);
518103831d35Sstevel 		break;
518203831d35Sstevel 	}
518303831d35Sstevel done:
518403831d35Sstevel 
518503831d35Sstevel 	if (oldstate != newstate) {
518603831d35Sstevel 
518703831d35Sstevel 		MAN_DBG(MAN_LINK, ("man_linkcheck_timer"
518803831d35Sstevel 		    " link state %s -> %s", lss[oldstate],
518903831d35Sstevel 		    lss[newstate]));
519003831d35Sstevel 
519103831d35Sstevel 		mdp->md_linkstate = newstate;
519203831d35Sstevel 	}
519303831d35Sstevel 
519403831d35Sstevel 	/*
519503831d35Sstevel 	 * Do any work required from state transitions above.
519603831d35Sstevel 	 */
519703831d35Sstevel 	if (newstate == MAN_LINKFAIL) {
519803831d35Sstevel do_switch:
519903831d35Sstevel 		if (!man_do_autoswitch(mdp)) {
520003831d35Sstevel 			/*
520103831d35Sstevel 			 * Stop linkcheck timer until switch completes.
520203831d35Sstevel 			 */
520303831d35Sstevel 			restart_timer = FALSE;
520403831d35Sstevel 			send_ping = FALSE;
520503831d35Sstevel 		}
520603831d35Sstevel 	}
520703831d35Sstevel 
520803831d35Sstevel 	mutex_exit(&man_lock);
520903831d35Sstevel 	if (send_ping)
521003831d35Sstevel 		man_do_icmp_bcast(mdp, mdp->md_msp->ms_sap);
521103831d35Sstevel 
521203831d35Sstevel 	if (restart_timer)
521303831d35Sstevel 		mdp->md_lc_timer_id = qtimeout(man_ctl_wq, man_linkcheck_timer,
521403831d35Sstevel 		    (void *)mdp, man_gettimer(MAN_TIMER_LINKCHECK, mdp));
521503831d35Sstevel 
521603831d35Sstevel exit:
521703831d35Sstevel 	MAN_DBG(MAN_LINK, ("man_linkcheck_timer: returns"));
521803831d35Sstevel 
521903831d35Sstevel }
522003831d35Sstevel 
522103831d35Sstevel /*
522203831d35Sstevel  * Handle linkcheck initiated autoswitching.
522303831d35Sstevel  * Called with man_lock held.
522403831d35Sstevel  */
522503831d35Sstevel static int
man_do_autoswitch(man_dest_t * mdp)522603831d35Sstevel man_do_autoswitch(man_dest_t *mdp)
522703831d35Sstevel {
522803831d35Sstevel 	man_pg_t	*mpg;
522903831d35Sstevel 	man_path_t	*ap;
523003831d35Sstevel 	int		status = 0;
523103831d35Sstevel 
523203831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
523303831d35Sstevel 	/*
523403831d35Sstevel 	 * Set flags and refcnt. Cleared in man_iswitch when SWITCH completes.
523503831d35Sstevel 	 */
523603831d35Sstevel 	mdp->md_msp->ms_manp->man_refcnt++;
523703831d35Sstevel 
523803831d35Sstevel 	mpg = man_find_pg_by_id(mdp->md_msp->ms_manp->man_pg, mdp->md_pg_id);
523903831d35Sstevel 	ASSERT(mpg);
524003831d35Sstevel 
524103831d35Sstevel 	if (mpg->mpg_flags & MAN_PG_SWITCHING)
524203831d35Sstevel 		return (EBUSY);
524303831d35Sstevel 
524403831d35Sstevel 	mpg->mpg_flags |= MAN_PG_SWITCHING;
524503831d35Sstevel 
524603831d35Sstevel 	if (mdp->md_state == MAN_DSTATE_INITIALIZING) {
524703831d35Sstevel 		/*
524803831d35Sstevel 		 * We're initializing, ask for a switch to our currently
524903831d35Sstevel 		 * active device.
525003831d35Sstevel 		 */
525103831d35Sstevel 		status = man_autoswitch(mpg, &mdp->md_device, NULL);
525203831d35Sstevel 	} else {
525303831d35Sstevel 
525403831d35Sstevel 		if (mdp->md_msp != NULL && mdp->md_msp->ms_manp != NULL &&
525503831d35Sstevel 		    mdp->md_link_updown_msg == MAN_LINK_UP_MSG) {
525603831d35Sstevel 
525703831d35Sstevel 			man_t *manp = mdp->md_msp->ms_manp;
525803831d35Sstevel 
525903831d35Sstevel 			cmn_err(CE_NOTE, "%s%d Link down",
526003831d35Sstevel 			    ddi_major_to_name(manp->man_meta_major),
526103831d35Sstevel 			    manp->man_meta_ppa);
526203831d35Sstevel 		}
526303831d35Sstevel 		mdp->md_link_updown_msg = MAN_LINK_DOWN_MSG;
526403831d35Sstevel 
526503831d35Sstevel 		MAN_DBG(MAN_LINK, ("man_linkcheck_timer: link failure on %s%d",
526603831d35Sstevel 		    ddi_major_to_name(mdp->md_device.mdev_major),
526703831d35Sstevel 		    mdp->md_device.mdev_ppa));
526803831d35Sstevel 
526903831d35Sstevel 		ap = man_find_alternate_path(mpg->mpg_pathp);
527003831d35Sstevel 
527103831d35Sstevel 		if (ap == NULL) {
527203831d35Sstevel 			status = ENODEV;
527303831d35Sstevel 			goto exit;
527403831d35Sstevel 		}
527503831d35Sstevel 		status = man_autoswitch(mpg, &ap->mp_device, NULL);
527603831d35Sstevel 	}
527703831d35Sstevel exit:
527803831d35Sstevel 	if (status != 0) {
527903831d35Sstevel 		/*
528003831d35Sstevel 		 * man_iswitch not going to run, clean up.
528103831d35Sstevel 		 */
528203831d35Sstevel 		mpg->mpg_flags &= ~MAN_PG_SWITCHING;
528303831d35Sstevel 		mdp->md_msp->ms_manp->man_refcnt--;
528403831d35Sstevel 	}
528503831d35Sstevel 
528603831d35Sstevel 	return (status);
528703831d35Sstevel }
528803831d35Sstevel 
528903831d35Sstevel /*
529003831d35Sstevel  * Gather up all lower multiplexor streams that have this link open and
529103831d35Sstevel  * try to switch them. Called from inner perimeter and holding man_lock.
529203831d35Sstevel  *
529303831d35Sstevel  *	pg_id		- Pathgroup to do switch for.
529403831d35Sstevel  *	st_devp		- New device to switch to.
529503831d35Sstevel  *	wait_for_switch	- whether or not to qwait for completion.
529603831d35Sstevel  */
529703831d35Sstevel static int
man_autoswitch(man_pg_t * mpg,man_dev_t * st_devp,man_work_t * waiter_wp)529803831d35Sstevel man_autoswitch(man_pg_t *mpg, man_dev_t *st_devp, man_work_t *waiter_wp)
529903831d35Sstevel {
530003831d35Sstevel 	man_work_t	*wp;
530103831d35Sstevel 	int		sdp_cnt = 0;
530203831d35Sstevel 	man_dest_t	*sdp;
530303831d35Sstevel 	int		status = 0;
530403831d35Sstevel 
530503831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
530603831d35Sstevel 	if (waiter_wp == NULL) {
530703831d35Sstevel 		wp = man_work_alloc(MAN_WORK_SWITCH, KM_NOSLEEP);
530803831d35Sstevel 		if (wp == NULL) {
530903831d35Sstevel 			status = ENOMEM;
531003831d35Sstevel 			goto exit;
531103831d35Sstevel 		}
531203831d35Sstevel 	} else {
531303831d35Sstevel 		ASSERT(waiter_wp->mw_type == MAN_WORK_SWITCH);
531403831d35Sstevel 		wp = waiter_wp;
531503831d35Sstevel 	}
531603831d35Sstevel 
531703831d35Sstevel 	/*
531803831d35Sstevel 	 * Set dests as PLUMBING, cancel timers and return array of dests
531903831d35Sstevel 	 * that need a switch.
532003831d35Sstevel 	 */
532103831d35Sstevel 	status = man_prep_dests_for_switch(mpg, &sdp, &sdp_cnt);
532203831d35Sstevel 	if (status) {
532303831d35Sstevel 		if (waiter_wp == NULL)
532403831d35Sstevel 			man_work_free(wp);
532503831d35Sstevel 		goto exit;
532603831d35Sstevel 	}
532703831d35Sstevel 
532803831d35Sstevel 	/*
532903831d35Sstevel 	 * If no streams are active, there are no streams to switch.
533003831d35Sstevel 	 * Return ENODEV (see man_pg_activate).
533103831d35Sstevel 	 */
533203831d35Sstevel 	if (sdp_cnt == 0) {
533303831d35Sstevel 		if (waiter_wp == NULL)
533403831d35Sstevel 			man_work_free(wp);
533503831d35Sstevel 		status = ENODEV;
533603831d35Sstevel 		goto exit;
533703831d35Sstevel 	}
533803831d35Sstevel 
533903831d35Sstevel 	/*
534003831d35Sstevel 	 * Ask the bgthread to switch. See man_bwork.
534103831d35Sstevel 	 */
534203831d35Sstevel 	wp->mw_arg.a_sf_dev = sdp->md_device;
534303831d35Sstevel 	wp->mw_arg.a_st_dev = *st_devp;
534403831d35Sstevel 	wp->mw_arg.a_pg_id = mpg->mpg_pg_id;
534503831d35Sstevel 	wp->mw_arg.a_man_ppa = mpg->mpg_man_ppa;
534603831d35Sstevel 
534703831d35Sstevel 	wp->mw_arg.a_mdp = sdp;
534803831d35Sstevel 	wp->mw_arg.a_ndests = sdp_cnt;
534903831d35Sstevel 	man_work_add(man_bwork_q, wp);
535003831d35Sstevel 
535103831d35Sstevel exit:
535203831d35Sstevel 
535303831d35Sstevel 	return (status);
535403831d35Sstevel }
535503831d35Sstevel 
535603831d35Sstevel /*
535703831d35Sstevel  * If an alternate path exists for pathgroup, arrange for switch to
535803831d35Sstevel  * happen. Note that we need to switch each of msp->dests[pg_id], for
535903831d35Sstevel  * all on man_strup. We must:
536003831d35Sstevel  *
536103831d35Sstevel  *		Cancel any timers
536203831d35Sstevel  *		Mark dests as PLUMBING
536303831d35Sstevel  *		Submit switch request to man_bwork_q->
536403831d35Sstevel  */
536503831d35Sstevel static int
man_prep_dests_for_switch(man_pg_t * mpg,man_dest_t ** mdpp,int * cntp)536603831d35Sstevel man_prep_dests_for_switch(man_pg_t *mpg, man_dest_t **mdpp, int *cntp)
536703831d35Sstevel {
536803831d35Sstevel 	manstr_t	*msp;
536903831d35Sstevel 	man_dest_t	*mdp;
537003831d35Sstevel 	int		sdp_cnt = 0;
537103831d35Sstevel 	man_dest_t	*sdp = NULL;
537203831d35Sstevel 	man_dest_t	*tdp;
537303831d35Sstevel 	int		status = 0;
537403831d35Sstevel 
537503831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_prep_dests_for_switch: pg_id %d",
537603831d35Sstevel 	    mpg->mpg_pg_id));
537703831d35Sstevel 
537803831d35Sstevel 	/*
537903831d35Sstevel 	 * Count up number of streams, there is one destination that needs
538003831d35Sstevel 	 * switching per stream.
538103831d35Sstevel 	 */
538203831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
538303831d35Sstevel 		if (man_str_uses_pg(msp, mpg))
538403831d35Sstevel 			sdp_cnt++;
538503831d35Sstevel 	}
538603831d35Sstevel 
538703831d35Sstevel 	if (sdp_cnt == 0)
538803831d35Sstevel 		goto exit;
538903831d35Sstevel 
539003831d35Sstevel 	sdp = man_kzalloc(sizeof (man_dest_t) * sdp_cnt, KM_NOSLEEP);
539103831d35Sstevel 	if (sdp == NULL) {
539203831d35Sstevel 		status = ENOMEM;
539303831d35Sstevel 		goto exit;
539403831d35Sstevel 	}
539503831d35Sstevel 	tdp = sdp;
539603831d35Sstevel 	/*
539703831d35Sstevel 	 * Mark each destination as unusable.
539803831d35Sstevel 	 */
539903831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
540003831d35Sstevel 		if (man_str_uses_pg(msp, mpg)) {
540103831d35Sstevel 
540203831d35Sstevel 			/*
540303831d35Sstevel 			 * Mark destination as plumbing and store the
540403831d35Sstevel 			 * address of sdp as a way to identify the
540503831d35Sstevel 			 * SWITCH request when it comes back (see man_iswitch).
540603831d35Sstevel 			 */
540703831d35Sstevel 			mdp = &msp->ms_dests[mpg->mpg_pg_id];
540803831d35Sstevel 			mdp->md_state |= MAN_DSTATE_PLUMBING;
540903831d35Sstevel 			mdp->md_switch_id = sdp;
541003831d35Sstevel 
541103831d35Sstevel 			/*
541203831d35Sstevel 			 * Copy destination info.
541303831d35Sstevel 			 */
541403831d35Sstevel 			bcopy(mdp, tdp, sizeof (man_dest_t));
541503831d35Sstevel 			tdp++;
541603831d35Sstevel 
541703831d35Sstevel 			/*
541803831d35Sstevel 			 * Cancel timers.
541903831d35Sstevel 			 */
542003831d35Sstevel 			if (mdp->md_lc_timer_id) {
542103831d35Sstevel 				(void) quntimeout(man_ctl_wq,
542203831d35Sstevel 				    mdp->md_lc_timer_id);
542303831d35Sstevel 				mdp->md_lc_timer_id = 0;
542403831d35Sstevel 			}
542503831d35Sstevel 			if (mdp->md_bc_id) {
542603831d35Sstevel 				qunbufcall(man_ctl_wq, mdp->md_bc_id);
542703831d35Sstevel 				mdp->md_bc_id = 0;
542803831d35Sstevel 			}
542903831d35Sstevel 		}
543003831d35Sstevel 	}
543103831d35Sstevel 
543203831d35Sstevel 	*mdpp = sdp;
543303831d35Sstevel 	*cntp = sdp_cnt;
543403831d35Sstevel 	status = 0;
543503831d35Sstevel exit:
543603831d35Sstevel 
543703831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_prep_dests_for_switch: returns %d"
543803831d35Sstevel 	    " sdp(0x%p) sdp_cnt(%d)", status, (void *)sdp, sdp_cnt));
543903831d35Sstevel 
544003831d35Sstevel 	return (status);
544103831d35Sstevel 
544203831d35Sstevel }
544303831d35Sstevel 
544403831d35Sstevel /*
544503831d35Sstevel  * The code below generates an ICMP echo packet and sends it to the
544603831d35Sstevel  * broadcast address in the hopes that the other end will respond
544703831d35Sstevel  * and the man_linkcheck_timer logic will see the traffic.
544803831d35Sstevel  *
544903831d35Sstevel  * This assumes ethernet-like media.
545003831d35Sstevel  */
545103831d35Sstevel /*
545203831d35Sstevel  * Generate an ICMP packet. Called exclusive inner perimeter.
545303831d35Sstevel  *
545403831d35Sstevel  *	mdp - destination to send packet to.
545503831d35Sstevel  *	sap - either ETHERTYPE_ARP or ETHERTYPE_IPV6
545603831d35Sstevel  */
545703831d35Sstevel static void
man_do_icmp_bcast(man_dest_t * mdp,t_uscalar_t sap)545803831d35Sstevel man_do_icmp_bcast(man_dest_t *mdp, t_uscalar_t sap)
545903831d35Sstevel {
546003831d35Sstevel 	mblk_t			*mp = NULL;
546103831d35Sstevel 
546203831d35Sstevel 	/* TBD - merge pinger and this routine. */
546303831d35Sstevel 
546403831d35Sstevel 	ASSERT(sap == ETHERTYPE_IPV6 || sap == ETHERTYPE_IP);
546503831d35Sstevel 
546603831d35Sstevel 	if (sap == ETHERTYPE_IPV6) {
546703831d35Sstevel 		mdp->md_icmpv6probes++;
546803831d35Sstevel 	} else {
546903831d35Sstevel 		mdp->md_icmpv4probes++;
547003831d35Sstevel 	}
547103831d35Sstevel 	/*
547203831d35Sstevel 	 * Send the ICMP message
547303831d35Sstevel 	 */
547403831d35Sstevel 	mp = man_pinger(sap);
547503831d35Sstevel 
547603831d35Sstevel 	MAN_DBG(MAN_LINK, ("man_do_icmp_bcast: sap=0x%x mp=0x%p",
547703831d35Sstevel 	    sap, (void *)mp));
547803831d35Sstevel 	if (mp == NULL)
547903831d35Sstevel 		return;
548003831d35Sstevel 
548103831d35Sstevel 	/*
548203831d35Sstevel 	 * Send it out.
548303831d35Sstevel 	 */
548403831d35Sstevel 	if (man_start_lower(mdp, mp, NULL, MAN_LOWER)) {
548503831d35Sstevel 
548603831d35Sstevel 		MAN_DBG(MAN_LINK, ("man_do_icmp_broadcast: xmit failed"));
548703831d35Sstevel 
548803831d35Sstevel 		freemsg(mp);
548903831d35Sstevel 	}
549003831d35Sstevel 
549103831d35Sstevel }
549203831d35Sstevel 
549303831d35Sstevel static mblk_t *
man_pinger(t_uscalar_t sap)549403831d35Sstevel man_pinger(t_uscalar_t sap)
549503831d35Sstevel {
549603831d35Sstevel 	mblk_t		*mp = NULL;
549703831d35Sstevel 	man_dladdr_t	dlsap;
549803831d35Sstevel 	icmph_t		*icmph;
549903831d35Sstevel 	int		ipver;
550003831d35Sstevel 	ipha_t		*ipha;
550103831d35Sstevel 	ip6_t		*ip6h;
550203831d35Sstevel 	int		iph_hdr_len;
550303831d35Sstevel 	int		datalen = 64;
550403831d35Sstevel 	uchar_t		*datap;
550503831d35Sstevel 	uint16_t	size;
550603831d35Sstevel 	uchar_t		i;
550703831d35Sstevel 
550803831d35Sstevel 	dlsap.dl_sap = htons(sap);
550903831d35Sstevel 	bcopy(&etherbroadcast, &dlsap.dl_phys, sizeof (dlsap.dl_phys));
551003831d35Sstevel 
551103831d35Sstevel 	if (sap == ETHERTYPE_IPV6) {
551203831d35Sstevel 		ipver = IPV6_VERSION;
551303831d35Sstevel 		iph_hdr_len = sizeof (ip6_t);
551403831d35Sstevel 		size = ICMP6_MINLEN;
551503831d35Sstevel 	} else {
551603831d35Sstevel 		ipver = IPV4_VERSION;
551703831d35Sstevel 		iph_hdr_len = sizeof (ipha_t);
551803831d35Sstevel 		size = ICMPH_SIZE;
551903831d35Sstevel 	}
552003831d35Sstevel 	size += (uint16_t)iph_hdr_len;
552103831d35Sstevel 	size += datalen;
552203831d35Sstevel 
552303831d35Sstevel 	mp = man_alloc_udreq(size, &dlsap);
552403831d35Sstevel 	if (mp == NULL)
552503831d35Sstevel 		goto exit;
552603831d35Sstevel 
552703831d35Sstevel 	/*
552803831d35Sstevel 	 * fill out the ICMP echo packet headers
552903831d35Sstevel 	 */
553003831d35Sstevel 	mp->b_cont->b_wptr += iph_hdr_len;
553103831d35Sstevel 	if (ipver == IPV4_VERSION) {
553203831d35Sstevel 		ipha = (ipha_t *)mp->b_cont->b_rptr;
553303831d35Sstevel 		ipha->ipha_version_and_hdr_length = (IP_VERSION << 4)
553403831d35Sstevel 		    | IP_SIMPLE_HDR_LENGTH_IN_WORDS;
553503831d35Sstevel 		ipha->ipha_type_of_service = 0;
553603831d35Sstevel 		ipha->ipha_length = size;
553703831d35Sstevel 		ipha->ipha_fragment_offset_and_flags = IPH_DF;
553803831d35Sstevel 		ipha->ipha_ttl = 1;
553903831d35Sstevel 		ipha->ipha_protocol = IPPROTO_ICMP;
554003831d35Sstevel 		if (man_is_on_domain) {
554103831d35Sstevel 			manc_t		manc;
554203831d35Sstevel 
554303831d35Sstevel 			if (man_get_iosram(&manc)) {
554403831d35Sstevel 				freemsg(mp);
554503831d35Sstevel 				mp = NULL;
554603831d35Sstevel 				goto exit;
554703831d35Sstevel 			}
554803831d35Sstevel 
554903831d35Sstevel 			/*
555003831d35Sstevel 			 * Domain generates ping packets for domain to
555103831d35Sstevel 			 * SC network (dman0 <--> scman0).
555203831d35Sstevel 			 */
555303831d35Sstevel 			ipha->ipha_dst = manc.manc_sc_ipaddr;
555403831d35Sstevel 			ipha->ipha_src = manc.manc_dom_ipaddr;
555503831d35Sstevel 		} else {
555603831d35Sstevel 			/*
555703831d35Sstevel 			 * Note that ping packets are only generated
555803831d35Sstevel 			 * by the SC across scman1 (SC to SC network).
555903831d35Sstevel 			 */
556003831d35Sstevel 			ipha->ipha_dst = man_sc_ipaddrs.ip_other_sc_ipaddr;
556103831d35Sstevel 			ipha->ipha_src = man_sc_ipaddrs.ip_my_sc_ipaddr;
556203831d35Sstevel 		}
556303831d35Sstevel 
556403831d35Sstevel 		ipha->ipha_ident = 0;
556503831d35Sstevel 
556603831d35Sstevel 		ipha->ipha_hdr_checksum = 0;
556703831d35Sstevel 		ipha->ipha_hdr_checksum = IP_CSUM(mp->b_cont, 0, 0);
556803831d35Sstevel 
556903831d35Sstevel 	} else {
557003831d35Sstevel 		ip6h = (ip6_t *)mp->b_cont->b_rptr;
557103831d35Sstevel 		/*
557203831d35Sstevel 		 * IP version = 6, priority = 0, flow = 0
557303831d35Sstevel 		 */
557403831d35Sstevel 		ip6h->ip6_flow = (IPV6_VERSION << 28);
557503831d35Sstevel 		ip6h->ip6_plen =
557603831d35Sstevel 		    htons((short)(size - iph_hdr_len));
557703831d35Sstevel 		ip6h->ip6_nxt = IPPROTO_ICMPV6;
557803831d35Sstevel 		ip6h->ip6_hlim = 1;	/* stay on link */
557903831d35Sstevel 
558003831d35Sstevel 		if (man_is_on_domain) {
558103831d35Sstevel 			manc_t		manc;
558203831d35Sstevel 
558303831d35Sstevel 			if (man_get_iosram(&manc)) {
558403831d35Sstevel 				freemsg(mp);
558503831d35Sstevel 				mp = NULL;
558603831d35Sstevel 				goto exit;
558703831d35Sstevel 			}
558803831d35Sstevel 
558903831d35Sstevel 			/*
559003831d35Sstevel 			 * Domain generates ping packets for domain to
559103831d35Sstevel 			 * SC network (dman0 <--> scman0).
559203831d35Sstevel 			 */
559303831d35Sstevel 			ip6h->ip6_src = manc.manc_dom_ipv6addr;
559403831d35Sstevel 			ip6h->ip6_dst = manc.manc_sc_ipv6addr;
559503831d35Sstevel 		} else {
559603831d35Sstevel 			/*
559703831d35Sstevel 			 * Note that ping packets are only generated
559803831d35Sstevel 			 * by the SC across scman1 (SC to SC network).
559903831d35Sstevel 			 */
560003831d35Sstevel 			ip6h->ip6_src = man_sc_ip6addrs.ip6_my_sc_ipaddr;
560103831d35Sstevel 			ip6h->ip6_dst = man_sc_ip6addrs.ip6_other_sc_ipaddr;
560203831d35Sstevel 		}
560303831d35Sstevel 	}
560403831d35Sstevel 
560503831d35Sstevel 	/*
560603831d35Sstevel 	 * IPv6 and IP are the same for ICMP as far as I'm concerned.
560703831d35Sstevel 	 */
560803831d35Sstevel 	icmph = (icmph_t *)mp->b_cont->b_wptr;
560903831d35Sstevel 	if (ipver == IPV4_VERSION) {
561003831d35Sstevel 		mp->b_cont->b_wptr += ICMPH_SIZE;
561103831d35Sstevel 		icmph->icmph_type = ICMP_ECHO_REQUEST;
561203831d35Sstevel 		icmph->icmph_code = 0;
561303831d35Sstevel 	} else {
561403831d35Sstevel 		mp->b_cont->b_wptr += ICMP6_MINLEN;
561503831d35Sstevel 		icmph->icmph_type = ICMP6_ECHO_REQUEST;
561603831d35Sstevel 		icmph->icmph_code = 0;
561703831d35Sstevel 	}
561803831d35Sstevel 
561903831d35Sstevel 	datap = mp->b_cont->b_wptr;
562003831d35Sstevel 	mp->b_cont->b_wptr += datalen;
562103831d35Sstevel 
562203831d35Sstevel 	for (i = 0; i < datalen; i++)
562303831d35Sstevel 		*datap++ = i;
562403831d35Sstevel 
562503831d35Sstevel 	if (ipver == IPV4_VERSION) {
562603831d35Sstevel 		icmph->icmph_checksum = IP_CSUM(mp->b_cont, iph_hdr_len, 0);
562703831d35Sstevel 	} else {
562803831d35Sstevel 		uint32_t	sum;
562903831d35Sstevel 
563003831d35Sstevel 		sum = htons(IPPROTO_ICMPV6) + ip6h->ip6_plen;
563103831d35Sstevel 		icmph->icmph_checksum = IP_CSUM(mp->b_cont, iph_hdr_len - 32,
563203831d35Sstevel 		    (sum & 0xffff) + (sum >> 16));
563303831d35Sstevel 	}
563403831d35Sstevel 
563503831d35Sstevel /*
563603831d35Sstevel  * TBD
563703831d35Sstevel  *	icp->icmp_time =  ???;
563803831d35Sstevel  */
563903831d35Sstevel 
564003831d35Sstevel exit:
564103831d35Sstevel 	return (mp);
564203831d35Sstevel }
564303831d35Sstevel 
564403831d35Sstevel static mblk_t *
man_alloc_udreq(int size,man_dladdr_t * dlsap)564503831d35Sstevel man_alloc_udreq(int size, man_dladdr_t *dlsap)
564603831d35Sstevel {
564703831d35Sstevel 	dl_unitdata_req_t	*udreq;
564803831d35Sstevel 	mblk_t			*bp;
564903831d35Sstevel 	mblk_t			*mp;
565003831d35Sstevel 
565103831d35Sstevel 	mp = allocb(sizeof (dl_unitdata_req_t) + sizeof (*dlsap), BPRI_MED);
565203831d35Sstevel 
565303831d35Sstevel 	if (mp == NULL) {
565403831d35Sstevel 		cmn_err(CE_NOTE, "man_preparepkt: allocb failed");
565503831d35Sstevel 		return (NULL);
565603831d35Sstevel 	}
565703831d35Sstevel 
565803831d35Sstevel 	if ((bp = allocb(size, BPRI_MED)) == NULL) {
565903831d35Sstevel 		freemsg(mp);
566003831d35Sstevel 		cmn_err(CE_NOTE, "man_preparepkts: allocb failed");
566103831d35Sstevel 		return (NULL);
566203831d35Sstevel 	}
566303831d35Sstevel 	bzero(bp->b_rptr, size);
566403831d35Sstevel 
566503831d35Sstevel 	mp->b_cont = bp;
566603831d35Sstevel 	mp->b_datap->db_type = M_PROTO;
566703831d35Sstevel 	udreq = (dl_unitdata_req_t *)mp->b_wptr;
566803831d35Sstevel 	mp->b_wptr += sizeof (dl_unitdata_req_t);
566903831d35Sstevel 
567003831d35Sstevel 	/*
567103831d35Sstevel 	 * phys addr first - TBD
567203831d35Sstevel 	 */
567303831d35Sstevel 	bcopy((char *)dlsap, mp->b_wptr, sizeof (*dlsap));
567403831d35Sstevel 	mp->b_wptr += sizeof (*dlsap);
567503831d35Sstevel 
567603831d35Sstevel 	udreq->dl_primitive = DL_UNITDATA_REQ;
567703831d35Sstevel 	udreq->dl_dest_addr_length = sizeof (*dlsap);
567803831d35Sstevel 	udreq->dl_dest_addr_offset = sizeof (*udreq);
567903831d35Sstevel 	udreq->dl_priority.dl_min = 0;
568003831d35Sstevel 	udreq->dl_priority.dl_max = 0;
568103831d35Sstevel 
568203831d35Sstevel 	return (mp);
568303831d35Sstevel }
568403831d35Sstevel 
568503831d35Sstevel 
568603831d35Sstevel /*
568703831d35Sstevel  * The routines in this file are executed by the MAN background thread,
568803831d35Sstevel  * which executes outside of the STREAMS framework (see man_str.c). It is
568903831d35Sstevel  * allowed to do the things required to modify the STREAMS driver (things
569003831d35Sstevel  * that are normally done from a user process). These routines do things like
569103831d35Sstevel  * open and close drivers, PLINK and PUNLINK streams to/from the multiplexor,
569203831d35Sstevel  * etc.
569303831d35Sstevel  *
569403831d35Sstevel  * The mechanism of communication between the STREAMS portion of the driver
569503831d35Sstevel  * and the background thread portion are two work queues, man_bwork_q
569603831d35Sstevel  * and man_iwork_q (background work q and streams work q).  Work
569703831d35Sstevel  * requests are placed on those queues when one half of the driver wants
569803831d35Sstevel  * the other half to do some work for it.
569903831d35Sstevel  *
570003831d35Sstevel  * The MAN background thread executes the man_bwork routine. Its sole
570103831d35Sstevel  * job is to process work requests placed on this work q. The MAN upper
570203831d35Sstevel  * write service routine is responsible for processing work requests posted
570303831d35Sstevel  * to the man_iwork_q->
570403831d35Sstevel  *
570503831d35Sstevel  * Both work queues are protected by the global mutex man_lock. The
570603831d35Sstevel  * man_bwork is signalged via the condvarman_bwork_q->q_cv. The man_uwsrv
570703831d35Sstevel  * routine is signaled by calling qenable (forcing man_uwsrv to run).
570803831d35Sstevel  */
570903831d35Sstevel 
571003831d35Sstevel /*
571103831d35Sstevel  * man_bwork - Work thread for this device.  It is responsible for
571203831d35Sstevel  * performing operations which can't occur within the STREAMS framework.
571303831d35Sstevel  *
571403831d35Sstevel  * Locking:
571503831d35Sstevel  *	- Called holding no locks
571603831d35Sstevel  *	- Obtains the global mutex man_lock to remove work from
571703831d35Sstevel  *	  man_bwork_q, and post work to man_iwork_q->
571803831d35Sstevel  *	- Note that we do not want to hold any locks when making
571903831d35Sstevel  *	  any ldi_ calls.
572003831d35Sstevel  */
572103831d35Sstevel void
man_bwork()572203831d35Sstevel man_bwork()
572303831d35Sstevel {
572403831d35Sstevel 	man_work_t	*wp;
572503831d35Sstevel 	int		done = 0;
572603831d35Sstevel 	callb_cpr_t	cprinfo;
572703831d35Sstevel 	int		wp_finished;
572803831d35Sstevel 
572903831d35Sstevel 	CALLB_CPR_INIT(&cprinfo, &man_lock, callb_generic_cpr,
573003831d35Sstevel 	    "mn_work_thrd");
573103831d35Sstevel 
573203831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_bwork: enter"));
573303831d35Sstevel 
573403831d35Sstevel 	while (done == 0) {
573503831d35Sstevel 
573603831d35Sstevel 		mutex_enter(&man_lock);
573703831d35Sstevel 		/*
573803831d35Sstevel 		 * While there is nothing to do, sit in cv_wait.  If work
573903831d35Sstevel 		 * request is made, requester will signal.
574003831d35Sstevel 		 */
574103831d35Sstevel 		while (man_bwork_q->q_work == NULL) {
574203831d35Sstevel 
574303831d35Sstevel 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
574403831d35Sstevel 
574503831d35Sstevel 			cv_wait(&man_bwork_q->q_cv, &man_lock);
574603831d35Sstevel 
574703831d35Sstevel 			CALLB_CPR_SAFE_END(&cprinfo, &man_lock);
574803831d35Sstevel 		}
574903831d35Sstevel 
575003831d35Sstevel 		wp = man_bwork_q->q_work;
575103831d35Sstevel 		man_bwork_q->q_work = wp->mw_next;
575203831d35Sstevel 		wp->mw_next = NULL;
575303831d35Sstevel 		mutex_exit(&man_lock);
575403831d35Sstevel 
575503831d35Sstevel 		wp_finished = TRUE;
575603831d35Sstevel 
575703831d35Sstevel 		MAN_DBG(MAN_SWITCH, ("man_bwork: type %s",
575803831d35Sstevel 		    _mw_type[wp->mw_type]));
575903831d35Sstevel 
576003831d35Sstevel 		switch (wp->mw_type) {
576103831d35Sstevel 		case MAN_WORK_OPEN_CTL:
576203831d35Sstevel 			wp->mw_status = man_open_ctl();
576303831d35Sstevel 			break;
576403831d35Sstevel 
576503831d35Sstevel 		case MAN_WORK_CLOSE_CTL:
576603831d35Sstevel 			man_close_ctl();
576703831d35Sstevel 			break;
576803831d35Sstevel 
576903831d35Sstevel 		case MAN_WORK_CLOSE:
577003831d35Sstevel 		case MAN_WORK_CLOSE_STREAM:
577103831d35Sstevel 			man_bclose(&wp->mw_arg);
577203831d35Sstevel 			break;
577303831d35Sstevel 
577403831d35Sstevel 		case MAN_WORK_SWITCH:
577503831d35Sstevel 			man_bswitch(&wp->mw_arg, wp);
577603831d35Sstevel 			wp_finished = FALSE;
577703831d35Sstevel 			break;
577803831d35Sstevel 
577903831d35Sstevel 		case MAN_WORK_STOP:		/* man_bwork_stop() */
578003831d35Sstevel 			done = 1;
578103831d35Sstevel 			mutex_enter(&man_lock);
578203831d35Sstevel 			CALLB_CPR_EXIT(&cprinfo); /* Unlocks man_lock */
578303831d35Sstevel 			break;
578403831d35Sstevel 
578503831d35Sstevel 		default:
578603831d35Sstevel 			cmn_err(CE_WARN, "man_bwork: "
578703831d35Sstevel 			    "illegal work type(%d)", wp->mw_type);
578803831d35Sstevel 			break;
578903831d35Sstevel 		}
579003831d35Sstevel 
579103831d35Sstevel 		mutex_enter(&man_lock);
579203831d35Sstevel 
579303831d35Sstevel 		if (wp_finished) {
579403831d35Sstevel 			wp->mw_flags |= MAN_WFLAGS_DONE;
579503831d35Sstevel 			if (wp->mw_flags & MAN_WFLAGS_CVWAITER)
579603831d35Sstevel 				cv_signal(&wp->mw_cv);
579703831d35Sstevel 			else if (wp->mw_flags & MAN_WFLAGS_QWAITER)
579803831d35Sstevel 				qenable(wp->mw_q);
579903831d35Sstevel 			else
580003831d35Sstevel 				man_work_free(wp);
580103831d35Sstevel 		}
580203831d35Sstevel 
580303831d35Sstevel 		mutex_exit(&man_lock);
580403831d35Sstevel 	}
580503831d35Sstevel 
580603831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_bwork: thread_exit"));
580703831d35Sstevel 
580803831d35Sstevel 	mutex_enter(&man_lock);
580903831d35Sstevel 	man_bwork_id = NULL;
581003831d35Sstevel 	mutex_exit(&man_lock);
581103831d35Sstevel 
581203831d35Sstevel 	thread_exit();
581303831d35Sstevel }
581403831d35Sstevel 
581503831d35Sstevel /*
581603831d35Sstevel  * man_open_ctl - Open the control stream.
581703831d35Sstevel  *
581803831d35Sstevel  *	returns	- success - 0
581903831d35Sstevel  *		- failure - errno code
582003831d35Sstevel  *
582103831d35Sstevel  * Mutex Locking Notes:
582203831d35Sstevel  *	We need a way to keep the CLONE_OPEN qwaiters in man_open from
582303831d35Sstevel  *	checking the man_config variables after the ldi_open call below
582403831d35Sstevel  *	returns from man_open, leaving the inner perimeter. So, we use the
582503831d35Sstevel  *	man_lock to synchronize the threads in man_open_ctl and man_open.  We
582603831d35Sstevel  *	hold man_lock across this call into man_open, which in general is a
582703831d35Sstevel  *	no-no. But, the STREAMs portion of the driver (other than open)
582803831d35Sstevel  *	doesn't use it. So, if ldi_open gets hijacked to run any part of
582903831d35Sstevel  *	the MAN streams driver, it wont end up recursively trying to acquire
583003831d35Sstevel  *	man_lock. Note that the non-CLONE_OPEN portion of man_open doesnt
583103831d35Sstevel  *	acquire it either, so again no recursive mutex.
583203831d35Sstevel  */
583303831d35Sstevel static int
man_open_ctl()583403831d35Sstevel man_open_ctl()
583503831d35Sstevel {
583603831d35Sstevel 	int		status = 0;
583703831d35Sstevel 	ldi_handle_t	ctl_lh = NULL;
583803831d35Sstevel 	ldi_ident_t	li = NULL;
583903831d35Sstevel 
584003831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_open_ctl: plumbing control stream\n"));
584103831d35Sstevel 
584203831d35Sstevel 	/*
584303831d35Sstevel 	 * Get eri driver loaded and kstats initialized. Is there a better
584403831d35Sstevel 	 * way to do this? - TBD.
584503831d35Sstevel 	 */
584603831d35Sstevel 	status = ldi_ident_from_mod(&modlinkage, &li);
584703831d35Sstevel 	if (status) {
584803831d35Sstevel 		cmn_err(CE_WARN,
584903831d35Sstevel 		    "man_open_ctl: ident alloc failed, error %d", status);
585003831d35Sstevel 		goto exit;
585103831d35Sstevel 	}
585203831d35Sstevel 
585303831d35Sstevel 	status = ldi_open_by_name(ERI_PATH, FREAD | FWRITE | FNOCTTY,
585403831d35Sstevel 	    kcred, &ctl_lh, li);
585503831d35Sstevel 	if (status) {
585603831d35Sstevel 		cmn_err(CE_WARN,
585703831d35Sstevel 		    "man_open_ctl: eri open failed, error %d", status);
585803831d35Sstevel 		ctl_lh = NULL;
585903831d35Sstevel 		goto exit;
586003831d35Sstevel 	}
586103831d35Sstevel 	(void) ldi_close(ctl_lh, NULL, kcred);
586203831d35Sstevel 	ctl_lh = NULL;
586303831d35Sstevel 
586403831d35Sstevel 	mutex_enter(&man_lock);
586503831d35Sstevel 
586603831d35Sstevel 	if (man_ctl_lh != NULL) {
586703831d35Sstevel 		mutex_exit(&man_lock);
586803831d35Sstevel 		goto exit;
586903831d35Sstevel 	}
587003831d35Sstevel 
587103831d35Sstevel 	ASSERT(man_ctl_wq == NULL);
587203831d35Sstevel 	mutex_exit(&man_lock);
587303831d35Sstevel 
587403831d35Sstevel 	status = ldi_open_by_name(DMAN_INT_PATH, FREAD | FWRITE | FNOCTTY,
587503831d35Sstevel 	    kcred, &ctl_lh, li);
587603831d35Sstevel 	if (status) {
587703831d35Sstevel 		cmn_err(CE_WARN,
587803831d35Sstevel 		    "man_open_ctl: man control dev open failed, "
587903831d35Sstevel 		    "error %d", status);
588003831d35Sstevel 		goto exit;
588103831d35Sstevel 	}
588203831d35Sstevel 
588303831d35Sstevel 	/*
588403831d35Sstevel 	 * Update global config state. TBD - dont need lock here, since
588503831d35Sstevel 	 * everyone is stuck in open until we finish. Only other modifier
588603831d35Sstevel 	 * is man_deconfigure via _fini, which returns EBUSY if there is
588703831d35Sstevel 	 * any open streams (other than control). Do need to signal qwaiters
588803831d35Sstevel 	 * on error.
588903831d35Sstevel 	 */
589003831d35Sstevel 	mutex_enter(&man_lock);
589103831d35Sstevel 	ASSERT(man_config_state == MAN_CONFIGURING);
589203831d35Sstevel 	ASSERT(man_ctl_lh == NULL);
589303831d35Sstevel 	man_ctl_lh = ctl_lh;
589403831d35Sstevel 	mutex_exit(&man_lock);
589503831d35Sstevel 
589603831d35Sstevel exit:
589703831d35Sstevel 	if (li)
589803831d35Sstevel 		ldi_ident_release(li);
589903831d35Sstevel 
590003831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_open_ctl: man_ctl_lh(0x%p) errno = %d\n",
590103831d35Sstevel 	    (void *)man_ctl_lh, status));
590203831d35Sstevel 
590303831d35Sstevel 	return (status);
590403831d35Sstevel }
590503831d35Sstevel 
590603831d35Sstevel /*
590703831d35Sstevel  * man_close_ctl - Close control stream, we are about to unload driver.
590803831d35Sstevel  *
590903831d35Sstevel  * Locking:
591003831d35Sstevel  *	- Called holding no locks.
591103831d35Sstevel  */
591203831d35Sstevel static void
man_close_ctl()591303831d35Sstevel man_close_ctl()
591403831d35Sstevel {
591503831d35Sstevel 	ldi_handle_t tlh;
591603831d35Sstevel 
591703831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_close_ctl: unplumbing control stream\n"));
591803831d35Sstevel 
591903831d35Sstevel 	mutex_enter(&man_lock);
592003831d35Sstevel 	if ((tlh = man_ctl_lh) != NULL)
592103831d35Sstevel 		man_ctl_lh = NULL;
592203831d35Sstevel 	mutex_exit(&man_lock);
592303831d35Sstevel 
592403831d35Sstevel 	if (tlh != NULL) {
592503831d35Sstevel 		(void) ldi_close(tlh, NULL, kcred);
592603831d35Sstevel 	}
592703831d35Sstevel 
592803831d35Sstevel }
592903831d35Sstevel 
593003831d35Sstevel /*
593103831d35Sstevel  * Close the lower streams. Get all the timers canceled, close the lower
593203831d35Sstevel  * stream and delete the dest array.
593303831d35Sstevel  *
593403831d35Sstevel  * Returns:
593503831d35Sstevel  *	0	Closed all streams.
593603831d35Sstevel  *	1	Couldn't close one or more streams, timers still running.
593703831d35Sstevel  *
593803831d35Sstevel  * Locking:
593903831d35Sstevel  *	- Called holding no locks.
594003831d35Sstevel  */
594103831d35Sstevel static void
man_bclose(man_adest_t * adp)594203831d35Sstevel man_bclose(man_adest_t *adp)
594303831d35Sstevel {
594403831d35Sstevel 	int		i;
594503831d35Sstevel 	man_dest_t	*mdp;
594603831d35Sstevel 
594703831d35Sstevel 	man_cancel_timers(adp);
594803831d35Sstevel 
594903831d35Sstevel 	for (i = 0; i < adp->a_ndests; i++) {
595003831d35Sstevel 		mdp = &adp->a_mdp[i];
595103831d35Sstevel 
595203831d35Sstevel 		if (mdp->md_muxid != -1)
595303831d35Sstevel 			man_unplumb(mdp);
595403831d35Sstevel 	}
595503831d35Sstevel 
595603831d35Sstevel 	mutex_destroy(&mdp->md_lock);
595703831d35Sstevel 	man_kfree(adp->a_mdp, sizeof (man_dest_t) * adp->a_ndests);
595803831d35Sstevel 	adp->a_mdp = NULL;
595903831d35Sstevel }
596003831d35Sstevel 
596103831d35Sstevel /*
596203831d35Sstevel  * We want to close down all lower streams. Need to wait until all
596303831d35Sstevel  * timers and work related to these lower streams is quiesced.
596403831d35Sstevel  *
596503831d35Sstevel  * Returns 1 if lower streams are quiesced, 0 if we need to wait
596603831d35Sstevel  * a bit longer.
596703831d35Sstevel  */
596803831d35Sstevel static void
man_cancel_timers(man_adest_t * adp)596903831d35Sstevel man_cancel_timers(man_adest_t *adp)
597003831d35Sstevel {
597103831d35Sstevel 	man_dest_t	*mdp;
597203831d35Sstevel 	int		cnt;
597303831d35Sstevel 	int		i;
597403831d35Sstevel 
597503831d35Sstevel 	mdp = adp->a_mdp;
597603831d35Sstevel 	cnt = adp->a_ndests;
597703831d35Sstevel 
597803831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_cancel_timers: mdp(0x%p) cnt %d",
597903831d35Sstevel 	    (void *)mdp, cnt));
598003831d35Sstevel 
598103831d35Sstevel 	for (i = 0; i < cnt; i++) {
598203831d35Sstevel 
598303831d35Sstevel 		if (mdp[i].md_lc_timer_id != 0) {
598403831d35Sstevel 			(void) quntimeout(man_ctl_wq, mdp[i].md_lc_timer_id);
598503831d35Sstevel 			mdp[i].md_lc_timer_id = 0;
598603831d35Sstevel 		}
598703831d35Sstevel 
598803831d35Sstevel 		if (mdp[i].md_bc_id != 0) {
598903831d35Sstevel 			qunbufcall(man_ctl_wq, mdp[i].md_bc_id);
599003831d35Sstevel 			mdp[i].md_bc_id = 0;
599103831d35Sstevel 		}
599203831d35Sstevel 	}
599303831d35Sstevel 
599403831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_cancel_timers: returns"));
599503831d35Sstevel }
599603831d35Sstevel 
599703831d35Sstevel /*
599803831d35Sstevel  * A failover is started at start of day, when the driver detects a
599903831d35Sstevel  * link failure (see man_linkcheck_timer), or when DR detaches
600003831d35Sstevel  * the IO board containing the current active link between SC and
600103831d35Sstevel  * domain (see man_dr_detach, man_iwork, and man_do_dr_detach). A
600203831d35Sstevel  * MAN_WORK_SWITCH work request containing all the lower streams that
600303831d35Sstevel  * should be switched is posted on the man_bwork_q-> This work request is
600403831d35Sstevel  * processed here. Once all lower streams have been switched to an
600503831d35Sstevel  * alternate path, the MAN_WORK_SWITCH work request is passed back to
600603831d35Sstevel  * man_iwork_q where it is processed within the inner perimeter of the
600703831d35Sstevel  * STREAMS framework (see man_iswitch).
600803831d35Sstevel  *
600903831d35Sstevel  * Note that when the switch fails for whatever reason, we just hand
601003831d35Sstevel  * back the lower streams untouched and let another failover happen.
601103831d35Sstevel  * Hopefully we will sooner or later succeed at the failover.
601203831d35Sstevel  */
601303831d35Sstevel static void
man_bswitch(man_adest_t * adp,man_work_t * wp)601403831d35Sstevel man_bswitch(man_adest_t *adp, man_work_t *wp)
601503831d35Sstevel {
601603831d35Sstevel 	man_dest_t	*tdp;
601703831d35Sstevel 	man_t		*manp;
601803831d35Sstevel 	int		i;
601903831d35Sstevel 	int		status = 0;
602003831d35Sstevel 
602103831d35Sstevel 	/*
602203831d35Sstevel 	 * Make a temporary copy of dest array, updating device to the
602303831d35Sstevel 	 * alternate and try to open all lower streams. bgthread can sleep.
602403831d35Sstevel 	 */
602503831d35Sstevel 
602603831d35Sstevel 	tdp = man_kzalloc(sizeof (man_dest_t) * adp->a_ndests,
602703831d35Sstevel 	    KM_SLEEP);
602803831d35Sstevel 	bcopy(adp->a_mdp, tdp, sizeof (man_dest_t) * adp->a_ndests);
602903831d35Sstevel 
603003831d35Sstevel 	/*
603103831d35Sstevel 	 * Before we switch to the new path, lets sync the kstats.
603203831d35Sstevel 	 */
603303831d35Sstevel 	mutex_enter(&man_lock);
603403831d35Sstevel 
603503831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, adp->a_man_ppa);
603603831d35Sstevel 	if (manp != NULL) {
603703831d35Sstevel 		man_update_path_kstats(manp);
603803831d35Sstevel 	} else
603903831d35Sstevel 		status = ENODEV;
604003831d35Sstevel 
604103831d35Sstevel 	mutex_exit(&man_lock);
604203831d35Sstevel 
604303831d35Sstevel 	if (status != 0)
604403831d35Sstevel 		goto exit;
604503831d35Sstevel 
604603831d35Sstevel 	for (i = 0; i < adp->a_ndests; i++) {
604703831d35Sstevel 
604803831d35Sstevel 		tdp[i].md_device = adp->a_st_dev;
604903831d35Sstevel 		tdp[i].md_muxid = -1;
605003831d35Sstevel 
605103831d35Sstevel 		if (man_plumb(&tdp[i]))
605203831d35Sstevel 			break;
605303831d35Sstevel 	}
605403831d35Sstevel 
605503831d35Sstevel 	/*
605603831d35Sstevel 	 * Didn't plumb everyone, unplumb new lower stuff and return.
605703831d35Sstevel 	 */
605803831d35Sstevel 	if (i < adp->a_ndests) {
605903831d35Sstevel 		int	j;
606003831d35Sstevel 
606103831d35Sstevel 		for (j = 0; j <= i; j++)
606203831d35Sstevel 			man_unplumb(&tdp[j]);
606303831d35Sstevel 		status = EAGAIN;
606403831d35Sstevel 		goto exit;
606503831d35Sstevel 	}
606603831d35Sstevel 
606703831d35Sstevel 	if (man_is_on_domain && man_dossc_switch(adp->a_st_dev.mdev_exp_id)) {
606803831d35Sstevel 		/*
606903831d35Sstevel 		 * If we cant set new path on the SSC, then fail the
607003831d35Sstevel 		 * failover.
607103831d35Sstevel 		 */
607203831d35Sstevel 		for (i = 0; i < adp->a_ndests; i++)
607303831d35Sstevel 			man_unplumb(&tdp[i]);
607403831d35Sstevel 		status = EAGAIN;
607503831d35Sstevel 		goto exit;
607603831d35Sstevel 	}
607703831d35Sstevel 
607803831d35Sstevel 	man_kfree(adp->a_mdp, sizeof (man_dest_t) * adp->a_ndests);
607903831d35Sstevel 	adp->a_mdp = tdp;
608003831d35Sstevel 
608103831d35Sstevel exit:
608203831d35Sstevel 	if (status)
608303831d35Sstevel 		man_kfree(tdp, sizeof (man_dest_t) * adp->a_ndests);
608403831d35Sstevel 
608503831d35Sstevel 
608603831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_bswitch: returns %d", status));
608703831d35Sstevel 
608803831d35Sstevel 	/*
608903831d35Sstevel 	 * Hand processed switch request back to man_iwork for
609003831d35Sstevel 	 * processing in man_iswitch.
609103831d35Sstevel 	 */
609203831d35Sstevel 	wp->mw_status = status;
609303831d35Sstevel 
609403831d35Sstevel 	mutex_enter(&man_lock);
609503831d35Sstevel 	man_work_add(man_iwork_q, wp);
609603831d35Sstevel 	mutex_exit(&man_lock);
609703831d35Sstevel 
609803831d35Sstevel }
609903831d35Sstevel 
610003831d35Sstevel /*
610103831d35Sstevel  * man_plumb - Configure a lower stream for this destination.
610203831d35Sstevel  *
610303831d35Sstevel  * Locking:
610403831d35Sstevel  * 	- Called holding no locks.
610503831d35Sstevel  *
610603831d35Sstevel  * Returns:
610703831d35Sstevel  *	- success - 0
610803831d35Sstevel  *	- failure - error code of failure
610903831d35Sstevel  */
611003831d35Sstevel static int
man_plumb(man_dest_t * mdp)611103831d35Sstevel man_plumb(man_dest_t *mdp)
611203831d35Sstevel {
611303831d35Sstevel 	int		status;
611403831d35Sstevel 	int		muxid;
611503831d35Sstevel 	ldi_handle_t	lh;
611603831d35Sstevel 	ldi_ident_t	li = NULL;
611703831d35Sstevel 
611803831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_plumb: mdp(0x%p) %s%d exp(%d)",
611903831d35Sstevel 	    (void *)mdp, ddi_major_to_name(mdp->md_device.mdev_major),
612003831d35Sstevel 	    mdp->md_device.mdev_ppa, mdp->md_device.mdev_exp_id));
612103831d35Sstevel 
612203831d35Sstevel 	/*
612303831d35Sstevel 	 * Control stream should already be open.
612403831d35Sstevel 	 */
612503831d35Sstevel 	if (man_ctl_lh == NULL) {
612603831d35Sstevel 		status = EAGAIN;
612703831d35Sstevel 		goto exit;
612803831d35Sstevel 	}
612903831d35Sstevel 
613003831d35Sstevel 	mutex_enter(&man_lock);
613103831d35Sstevel 	ASSERT(man_ctl_wq != NULL);
613203831d35Sstevel 	status = ldi_ident_from_stream(man_ctl_wq, &li);
613303831d35Sstevel 	if (status != 0) {
613403831d35Sstevel 		cmn_err(CE_WARN,
613503831d35Sstevel 		    "man_plumb: ident alloc failed, error %d", status);
613603831d35Sstevel 		goto exit;
613703831d35Sstevel 	}
613803831d35Sstevel 	mutex_exit(&man_lock);
613903831d35Sstevel 
614003831d35Sstevel 	/*
614103831d35Sstevel 	 * previously opens were done by a dev_t of makedev(clone_major,
614203831d35Sstevel 	 * mdev_major) which should always map to /devices/pseudo/clone@0:eri
614303831d35Sstevel 	 */
614403831d35Sstevel 	ASSERT(strcmp(ERI_IDNAME,
614503831d35Sstevel 	    ddi_major_to_name(mdp->md_device.mdev_major)) == 0);
614603831d35Sstevel 
614703831d35Sstevel 	status = ldi_open_by_name(ERI_PATH, FREAD | FWRITE | FNOCTTY,
614803831d35Sstevel 	    kcred, &lh, li);
614903831d35Sstevel 	if (status) {
615003831d35Sstevel 		cmn_err(CE_WARN,
615103831d35Sstevel 		    "man_plumb: eri open failed, error %d", status);
615203831d35Sstevel 		goto exit;
615303831d35Sstevel 	}
615403831d35Sstevel 
615503831d35Sstevel 	/*
615603831d35Sstevel 	 * Link netdev under MAN.
615703831d35Sstevel 	 */
615803831d35Sstevel 	ASSERT(mdp->md_muxid == -1);
615903831d35Sstevel 
616003831d35Sstevel 	status = ldi_ioctl(man_ctl_lh, I_PLINK, (intptr_t)lh,
616103831d35Sstevel 	    FREAD+FWRITE+FNOCTTY+FKIOCTL, kcred, &muxid);
616203831d35Sstevel 	if (status) {
616303831d35Sstevel 		cmn_err(CE_WARN,
616403831d35Sstevel 		    "man_plumb: ldi_ioctl(I_PLINK) failed, error %d", status);
616503831d35Sstevel 		(void) ldi_close(lh, NULL, kcred);
616603831d35Sstevel 		goto exit;
616703831d35Sstevel 
616803831d35Sstevel 	}
616903831d35Sstevel 	mdp->md_muxid = muxid;
617003831d35Sstevel 	mdp->md_wq = man_linkrec_find(muxid);
617103831d35Sstevel 	/*
617203831d35Sstevel 	 * If we can't find the linkrec then return an
617303831d35Sstevel 	 * error. It will be automatically unplumbed on failure.
617403831d35Sstevel 	 */
617503831d35Sstevel 	if (mdp->md_wq == NULL)
617603831d35Sstevel 		status = EAGAIN;
617703831d35Sstevel 
617803831d35Sstevel 	(void) ldi_close(lh, NULL, kcred);
617903831d35Sstevel exit:
618003831d35Sstevel 	if (li)
618103831d35Sstevel 		ldi_ident_release(li);
618203831d35Sstevel 
618303831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_plumb: exit\n"));
618403831d35Sstevel 
618503831d35Sstevel 	return (status);
618603831d35Sstevel }
618703831d35Sstevel 
618803831d35Sstevel /*
618903831d35Sstevel  * man_unplumb - tear down the STREAMs framework for the lower multiplexor.
619003831d35Sstevel  *
619103831d35Sstevel  *	mdp - destination struct of interest
619203831d35Sstevel  *
619303831d35Sstevel  *	returns	- success - 0
619403831d35Sstevel  *		- failure - return error from ldi_ioctl
619503831d35Sstevel  */
619603831d35Sstevel static void
man_unplumb(man_dest_t * mdp)619703831d35Sstevel man_unplumb(man_dest_t *mdp)
619803831d35Sstevel {
619903831d35Sstevel 	int	status, rval;
620003831d35Sstevel 
620103831d35Sstevel 	MAN_DBG(MAN_SWITCH, ("man_unplumb: mdp"));
620203831d35Sstevel 	MAN_DBGCALL(MAN_SWITCH, man_print_mdp(mdp));
620303831d35Sstevel 
620403831d35Sstevel 	if (mdp->md_muxid == -1)
620503831d35Sstevel 		return;
620603831d35Sstevel 
620703831d35Sstevel 	ASSERT(man_ctl_lh != NULL);
620803831d35Sstevel 
620903831d35Sstevel 	/*
621003831d35Sstevel 	 * I_PUNLINK causes the multiplexor resources to be freed.
621103831d35Sstevel 	 */
621203831d35Sstevel 	status = ldi_ioctl(man_ctl_lh, I_PUNLINK, (intptr_t)mdp->md_muxid,
621303831d35Sstevel 	    FREAD+FWRITE+FNOCTTY+FKIOCTL, kcred, &rval);
621403831d35Sstevel 	if (status) {
621503831d35Sstevel 		cmn_err(CE_WARN, "man_unplumb: ldi_ioctl(I_PUNLINK) failed"
621603831d35Sstevel 		    " errno %d\n", status);
621703831d35Sstevel 	}
621803831d35Sstevel 	/*
621903831d35Sstevel 	 * Delete linkrec if it exists.
622003831d35Sstevel 	 */
622103831d35Sstevel 	(void) man_linkrec_find(mdp->md_muxid);
622203831d35Sstevel 	mdp->md_muxid = -1;
622303831d35Sstevel 
622403831d35Sstevel }
622503831d35Sstevel 
622603831d35Sstevel /*
622703831d35Sstevel  * The routines below deal with paths and pathgroups. These data structures
622803831d35Sstevel  * are used to track the physical devices connecting the domain and SSC.
622903831d35Sstevel  * These devices make up the lower streams of the MAN multiplexor. The
623003831d35Sstevel  * routines all expect the man_lock to be held.
623103831d35Sstevel  *
623203831d35Sstevel  * A pathgroup consists of all paths that connect a particular domain and the
623303831d35Sstevel  * SSC. The concept of a pathgroup id (pg_id) is used to uniquely identify
623403831d35Sstevel  * a pathgroup.  For Domains, there is just one pathgroup, that connecting
623503831d35Sstevel  * the domain to the SSC (pg_id == 0). On the SSC, there is one pathgroup per
623603831d35Sstevel  * domain. The pg_id field corresponds to the domain tags A-R. A pg_id of
623703831d35Sstevel  * 0 means domain tag A, a pg_id of 1 means domain B, etc.
623803831d35Sstevel  *
623903831d35Sstevel  * The path data structure identifies one path between the SSC and a domain.
624003831d35Sstevel  * It describes the information for the path: the major and minor number of
624103831d35Sstevel  * the physical device; kstat pointers; and ethernet address of the
624203831d35Sstevel  * other end of the path.
624303831d35Sstevel  *
624403831d35Sstevel  * The pathgroups are anchored at man_pg_head and are protected by the
624503831d35Sstevel  * by the inner perimeter. The routines are only called by the STREAMs
624603831d35Sstevel  * portion of the driver.
624703831d35Sstevel  */
624803831d35Sstevel 
624903831d35Sstevel /*
625003831d35Sstevel  * Update man instance pathgroup info. Exclusive inner perimeter assures
625103831d35Sstevel  * this code is single threaded. man_refcnt assures man_t wont detach
625203831d35Sstevel  * while we are playing with man_pg stuff.
625303831d35Sstevel  *
625403831d35Sstevel  * Returns 0 on success, errno on failure.
625503831d35Sstevel  */
625603831d35Sstevel int
man_pg_cmd(mi_path_t * mip,man_work_t * waiter_wp)625703831d35Sstevel man_pg_cmd(mi_path_t *mip, man_work_t *waiter_wp)
625803831d35Sstevel {
625903831d35Sstevel 	int		status = 0;
626003831d35Sstevel 	man_t		*manp;
626103831d35Sstevel 
626203831d35Sstevel 	if (mip->mip_ndevs < 0) {
626303831d35Sstevel 		status = EINVAL;
626403831d35Sstevel 		cmn_err(CE_WARN, "man_pg_cmd: EINVAL: mip_ndevs %d",
626503831d35Sstevel 		    mip->mip_ndevs);
626603831d35Sstevel 		goto exit;
626703831d35Sstevel 	}
626803831d35Sstevel 
626903831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
627003831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, mip->mip_man_ppa);
627103831d35Sstevel 	if (manp == NULL) {
627203831d35Sstevel 		status = ENODEV;
627303831d35Sstevel 		goto exit;
627403831d35Sstevel 	}
627503831d35Sstevel 
627603831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_cmd: mip"));
627703831d35Sstevel 	MAN_DBGCALL(MAN_PATH, man_print_mip(mip));
627803831d35Sstevel 
627903831d35Sstevel 	MAN_DBG(MAN_PATH, ("\tman_t"));
628003831d35Sstevel 	MAN_DBGCALL(MAN_PATH, man_print_man(manp));
628103831d35Sstevel 
628203831d35Sstevel 	switch (mip->mip_cmd) {
628303831d35Sstevel 	case MI_PATH_ASSIGN:
628403831d35Sstevel 		status = man_pg_assign(&manp->man_pg, mip, FALSE);
628503831d35Sstevel 		break;
628603831d35Sstevel 
628703831d35Sstevel 	case MI_PATH_ADD:
628803831d35Sstevel 		status = man_pg_assign(&manp->man_pg, mip, TRUE);
628903831d35Sstevel 		break;
629003831d35Sstevel 
629103831d35Sstevel 	case MI_PATH_UNASSIGN:
629203831d35Sstevel 		status = man_pg_unassign(&manp->man_pg, mip);
629303831d35Sstevel 		break;
629403831d35Sstevel 
629503831d35Sstevel 	case MI_PATH_ACTIVATE:
629603831d35Sstevel 		status = man_pg_activate(manp, mip, waiter_wp);
629703831d35Sstevel 		break;
629803831d35Sstevel 
629903831d35Sstevel 	case MI_PATH_READ:
630003831d35Sstevel 		status = man_pg_read(manp->man_pg, mip);
630103831d35Sstevel 		break;
630203831d35Sstevel 
630303831d35Sstevel 	default:
630403831d35Sstevel 		status = EINVAL;
630503831d35Sstevel 		cmn_err(CE_NOTE, "man_pg_cmd: invalid command");
630603831d35Sstevel 		break;
630703831d35Sstevel 	}
630803831d35Sstevel 
630903831d35Sstevel exit:
631003831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_cmd: returns %d", status));
631103831d35Sstevel 
631203831d35Sstevel 	return (status);
631303831d35Sstevel }
631403831d35Sstevel 
631503831d35Sstevel /*
631603831d35Sstevel  * Assign paths to a pathgroup. If pathgroup doesnt exists, create it.
631703831d35Sstevel  * If path doesnt exist, create it. If ethernet address of existing
631803831d35Sstevel  * pathgroup different, change it. If an existing path is not in the new
631903831d35Sstevel  * list, remove it.  If anything changed, send PATH_UPDATE request to
632003831d35Sstevel  * man_iwork to update all man_dest_t's.
632103831d35Sstevel  *
632203831d35Sstevel  * 	mplpp	- man pathgroup list point to point.
632303831d35Sstevel  *	mip	- new/updated pathgroup info to assign.
632403831d35Sstevel  */
632503831d35Sstevel static int
man_pg_assign(man_pg_t ** mplpp,mi_path_t * mip,int add_only)632603831d35Sstevel man_pg_assign(man_pg_t **mplpp, mi_path_t *mip, int add_only)
632703831d35Sstevel {
632803831d35Sstevel 	man_pg_t	*mpg;
632903831d35Sstevel 	man_path_t	*mp;
633003831d35Sstevel 	man_path_t	*add_paths = NULL;
633103831d35Sstevel 	int		cnt;
633203831d35Sstevel 	int		i;
633303831d35Sstevel 	int		first_pass = TRUE;
633403831d35Sstevel 	int		status = 0;
633503831d35Sstevel 
633603831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
633703831d35Sstevel 
633803831d35Sstevel 	cnt = mip->mip_ndevs;
633903831d35Sstevel 	if (cnt == 0) {
634003831d35Sstevel 		status = EINVAL;
634103831d35Sstevel 		cmn_err(CE_NOTE, "man_pg_assign: mip_ndevs == 0");
634203831d35Sstevel 		goto exit;
634303831d35Sstevel 	}
634403831d35Sstevel 
634503831d35Sstevel 	/*
634603831d35Sstevel 	 * Assure the devices to be assigned are not assigned to some other
634703831d35Sstevel 	 * pathgroup.
634803831d35Sstevel 	 */
634903831d35Sstevel 	for (i = 0; i < cnt; i++) {
635003831d35Sstevel 		mpg = man_find_path_by_dev(*mplpp, &mip->mip_devs[i], NULL);
635103831d35Sstevel 
635203831d35Sstevel 		if (mpg == NULL)
635303831d35Sstevel 			continue;
635403831d35Sstevel 
635503831d35Sstevel 		if ((mpg->mpg_man_ppa != mip->mip_man_ppa) ||
635603831d35Sstevel 		    (mpg->mpg_pg_id != mip->mip_pg_id)) {
635703831d35Sstevel 			/*
635803831d35Sstevel 			 * Already assigned to some other man instance
635903831d35Sstevel 			 * or pathgroup.
636003831d35Sstevel 			 */
636103831d35Sstevel 			status = EEXIST;
636203831d35Sstevel 			goto exit;
636303831d35Sstevel 		}
636403831d35Sstevel 	}
636503831d35Sstevel 
636603831d35Sstevel 	/*
636703831d35Sstevel 	 * Find pathgroup, or allocate new one if it doesnt exist and
636803831d35Sstevel 	 * add it to list at mplpp. Result is that mpg points to
636903831d35Sstevel 	 * pathgroup to modify.
637003831d35Sstevel 	 */
637103831d35Sstevel 	mpg = man_find_pg_by_id(*mplpp, mip->mip_pg_id);
637203831d35Sstevel 	if (mpg == NULL) {
637303831d35Sstevel 
637403831d35Sstevel 		status = man_pg_create(mplpp, &mpg, mip);
637503831d35Sstevel 		if (status)
637603831d35Sstevel 			goto exit;
637703831d35Sstevel 
637803831d35Sstevel 	} else if (ether_cmp(&mip->mip_eaddr, &mpg->mpg_dst_eaddr) != 0) {
637903831d35Sstevel 
638003831d35Sstevel 		cmn_err(CE_WARN, "man_pg_assign: ethernet address mismatch");
638103831d35Sstevel 		cmn_err(CE_CONT, "existing %s",
638203831d35Sstevel 		    ether_sprintf(&mpg->mpg_dst_eaddr));
638303831d35Sstevel 		cmn_err(CE_CONT, "new %s",
638403831d35Sstevel 		    ether_sprintf(&mip->mip_eaddr));
638503831d35Sstevel 
638603831d35Sstevel 		status = EINVAL;
638703831d35Sstevel 		goto exit;
638803831d35Sstevel 	}
638903831d35Sstevel 
639003831d35Sstevel 	/*
639103831d35Sstevel 	 * Create list of new paths to add to pathgroup.
639203831d35Sstevel 	 */
639303831d35Sstevel 	for (i = 0; i < cnt; i++) {
639403831d35Sstevel 
639503831d35Sstevel 		if (man_find_path_by_dev(*mplpp, &mip->mip_devs[i], NULL))
639603831d35Sstevel 			continue;	/* Already exists in this pathgroup */
639703831d35Sstevel 
639803831d35Sstevel 		mp = man_kzalloc(sizeof (man_path_t), KM_NOSLEEP);
639903831d35Sstevel 		if (mp == NULL) {
640003831d35Sstevel 			status = ENOMEM;
640103831d35Sstevel 			goto exit;
640203831d35Sstevel 		}
640303831d35Sstevel 
640403831d35Sstevel 		mp->mp_device = mip->mip_devs[i];
640503831d35Sstevel 		mp->mp_device.mdev_state = MDEV_ASSIGNED;
640603831d35Sstevel 
640703831d35Sstevel 		MAN_DBG(MAN_PATH, ("man_pg_assign: assigning mdp"));
640803831d35Sstevel 		MAN_DBGCALL(MAN_PATH, man_print_dev(&mp->mp_device));
640903831d35Sstevel 
641003831d35Sstevel 		status = man_path_kstat_init(mp);
641103831d35Sstevel 		if (status) {
641203831d35Sstevel 			man_kfree(mp, sizeof (man_path_t));
641303831d35Sstevel 			goto exit;
641403831d35Sstevel 		}
641503831d35Sstevel 
641603831d35Sstevel 		man_path_insert(&add_paths, mp);
641703831d35Sstevel 	}
641803831d35Sstevel 
641903831d35Sstevel 	/*
642003831d35Sstevel 	 * man_dr_attach passes only the path which is being DRd in.
642103831d35Sstevel 	 * So just add the path and don't worry about removing paths.
642203831d35Sstevel 	 */
642303831d35Sstevel 	if (add_only == TRUE)
642403831d35Sstevel 		goto exit;
642503831d35Sstevel 
642603831d35Sstevel 
642703831d35Sstevel 	/*
642803831d35Sstevel 	 * Check if any paths we want to remove are ACTIVE. If not,
642903831d35Sstevel 	 * do a second pass and remove them.
643003831d35Sstevel 	 */
643103831d35Sstevel again:
643203831d35Sstevel 	mp = mpg->mpg_pathp;
643303831d35Sstevel 	while (mp != NULL) {
643403831d35Sstevel 		int		in_new_list;
643503831d35Sstevel 		man_path_t	*rp;
643603831d35Sstevel 
643703831d35Sstevel 		rp = NULL;
643803831d35Sstevel 		in_new_list = FALSE;
643903831d35Sstevel 
644003831d35Sstevel 		for (i = 0; i < cnt; i++) {
644103831d35Sstevel 			if (mp->mp_device.mdev_ppa ==
644203831d35Sstevel 			    mip->mip_devs[i].mdev_ppa) {
644303831d35Sstevel 
644403831d35Sstevel 				in_new_list = TRUE;
644503831d35Sstevel 				break;
644603831d35Sstevel 			}
644703831d35Sstevel 		}
644803831d35Sstevel 
644903831d35Sstevel 		if (!in_new_list) {
645003831d35Sstevel 			if (first_pass) {
645103831d35Sstevel 				if (mp->mp_device.mdev_state & MDEV_ACTIVE) {
645203831d35Sstevel 					status = EBUSY;
645303831d35Sstevel 					goto exit;
645403831d35Sstevel 				}
645503831d35Sstevel 			} else {
645603831d35Sstevel 				rp = mp;
645703831d35Sstevel 			}
645803831d35Sstevel 		}
645903831d35Sstevel 		mp = mp->mp_next;
646003831d35Sstevel 
646103831d35Sstevel 		if (rp != NULL)
646203831d35Sstevel 			man_path_remove(&mpg->mpg_pathp, rp);
646303831d35Sstevel 	}
646403831d35Sstevel 
646503831d35Sstevel 	if (first_pass == TRUE) {
646603831d35Sstevel 		first_pass = FALSE;
646703831d35Sstevel 		goto again;
646803831d35Sstevel 	}
646903831d35Sstevel 
647003831d35Sstevel exit:
647103831d35Sstevel 	if (status == 0) {
647203831d35Sstevel 		if (add_paths)
647303831d35Sstevel 			man_path_merge(&mpg->mpg_pathp, add_paths);
647403831d35Sstevel 	} else {
647503831d35Sstevel 		while (add_paths != NULL) {
647603831d35Sstevel 			mp = add_paths;
647703831d35Sstevel 			add_paths = mp->mp_next;
647803831d35Sstevel 			mp->mp_next = NULL;
647903831d35Sstevel 
648003831d35Sstevel 			man_path_kstat_uninit(mp);
648103831d35Sstevel 			man_kfree(mp, sizeof (man_path_t));
648203831d35Sstevel 		}
648303831d35Sstevel 	}
648403831d35Sstevel 
648503831d35Sstevel 	return (status);
648603831d35Sstevel }
648703831d35Sstevel 
648803831d35Sstevel /*
648903831d35Sstevel  * Remove all paths from a pathgroup (domain shutdown). If there is an
649003831d35Sstevel  * active path in the group, shut down all destinations referencing it
649103831d35Sstevel  * first.
649203831d35Sstevel  */
649303831d35Sstevel static int
man_pg_unassign(man_pg_t ** plpp,mi_path_t * mip)649403831d35Sstevel man_pg_unassign(man_pg_t **plpp, mi_path_t *mip)
649503831d35Sstevel {
649603831d35Sstevel 	man_pg_t	*mpg;
649703831d35Sstevel 	man_pg_t	*tpg;
649803831d35Sstevel 	man_pg_t	*tppg;
649903831d35Sstevel 	man_path_t	*mp = NULL;
650003831d35Sstevel 	int		status = 0;
650103831d35Sstevel 
650203831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
650303831d35Sstevel 
650403831d35Sstevel 	/*
650503831d35Sstevel 	 * Check for existence of pathgroup.
650603831d35Sstevel 	 */
650703831d35Sstevel 	if ((mpg = man_find_pg_by_id(*plpp, mip->mip_pg_id)) == NULL)
650803831d35Sstevel 		goto exit;
650903831d35Sstevel 
651003831d35Sstevel 	if (man_find_active_path(mpg->mpg_pathp) != NULL) {
651103831d35Sstevel 		status = man_remove_dests(mpg);
651203831d35Sstevel 		if (status)
651303831d35Sstevel 			goto exit;
651403831d35Sstevel 	}
651503831d35Sstevel 
651603831d35Sstevel 	/*
651703831d35Sstevel 	 * Free all the paths for this pathgroup.
651803831d35Sstevel 	 */
651903831d35Sstevel 	while (mpg->mpg_pathp) {
652003831d35Sstevel 		mp = mpg->mpg_pathp;
652103831d35Sstevel 		mpg->mpg_pathp = mp->mp_next;
652203831d35Sstevel 		mp->mp_next = NULL;
652303831d35Sstevel 
652403831d35Sstevel 		man_path_kstat_uninit(mp);
652503831d35Sstevel 		man_kfree(mp, sizeof (man_path_t));
652603831d35Sstevel 	}
652703831d35Sstevel 
652803831d35Sstevel 	/*
652903831d35Sstevel 	 * Remove this pathgroup from the list, and free it.
653003831d35Sstevel 	 */
653103831d35Sstevel 	tpg = tppg = *plpp;
653203831d35Sstevel 	if (tpg == mpg) {
653303831d35Sstevel 		*plpp = tpg->mpg_next;
653403831d35Sstevel 		goto free_pg;
653503831d35Sstevel 	}
653603831d35Sstevel 
653703831d35Sstevel 	for (tpg = tpg->mpg_next; tpg != NULL; tpg = tpg->mpg_next) {
653803831d35Sstevel 		if (tpg == mpg)
653903831d35Sstevel 			break;
654003831d35Sstevel 		tppg = tpg;
654103831d35Sstevel 	}
654203831d35Sstevel 
654303831d35Sstevel 	ASSERT(tpg != NULL);
654403831d35Sstevel 
654503831d35Sstevel 	tppg->mpg_next = tpg->mpg_next;
654603831d35Sstevel 	tpg->mpg_next = NULL;
654703831d35Sstevel 
654803831d35Sstevel free_pg:
654903831d35Sstevel 	man_kfree(tpg, sizeof (man_pg_t));
655003831d35Sstevel 
655103831d35Sstevel exit:
655203831d35Sstevel 	return (status);
655303831d35Sstevel 
655403831d35Sstevel }
655503831d35Sstevel 
655603831d35Sstevel /*
655703831d35Sstevel  * Set a new active path. This is done via man_ioctl so we are
655803831d35Sstevel  * exclusive in the inner perimeter.
655903831d35Sstevel  */
656003831d35Sstevel static int
man_pg_activate(man_t * manp,mi_path_t * mip,man_work_t * waiter_wp)656103831d35Sstevel man_pg_activate(man_t *manp, mi_path_t *mip, man_work_t *waiter_wp)
656203831d35Sstevel {
656303831d35Sstevel 	man_pg_t	*mpg1;
656403831d35Sstevel 	man_pg_t	*mpg2;
656503831d35Sstevel 	man_pg_t	*plp;
656603831d35Sstevel 	man_path_t	*mp;
656703831d35Sstevel 	man_path_t	*ap;
656803831d35Sstevel 	int		status = 0;
656903831d35Sstevel 
657003831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
657103831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_activate: dev"));
657203831d35Sstevel 	MAN_DBGCALL(MAN_PATH, man_print_dev(mip->mip_devs));
657303831d35Sstevel 
657403831d35Sstevel 	if (mip->mip_ndevs != 1) {
657503831d35Sstevel 		status = EINVAL;
657603831d35Sstevel 		goto exit;
657703831d35Sstevel 	}
657803831d35Sstevel 
657903831d35Sstevel 	plp = manp->man_pg;
658003831d35Sstevel 	mpg1 = man_find_pg_by_id(plp, mip->mip_pg_id);
658103831d35Sstevel 	if (mpg1 == NULL) {
658203831d35Sstevel 		status = EINVAL;
658303831d35Sstevel 		goto exit;
658403831d35Sstevel 	}
658503831d35Sstevel 
658603831d35Sstevel 	mpg2 = man_find_path_by_dev(plp, mip->mip_devs, &mp);
658703831d35Sstevel 	if (mpg2 == NULL) {
658803831d35Sstevel 		status = ENODEV;
658903831d35Sstevel 		goto exit;
659003831d35Sstevel 	}
659103831d35Sstevel 
659203831d35Sstevel 	if (mpg1 != mpg2) {
659303831d35Sstevel 		status = EINVAL;
659403831d35Sstevel 		goto exit;
659503831d35Sstevel 	}
659603831d35Sstevel 
659703831d35Sstevel 	ASSERT(mp->mp_device.mdev_ppa == mip->mip_devs->mdev_ppa);
659803831d35Sstevel 
659903831d35Sstevel 	if (mpg1->mpg_flags & MAN_PG_SWITCHING) {
660003831d35Sstevel 		status = EAGAIN;
660103831d35Sstevel 		goto exit;
660203831d35Sstevel 	}
660303831d35Sstevel 
660403831d35Sstevel 	ap = man_find_active_path(mpg1->mpg_pathp);
660503831d35Sstevel 	if (ap == NULL) {
660603831d35Sstevel 		/*
660703831d35Sstevel 		 * This is the first time a path has been activated for
660803831d35Sstevel 		 * this pathgroup. Initialize all upper streams dest
660903831d35Sstevel 		 * structure for this pathgroup so autoswitch will find
661003831d35Sstevel 		 * them.
661103831d35Sstevel 		 */
661203831d35Sstevel 		mp->mp_device.mdev_state |= MDEV_ACTIVE;
661303831d35Sstevel 		man_add_dests(mpg1);
661403831d35Sstevel 		goto exit;
661503831d35Sstevel 	}
661603831d35Sstevel 
661703831d35Sstevel 	/*
661803831d35Sstevel 	 * Path already active, nothing to do.
661903831d35Sstevel 	 */
662003831d35Sstevel 	if (ap == mp)
662103831d35Sstevel 		goto exit;
662203831d35Sstevel 
662303831d35Sstevel 	/*
662403831d35Sstevel 	 * Try to autoswitch to requested device. Set flags and refcnt.
662503831d35Sstevel 	 * Cleared in man_iswitch when SWITCH completes.
662603831d35Sstevel 	 */
662703831d35Sstevel 	manp->man_refcnt++;
662803831d35Sstevel 	mpg1->mpg_flags |= MAN_PG_SWITCHING;
662903831d35Sstevel 
663003831d35Sstevel 	/*
663103831d35Sstevel 	 * Switch to path specified.
663203831d35Sstevel 	 */
663303831d35Sstevel 	status = man_autoswitch(mpg1, mip->mip_devs, waiter_wp);
663403831d35Sstevel 
663503831d35Sstevel 	if (status != 0) {
663603831d35Sstevel 		/*
663703831d35Sstevel 		 * man_iswitch not going to run, clean up.
663803831d35Sstevel 		 */
663903831d35Sstevel 		manp->man_refcnt--;
664003831d35Sstevel 		mpg1->mpg_flags &= ~MAN_PG_SWITCHING;
664103831d35Sstevel 
664203831d35Sstevel 		if (status == ENODEV) {
664303831d35Sstevel 			/*
664403831d35Sstevel 			 * Device not plumbed isn't really an error. Change
664503831d35Sstevel 			 * active device setting here, since man_iswitch isn't
664603831d35Sstevel 			 * going to be run to do it.
664703831d35Sstevel 			 */
664803831d35Sstevel 			status = 0;
664903831d35Sstevel 			ap->mp_device.mdev_state &= ~MDEV_ACTIVE;
665003831d35Sstevel 			mp->mp_device.mdev_state |= MDEV_ACTIVE;
665103831d35Sstevel 		}
665203831d35Sstevel 	}
665303831d35Sstevel 
665403831d35Sstevel exit:
665503831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_activate: returns %d", status));
665603831d35Sstevel 
665703831d35Sstevel 	return (status);
665803831d35Sstevel }
665903831d35Sstevel 
666003831d35Sstevel static int
man_pg_read(man_pg_t * plp,mi_path_t * mip)666103831d35Sstevel man_pg_read(man_pg_t *plp, mi_path_t *mip)
666203831d35Sstevel {
666303831d35Sstevel 	man_pg_t	*mpg;
666403831d35Sstevel 	man_path_t	*mp;
666503831d35Sstevel 	int		cnt;
666603831d35Sstevel 	int		status = 0;
666703831d35Sstevel 
666803831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
666903831d35Sstevel 
667003831d35Sstevel 	if ((mpg = man_find_pg_by_id(plp, mip->mip_pg_id)) == NULL) {
667103831d35Sstevel 		status = ENODEV;
667203831d35Sstevel 		goto exit;
667303831d35Sstevel 	}
667403831d35Sstevel 
667503831d35Sstevel 	cnt = 0;
667603831d35Sstevel 	for (mp = mpg->mpg_pathp; mp != NULL; mp = mp->mp_next) {
667703831d35Sstevel 		bcopy(&mp->mp_device, &mip->mip_devs[cnt], sizeof (man_dev_t));
667803831d35Sstevel 		if (cnt == mip->mip_ndevs)
667903831d35Sstevel 			break;
668003831d35Sstevel 		cnt++;
668103831d35Sstevel 	}
668203831d35Sstevel 
668303831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_read: pg(0x%p) id(%d) found %d paths",
668403831d35Sstevel 	    (void *)mpg, mpg->mpg_pg_id, cnt));
668503831d35Sstevel 
668603831d35Sstevel 	mip->mip_ndevs = cnt;
668703831d35Sstevel 
668803831d35Sstevel 	/*
668903831d35Sstevel 	 * TBD - What should errno be if user buffer too small ?
669003831d35Sstevel 	 */
669103831d35Sstevel 	if (mp != NULL) {
669203831d35Sstevel 		status = ENOMEM;
669303831d35Sstevel 	}
669403831d35Sstevel 
669503831d35Sstevel exit:
669603831d35Sstevel 
669703831d35Sstevel 	return (status);
669803831d35Sstevel }
669903831d35Sstevel 
670003831d35Sstevel /*
670103831d35Sstevel  * return existing pathgroup, or create it. TBD - Need to update
670203831d35Sstevel  * all of destinations if we added a pathgroup. Also, need to update
670303831d35Sstevel  * all of man_strup if we add a path.
670403831d35Sstevel  *
670503831d35Sstevel  * 	mplpp	- man pathgroup list point to pointer.
670603831d35Sstevel  * 	mpgp	- returns newly created man pathgroup.
670703831d35Sstevel  *	mip	- info to fill in mpgp.
670803831d35Sstevel  */
670903831d35Sstevel static int
man_pg_create(man_pg_t ** mplpp,man_pg_t ** mpgp,mi_path_t * mip)671003831d35Sstevel man_pg_create(man_pg_t **mplpp, man_pg_t **mpgp, mi_path_t *mip)
671103831d35Sstevel {
671203831d35Sstevel 	man_pg_t	*mpg;
671303831d35Sstevel 	man_pg_t	*tpg;
671403831d35Sstevel 	int		status = 0;
671503831d35Sstevel 
671603831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
671703831d35Sstevel 
671803831d35Sstevel 	if (ether_cmp(&mip->mip_eaddr, &zero_ether_addr) == 0) {
671903831d35Sstevel 		cmn_err(CE_NOTE, "man_ioctl: man_pg_create: ether"
672003831d35Sstevel 		    " addresss not set!");
672103831d35Sstevel 		status = EINVAL;
672203831d35Sstevel 		goto exit;
672303831d35Sstevel 	}
672403831d35Sstevel 
672503831d35Sstevel 	mpg = man_kzalloc(sizeof (man_pg_t), KM_NOSLEEP);
672603831d35Sstevel 	if (mpg == NULL) {
672703831d35Sstevel 		status = ENOMEM;
672803831d35Sstevel 		goto exit;
672903831d35Sstevel 	}
673003831d35Sstevel 
673103831d35Sstevel 	mpg->mpg_flags = MAN_PG_IDLE;
673203831d35Sstevel 	mpg->mpg_pg_id = mip->mip_pg_id;
673303831d35Sstevel 	mpg->mpg_man_ppa = mip->mip_man_ppa;
673403831d35Sstevel 	ether_copy(&mip->mip_eaddr, &mpg->mpg_dst_eaddr);
673503831d35Sstevel 
673603831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pg_create: new mpg"));
673703831d35Sstevel 	MAN_DBGCALL(MAN_PATH, man_print_mpg(mpg));
673803831d35Sstevel 
673903831d35Sstevel 	tpg = *mplpp;
674003831d35Sstevel 	if (tpg == NULL) {
674103831d35Sstevel 		*mplpp = mpg;
674203831d35Sstevel 	} else {
674303831d35Sstevel 		while (tpg->mpg_next != NULL)
674403831d35Sstevel 			tpg = tpg->mpg_next;
674503831d35Sstevel 		tpg->mpg_next = mpg;
674603831d35Sstevel 	}
674703831d35Sstevel 
674803831d35Sstevel exit:
674903831d35Sstevel 	*mpgp = mpg;
675003831d35Sstevel 
675103831d35Sstevel 	return (status);
675203831d35Sstevel }
675303831d35Sstevel 
675403831d35Sstevel /*
675503831d35Sstevel  * Return pointer to pathgroup containing mdevp, null otherwise. Also,
675603831d35Sstevel  * if a path pointer is passed in, set it to matching path in pathgroup.
675703831d35Sstevel  *
675803831d35Sstevel  * Called holding man_lock.
675903831d35Sstevel  */
676003831d35Sstevel static man_pg_t *
man_find_path_by_dev(man_pg_t * plp,man_dev_t * mdevp,man_path_t ** mpp)676103831d35Sstevel man_find_path_by_dev(man_pg_t *plp, man_dev_t *mdevp, man_path_t **mpp)
676203831d35Sstevel {
676303831d35Sstevel 	man_pg_t	*mpg;
676403831d35Sstevel 	man_path_t	*mp;
676503831d35Sstevel 
676603831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
676703831d35Sstevel 	for (mpg = plp; mpg != NULL; mpg = mpg->mpg_next) {
676803831d35Sstevel 		for (mp  = mpg->mpg_pathp; mp != NULL; mp = mp->mp_next) {
676903831d35Sstevel 			if (mp->mp_device.mdev_major == mdevp->mdev_major &&
677003831d35Sstevel 			    mp->mp_device.mdev_ppa == mdevp->mdev_ppa) {
677103831d35Sstevel 
677203831d35Sstevel 				if (mpp != NULL)
677303831d35Sstevel 					*mpp = mp;
677403831d35Sstevel 				return (mpg);
677503831d35Sstevel 			}
677603831d35Sstevel 		}
677703831d35Sstevel 	}
677803831d35Sstevel 
677903831d35Sstevel 	return (NULL);
678003831d35Sstevel }
678103831d35Sstevel 
678203831d35Sstevel /*
678303831d35Sstevel  * Return pointer to pathgroup assigned to destination, null if not found.
678403831d35Sstevel  *
678503831d35Sstevel  * Called holding man_lock.
678603831d35Sstevel  */
678703831d35Sstevel static man_pg_t *
man_find_pg_by_id(man_pg_t * mpg,int pg_id)678803831d35Sstevel man_find_pg_by_id(man_pg_t *mpg, int pg_id)
678903831d35Sstevel {
679003831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
679103831d35Sstevel 	for (; mpg != NULL; mpg = mpg->mpg_next) {
679203831d35Sstevel 		if (mpg->mpg_pg_id == pg_id)
679303831d35Sstevel 			return (mpg);
679403831d35Sstevel 	}
679503831d35Sstevel 
679603831d35Sstevel 	return (NULL);
679703831d35Sstevel }
679803831d35Sstevel 
679903831d35Sstevel static man_path_t *
man_find_path_by_ppa(man_path_t * mplist,int ppa)680003831d35Sstevel man_find_path_by_ppa(man_path_t *mplist, int ppa)
680103831d35Sstevel {
680203831d35Sstevel 	man_path_t	*mp;
680303831d35Sstevel 
680403831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
680503831d35Sstevel 	for (mp = mplist; mp != NULL; mp = mp->mp_next) {
680603831d35Sstevel 		if (mp->mp_device.mdev_ppa == ppa)
680703831d35Sstevel 			return (mp);
680803831d35Sstevel 	}
680903831d35Sstevel 
681003831d35Sstevel 	return (NULL);
681103831d35Sstevel }
681203831d35Sstevel 
681303831d35Sstevel static man_path_t *
man_find_active_path(man_path_t * mplist)681403831d35Sstevel man_find_active_path(man_path_t *mplist)
681503831d35Sstevel {
681603831d35Sstevel 	man_path_t	*mp;
681703831d35Sstevel 
681803831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
681903831d35Sstevel 	for (mp = mplist; mp != NULL; mp = mp->mp_next)
682003831d35Sstevel 		if (mp->mp_device.mdev_state & MDEV_ACTIVE)
682103831d35Sstevel 			return (mp);
682203831d35Sstevel 
682303831d35Sstevel 	return (NULL);
682403831d35Sstevel }
682503831d35Sstevel 
682603831d35Sstevel /*
682703831d35Sstevel  * Try and find an alternate path.
682803831d35Sstevel  */
682903831d35Sstevel static man_path_t *
man_find_alternate_path(man_path_t * mlp)683003831d35Sstevel man_find_alternate_path(man_path_t *mlp)
683103831d35Sstevel {
683203831d35Sstevel 	man_path_t	*ap;		/* Active path */
683303831d35Sstevel 	man_path_t	*np;		/* New alternate path */
683403831d35Sstevel 	man_path_t	*fp = NULL;	/* LRU failed path */
683503831d35Sstevel 
683603831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
683703831d35Sstevel 	ap = man_find_active_path(mlp);
683803831d35Sstevel 
683903831d35Sstevel 	/*
684003831d35Sstevel 	 * Find a non-failed path, or the lru failed path and switch to it.
684103831d35Sstevel 	 */
684203831d35Sstevel 	for (np = mlp; np != NULL; np = np->mp_next) {
684303831d35Sstevel 		if (np == ap)
684403831d35Sstevel 			continue;
684503831d35Sstevel 
684603831d35Sstevel 		if (np->mp_device.mdev_state == MDEV_ASSIGNED)
684703831d35Sstevel 			goto exit;
684803831d35Sstevel 
684903831d35Sstevel 		if (np->mp_device.mdev_state & MDEV_FAILED) {
685003831d35Sstevel 			if (fp == NULL)
685103831d35Sstevel 				fp = np;
685203831d35Sstevel 			else
685303831d35Sstevel 				if (fp->mp_lru > np->mp_lru)
685403831d35Sstevel 						fp = np;
685503831d35Sstevel 		}
685603831d35Sstevel 	}
685703831d35Sstevel 
685803831d35Sstevel 	/*
685903831d35Sstevel 	 * Nowhere to switch to.
686003831d35Sstevel 	 */
686103831d35Sstevel 	if (np == NULL && (np =  fp) == NULL)
686203831d35Sstevel 		goto exit;
686303831d35Sstevel 
686403831d35Sstevel exit:
686503831d35Sstevel 	return (np);
686603831d35Sstevel }
686703831d35Sstevel 
686803831d35Sstevel /*
686903831d35Sstevel  * Assumes caller has verified existence.
687003831d35Sstevel  */
687103831d35Sstevel static void
man_path_remove(man_path_t ** lpp,man_path_t * mp)687203831d35Sstevel man_path_remove(man_path_t **lpp, man_path_t *mp)
687303831d35Sstevel {
687403831d35Sstevel 	man_path_t	*tp;
687503831d35Sstevel 	man_path_t	*tpp;
687603831d35Sstevel 
687703831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
687803831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_path_remove: removing path"));
687903831d35Sstevel 	MAN_DBGCALL(MAN_PATH, man_print_path(mp));
688003831d35Sstevel 
688103831d35Sstevel 	tp = tpp = *lpp;
688203831d35Sstevel 	if (tp == mp) {
688303831d35Sstevel 		*lpp = tp->mp_next;
688403831d35Sstevel 		goto exit;
688503831d35Sstevel 	}
688603831d35Sstevel 
688703831d35Sstevel 	for (tp = tp->mp_next; tp != NULL; tp = tp->mp_next) {
688803831d35Sstevel 		if (tp == mp)
688903831d35Sstevel 			break;
689003831d35Sstevel 		tpp = tp;
689103831d35Sstevel 	}
689203831d35Sstevel 
689303831d35Sstevel 	ASSERT(tp != NULL);
689403831d35Sstevel 
689503831d35Sstevel 	tpp->mp_next = tp->mp_next;
689603831d35Sstevel 	tp->mp_next = NULL;
689703831d35Sstevel 
689803831d35Sstevel exit:
689903831d35Sstevel 	man_path_kstat_uninit(tp);
690003831d35Sstevel 	man_kfree(tp, sizeof (man_path_t));
690103831d35Sstevel 
690203831d35Sstevel }
690303831d35Sstevel 
690403831d35Sstevel /*
690503831d35Sstevel  * Insert path into list, ascending order by ppa.
690603831d35Sstevel  */
690703831d35Sstevel static void
man_path_insert(man_path_t ** lpp,man_path_t * mp)690803831d35Sstevel man_path_insert(man_path_t **lpp, man_path_t *mp)
690903831d35Sstevel {
691003831d35Sstevel 	man_path_t	*tp;
691103831d35Sstevel 	man_path_t	*tpp;
691203831d35Sstevel 
691303831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
691403831d35Sstevel 	if (*lpp == NULL) {
691503831d35Sstevel 		*lpp = mp;
691603831d35Sstevel 		return;
691703831d35Sstevel 	}
691803831d35Sstevel 
691903831d35Sstevel 	tp = tpp = *lpp;
692003831d35Sstevel 	if (tp->mp_device.mdev_ppa > mp->mp_device.mdev_ppa) {
692103831d35Sstevel 		mp->mp_next = tp;
692203831d35Sstevel 		*lpp = mp;
692303831d35Sstevel 		return;
692403831d35Sstevel 	}
692503831d35Sstevel 
692603831d35Sstevel 	for (tp = tp->mp_next; tp != NULL; tp =  tp->mp_next) {
692703831d35Sstevel 		if (tp->mp_device.mdev_ppa > mp->mp_device.mdev_ppa)
692803831d35Sstevel 			break;
692903831d35Sstevel 		tpp = tp;
693003831d35Sstevel 	}
693103831d35Sstevel 
693203831d35Sstevel 	if (tp == NULL) {
693303831d35Sstevel 		tpp->mp_next = mp;
693403831d35Sstevel 	} else {
693503831d35Sstevel 		tpp->mp_next = mp;
693603831d35Sstevel 		mp->mp_next = tp;
693703831d35Sstevel 	}
693803831d35Sstevel }
693903831d35Sstevel 
694003831d35Sstevel /*
694103831d35Sstevel  * Merge npp into lpp, ascending order by ppa. Assumes no
694203831d35Sstevel  * duplicates in either list.
694303831d35Sstevel  */
694403831d35Sstevel static void
man_path_merge(man_path_t ** lpp,man_path_t * np)694503831d35Sstevel man_path_merge(man_path_t **lpp, man_path_t *np)
694603831d35Sstevel {
694703831d35Sstevel 	man_path_t	*tmp;
694803831d35Sstevel 
694903831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
695003831d35Sstevel 	while (np != NULL) {
695103831d35Sstevel 		tmp = np;
695203831d35Sstevel 		np = np->mp_next;
695303831d35Sstevel 		tmp->mp_next = NULL;
695403831d35Sstevel 
695503831d35Sstevel 		man_path_insert(lpp, tmp);
695603831d35Sstevel 	}
695703831d35Sstevel 
695803831d35Sstevel }
695903831d35Sstevel 
696003831d35Sstevel static int
man_path_kstat_init(man_path_t * mpp)696103831d35Sstevel man_path_kstat_init(man_path_t *mpp)
696203831d35Sstevel {
696303831d35Sstevel 
696403831d35Sstevel 	kstat_named_t	*dev_knp;
696503831d35Sstevel 	int		status = 0;
696603831d35Sstevel 
696703831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
696803831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_path_kstat_init: mpp(0x%p)\n", (void *)mpp));
696903831d35Sstevel 
697003831d35Sstevel 	/*
697103831d35Sstevel 	 * Create named kstats for accounting purposes.
697203831d35Sstevel 	 */
697303831d35Sstevel 	dev_knp = man_kzalloc(MAN_NUMSTATS * sizeof (kstat_named_t),
697403831d35Sstevel 	    KM_NOSLEEP);
697503831d35Sstevel 	if (dev_knp == NULL) {
697603831d35Sstevel 		status = ENOMEM;
697703831d35Sstevel 		goto exit;
697803831d35Sstevel 	}
697903831d35Sstevel 	man_kstat_named_init(dev_knp, MAN_NUMSTATS);
698003831d35Sstevel 	mpp->mp_last_knp = dev_knp;
698103831d35Sstevel 
698203831d35Sstevel exit:
698303831d35Sstevel 
698403831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_path_kstat_init: returns %d\n", status));
698503831d35Sstevel 
698603831d35Sstevel 	return (status);
698703831d35Sstevel }
698803831d35Sstevel 
698903831d35Sstevel static void
man_path_kstat_uninit(man_path_t * mp)699003831d35Sstevel man_path_kstat_uninit(man_path_t *mp)
699103831d35Sstevel {
699203831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
699303831d35Sstevel 	man_kfree(mp->mp_last_knp, MAN_NUMSTATS * sizeof (kstat_named_t));
699403831d35Sstevel }
699503831d35Sstevel 
699603831d35Sstevel /*
699703831d35Sstevel  * man_work_alloc - allocate and initiate a work request structure
699803831d35Sstevel  *
699903831d35Sstevel  *	type - type of request to allocate
700003831d35Sstevel  *	returns	- success - ptr to an initialized work structure
700103831d35Sstevel  *		- failure - NULL
700203831d35Sstevel  */
700303831d35Sstevel man_work_t *
man_work_alloc(int type,int kmflag)700403831d35Sstevel man_work_alloc(int type, int kmflag)
700503831d35Sstevel {
700603831d35Sstevel 	man_work_t	*wp;
700703831d35Sstevel 
700803831d35Sstevel 	wp = man_kzalloc(sizeof (man_work_t), kmflag);
700903831d35Sstevel 	if (wp == NULL)
701003831d35Sstevel 		goto exit;
701103831d35Sstevel 
701203831d35Sstevel 	cv_init(&wp->mw_cv, NULL, CV_DRIVER, NULL); \
701303831d35Sstevel 	wp->mw_type = type;
701403831d35Sstevel 
701503831d35Sstevel exit:
701603831d35Sstevel 	return (wp);
701703831d35Sstevel }
701803831d35Sstevel 
701903831d35Sstevel /*
702003831d35Sstevel  * man_work_free - deallocate a work request structure
702103831d35Sstevel  *
702203831d35Sstevel  *	wp - ptr to work structure to be freed
702303831d35Sstevel  */
702403831d35Sstevel void
man_work_free(man_work_t * wp)702503831d35Sstevel man_work_free(man_work_t *wp)
702603831d35Sstevel {
702703831d35Sstevel 	cv_destroy(&wp->mw_cv);
702803831d35Sstevel 	man_kfree((void *)wp, sizeof (man_work_t));
702903831d35Sstevel }
703003831d35Sstevel 
703103831d35Sstevel /*
703203831d35Sstevel  * Post work to a work queue.  The man_bwork sleeps on
703303831d35Sstevel  * man_bwork_q->q_cv, and work requesters may sleep on mw_cv.
703403831d35Sstevel  * The man_lock is used to protect both cv's.
703503831d35Sstevel  */
703603831d35Sstevel void
man_work_add(man_workq_t * q,man_work_t * wp)703703831d35Sstevel man_work_add(man_workq_t *q, man_work_t *wp)
703803831d35Sstevel {
703903831d35Sstevel 	man_work_t	*lp = q->q_work;
704003831d35Sstevel 
704103831d35Sstevel 	if (lp) {
704203831d35Sstevel 		while (lp->mw_next != NULL)
704303831d35Sstevel 			lp = lp->mw_next;
704403831d35Sstevel 
704503831d35Sstevel 		lp->mw_next = wp;
704603831d35Sstevel 
704703831d35Sstevel 	} else {
704803831d35Sstevel 		q->q_work = wp;
704903831d35Sstevel 	}
705003831d35Sstevel 
705103831d35Sstevel 	/*
705203831d35Sstevel 	 * cv_signal for man_bwork_q, qenable for man_iwork_q
705303831d35Sstevel 	 */
705403831d35Sstevel 	if (q == man_bwork_q) {
705503831d35Sstevel 		cv_signal(&q->q_cv);
705603831d35Sstevel 
705703831d35Sstevel 	} else {	/* q == man_iwork_q */
705803831d35Sstevel 
705903831d35Sstevel 		if (man_ctl_wq != NULL)
706003831d35Sstevel 			qenable(man_ctl_wq);
706103831d35Sstevel 	}
706203831d35Sstevel 
706303831d35Sstevel }
706403831d35Sstevel 
706503831d35Sstevel /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
706603831d35Sstevel /*
706703831d35Sstevel  * ndd support functions to get/set parameters
706803831d35Sstevel  */
706903831d35Sstevel 
707003831d35Sstevel /*
707103831d35Sstevel  * Register each element of the parameter array with the
707203831d35Sstevel  * named dispatch handler. Each element is loaded using
707303831d35Sstevel  * nd_load()
707403831d35Sstevel  *
707503831d35Sstevel  * 	cnt	- the number of elements present in the parameter array
707603831d35Sstevel  */
707703831d35Sstevel static int
man_param_register(param_t * manpa,int cnt)707803831d35Sstevel man_param_register(param_t *manpa, int cnt)
707903831d35Sstevel {
708003831d35Sstevel 	int	i;
708103831d35Sstevel 	ndgetf_t getp;
708203831d35Sstevel 	ndsetf_t setp;
708303831d35Sstevel 	int	status = B_TRUE;
708403831d35Sstevel 
708503831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_param_register: manpa(0x%p) cnt %d\n",
708603831d35Sstevel 	    (void *)manpa, cnt));
708703831d35Sstevel 
708803831d35Sstevel 	getp = man_param_get;
708903831d35Sstevel 
709003831d35Sstevel 	for (i = 0; i < cnt; i++, manpa++) {
709103831d35Sstevel 		switch (man_param_display[i]) {
709203831d35Sstevel 		case MAN_NDD_GETABLE:
709303831d35Sstevel 			setp = NULL;
709403831d35Sstevel 			break;
709503831d35Sstevel 
709603831d35Sstevel 		case MAN_NDD_SETABLE:
709703831d35Sstevel 			setp = man_param_set;
709803831d35Sstevel 			break;
709903831d35Sstevel 
710003831d35Sstevel 		default:
710103831d35Sstevel 			continue;
710203831d35Sstevel 		}
710303831d35Sstevel 
710403831d35Sstevel 		if (!nd_load(&man_ndlist, manpa->param_name, getp,
710503831d35Sstevel 		    setp, (caddr_t)manpa)) {
710603831d35Sstevel 
710703831d35Sstevel 			(void) man_nd_free(&man_ndlist);
710803831d35Sstevel 			status = B_FALSE;
710903831d35Sstevel 			goto exit;
711003831d35Sstevel 		}
711103831d35Sstevel 	}
711203831d35Sstevel 
711303831d35Sstevel 	if (!nd_load(&man_ndlist, "man_pathgroups_report",
711403831d35Sstevel 	    man_pathgroups_report, NULL, NULL)) {
711503831d35Sstevel 
711603831d35Sstevel 		(void) man_nd_free(&man_ndlist);
711703831d35Sstevel 		status = B_FALSE;
711803831d35Sstevel 		goto exit;
711903831d35Sstevel 	}
712003831d35Sstevel 
712103831d35Sstevel 	if (!nd_load(&man_ndlist, "man_set_active_path",
712203831d35Sstevel 	    NULL, man_set_active_path, NULL)) {
712303831d35Sstevel 
712403831d35Sstevel 		(void) man_nd_free(&man_ndlist);
712503831d35Sstevel 		status = B_FALSE;
712603831d35Sstevel 		goto exit;
712703831d35Sstevel 	}
712803831d35Sstevel 
712903831d35Sstevel 	if (!nd_load(&man_ndlist, "man_get_hostinfo",
713003831d35Sstevel 	    man_get_hostinfo, NULL, NULL)) {
713103831d35Sstevel 
713203831d35Sstevel 		(void) man_nd_free(&man_ndlist);
713303831d35Sstevel 		status = B_FALSE;
713403831d35Sstevel 		goto exit;
713503831d35Sstevel 	}
713603831d35Sstevel 
713703831d35Sstevel exit:
713803831d35Sstevel 
713903831d35Sstevel 	MAN_DBG(MAN_CONFIG, ("man_param_register: returns %d\n", status));
714003831d35Sstevel 
714103831d35Sstevel 	return (status);
714203831d35Sstevel }
714303831d35Sstevel 
714403831d35Sstevel static void
man_nd_getset(queue_t * wq,mblk_t * mp)714503831d35Sstevel man_nd_getset(queue_t *wq, mblk_t *mp)
714603831d35Sstevel {
714703831d35Sstevel 
714803831d35Sstevel 	if (!nd_getset(wq, man_ndlist, mp))
714903831d35Sstevel 		miocnak(wq, mp, 0, ENOENT);
715003831d35Sstevel 	else
715103831d35Sstevel 		qreply(wq, mp);
715203831d35Sstevel }
715303831d35Sstevel 
715403831d35Sstevel /*ARGSUSED*/
715503831d35Sstevel static int
man_pathgroups_report(queue_t * wq,mblk_t * mp,caddr_t cp,cred_t * cr)715603831d35Sstevel man_pathgroups_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
715703831d35Sstevel {
715803831d35Sstevel 
715903831d35Sstevel 	man_t		*manp;
716003831d35Sstevel 	man_pg_t	*mpg;
716103831d35Sstevel 	int		i;
716203831d35Sstevel 	char		pad[] = "                 "; /* 17 spaces */
716303831d35Sstevel 	int		pad_end;
716403831d35Sstevel 
716503831d35Sstevel 
716603831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pathgroups_report: wq(0x%p) mp(0x%p)"
716703831d35Sstevel 	    " caddr 0x%p", (void *)wq, (void *)mp, (void *)cp));
716803831d35Sstevel 
716903831d35Sstevel 	(void) mi_mpprintf(mp, "MAN Pathgroup report: (* == failed)");
717003831d35Sstevel 	(void) mi_mpprintf(mp, "====================================="
717103831d35Sstevel 	    "==========================================");
717203831d35Sstevel 
717303831d35Sstevel 	mutex_enter(&man_lock);
717403831d35Sstevel 
717503831d35Sstevel 	for (i = 0; i < 2; i++) {
717603831d35Sstevel 		manp = ddi_get_soft_state(man_softstate, i);
717703831d35Sstevel 		if (manp == NULL)
717803831d35Sstevel 			continue;
717903831d35Sstevel 
718003831d35Sstevel 	(void) mi_mpprintf(mp,
718103831d35Sstevel 	    "Interface\tDestination\t\tActive Path\tAlternate Paths");
718203831d35Sstevel 	(void) mi_mpprintf(mp, "---------------------------------------"
718303831d35Sstevel 	    "----------------------------------------");
718403831d35Sstevel 
718503831d35Sstevel 		for (mpg = manp->man_pg; mpg != NULL; mpg = mpg->mpg_next) {
718603831d35Sstevel 
718703831d35Sstevel 			(void) mi_mpprintf(mp, "%s%d\t\t",
718803831d35Sstevel 			    ddi_major_to_name(manp->man_meta_major),
718903831d35Sstevel 			    manp->man_meta_ppa);
719003831d35Sstevel 
719103831d35Sstevel 			if (man_is_on_domain) {
719203831d35Sstevel 				(void) mi_mpprintf_nr(mp, "Master SSC\t");
719303831d35Sstevel 				man_preport(mpg->mpg_pathp, mp);
719403831d35Sstevel 			} else {
719503831d35Sstevel 				if (i == 0) {
719603831d35Sstevel 					pad_end = 17 - strlen(ether_sprintf(
719703831d35Sstevel 					    &mpg->mpg_dst_eaddr));
719803831d35Sstevel 					if (pad_end < 0 || pad_end > 16)
719903831d35Sstevel 					pad_end = 0;
720003831d35Sstevel 					pad[pad_end] = '\0';
720103831d35Sstevel 
720203831d35Sstevel 					(void) mi_mpprintf_nr(mp, "%c %s%s",
720303831d35Sstevel 					    mpg->mpg_pg_id + 'A',
720403831d35Sstevel 					    ether_sprintf(&mpg->mpg_dst_eaddr),
720503831d35Sstevel 					    pad);
720603831d35Sstevel 
720703831d35Sstevel 					pad[pad_end] = ' ';
720803831d35Sstevel 				} else {
720919397407SSherry Moore 					(void) mi_mpprintf_nr(mp,
721019397407SSherry Moore 					    "Other SSC\t");
721103831d35Sstevel 				}
721203831d35Sstevel 				man_preport(mpg->mpg_pathp, mp);
721303831d35Sstevel 			}
721403831d35Sstevel 			(void) mi_mpprintf_nr(mp, "\n");
721503831d35Sstevel 		}
721603831d35Sstevel 	}
721703831d35Sstevel 
721803831d35Sstevel 	mutex_exit(&man_lock);
721903831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_pathgroups_report: returns"));
722003831d35Sstevel 
722103831d35Sstevel 	return (0);
722203831d35Sstevel }
722303831d35Sstevel 
722403831d35Sstevel static void
man_preport(man_path_t * plist,mblk_t * mp)722503831d35Sstevel man_preport(man_path_t *plist, mblk_t *mp)
722603831d35Sstevel {
722703831d35Sstevel 	man_path_t	*ap;
722803831d35Sstevel 
722903831d35Sstevel 	ap = man_find_active_path(plist);
723003831d35Sstevel 	/*
723103831d35Sstevel 	 * Active path
723203831d35Sstevel 	 */
723303831d35Sstevel 	if (ap != NULL) {
723403831d35Sstevel 		(void) mi_mpprintf_nr(mp, "\t%s%d\t\t",
723503831d35Sstevel 		    ddi_major_to_name(ap->mp_device.mdev_major),
723603831d35Sstevel 		    ap->mp_device.mdev_ppa);
723703831d35Sstevel 	} else {
723803831d35Sstevel 		(void) mi_mpprintf_nr(mp, "None \t");
723903831d35Sstevel 	}
724003831d35Sstevel 
724103831d35Sstevel 	/*
724203831d35Sstevel 	 * Alternate Paths.
724303831d35Sstevel 	 */
724403831d35Sstevel 	while (plist != NULL) {
724503831d35Sstevel 		(void) mi_mpprintf_nr(mp, "%s%d exp %d",
724603831d35Sstevel 		    ddi_major_to_name(plist->mp_device.mdev_major),
724703831d35Sstevel 		    plist->mp_device.mdev_ppa,
724803831d35Sstevel 		    plist->mp_device.mdev_exp_id);
724903831d35Sstevel 		if (plist->mp_device.mdev_state & MDEV_FAILED)
725003831d35Sstevel 			(void) mi_mpprintf_nr(mp, "*");
725103831d35Sstevel 		plist = plist->mp_next;
725203831d35Sstevel 		if (plist)
725303831d35Sstevel 			(void) mi_mpprintf_nr(mp, ", ");
725403831d35Sstevel 	}
725503831d35Sstevel }
725603831d35Sstevel 
725703831d35Sstevel /*
725803831d35Sstevel  * NDD request to set active path. Calling context is man_ioctl, so we are
725903831d35Sstevel  * exclusive in the inner perimeter.
726003831d35Sstevel  *
726103831d35Sstevel  *	Syntax is "ndd -set /dev/dman <man ppa> <pg_id> <phys ppa>"
726203831d35Sstevel  */
726303831d35Sstevel /* ARGSUSED3 */
726403831d35Sstevel static int
man_set_active_path(queue_t * wq,mblk_t * mp,char * value,caddr_t cp,cred_t * cr)726503831d35Sstevel man_set_active_path(queue_t *wq, mblk_t *mp, char *value, caddr_t cp,
726603831d35Sstevel     cred_t *cr)
726703831d35Sstevel {
726803831d35Sstevel 	char		*end, *meta_ppap, *phys_ppap, *pg_idp;
726903831d35Sstevel 	int		meta_ppa;
727003831d35Sstevel 	int		phys_ppa;
727103831d35Sstevel 	int		pg_id;
727203831d35Sstevel 	man_t		*manp;
727303831d35Sstevel 	man_pg_t	*mpg;
727403831d35Sstevel 	man_path_t	*np;
727503831d35Sstevel 	mi_path_t	mpath;
727603831d35Sstevel 	int		status = 0;
727703831d35Sstevel 
727803831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_set_active_path: wq(0x%p) mp(0x%p)"
727903831d35Sstevel 	    " args %s", (void *)wq, (void *)mp, value));
728003831d35Sstevel 
728103831d35Sstevel 	meta_ppap = value;
728203831d35Sstevel 
728303831d35Sstevel 	if ((pg_idp = strchr(value, ' ')) == NULL) {
728403831d35Sstevel 		status = EINVAL;
728503831d35Sstevel 		goto exit;
728603831d35Sstevel 	}
728703831d35Sstevel 
728803831d35Sstevel 	*pg_idp++ = '\0';
728903831d35Sstevel 
729003831d35Sstevel 	if ((phys_ppap = strchr(pg_idp, ' ')) == NULL) {
729103831d35Sstevel 		status = EINVAL;
729203831d35Sstevel 		goto exit;
729303831d35Sstevel 	}
729403831d35Sstevel 
729503831d35Sstevel 	*phys_ppap++ = '\0';
729603831d35Sstevel 
729703831d35Sstevel 	meta_ppa = (int)mi_strtol(meta_ppap, &end, 10);
729803831d35Sstevel 	pg_id = (int)mi_strtol(pg_idp, &end, 10);
729903831d35Sstevel 	phys_ppa = (int)mi_strtol(phys_ppap, &end, 10);
730003831d35Sstevel 
730103831d35Sstevel 	mutex_enter(&man_lock);
730203831d35Sstevel 	manp = ddi_get_soft_state(man_softstate, meta_ppa);
730303831d35Sstevel 	if (manp == NULL || manp->man_pg == NULL) {
730403831d35Sstevel 		status = EINVAL;
730503831d35Sstevel 		mutex_exit(&man_lock);
730603831d35Sstevel 		goto exit;
730703831d35Sstevel 	}
730803831d35Sstevel 
730903831d35Sstevel 	mpg = man_find_pg_by_id(manp->man_pg, pg_id);
731003831d35Sstevel 	if (mpg == NULL) {
731103831d35Sstevel 		status = EINVAL;
731203831d35Sstevel 		mutex_exit(&man_lock);
731303831d35Sstevel 		goto exit;
731403831d35Sstevel 	}
731503831d35Sstevel 
731603831d35Sstevel 	np = man_find_path_by_ppa(mpg->mpg_pathp, phys_ppa);
731703831d35Sstevel 
731803831d35Sstevel 	if (np == NULL) {
731903831d35Sstevel 		status = EINVAL;
732003831d35Sstevel 		mutex_exit(&man_lock);
732103831d35Sstevel 		goto exit;
732203831d35Sstevel 	}
732303831d35Sstevel 
732403831d35Sstevel 	mpath.mip_cmd = MI_PATH_ACTIVATE;
732503831d35Sstevel 	mpath.mip_pg_id = pg_id;
732603831d35Sstevel 	mpath.mip_man_ppa = meta_ppa;
732703831d35Sstevel 	mpath.mip_devs[0] = np->mp_device;
732803831d35Sstevel 	mpath.mip_ndevs = 1;
732903831d35Sstevel 
733003831d35Sstevel 	status = man_pg_cmd(&mpath, NULL);
733103831d35Sstevel 	mutex_exit(&man_lock);
733203831d35Sstevel 
733303831d35Sstevel exit:
733403831d35Sstevel 
733503831d35Sstevel 	MAN_DBG(MAN_PATH, ("man_set_active_path: returns %d", status));
733603831d35Sstevel 
733703831d35Sstevel 	return (status);
733803831d35Sstevel }
733903831d35Sstevel 
734003831d35Sstevel /*
734103831d35Sstevel  * Dump out the contents of the IOSRAM handoff structure. Note that if
734203831d35Sstevel  * anything changes here, you must make sure that the sysinit script
734303831d35Sstevel  * stays in sync with this output.
734403831d35Sstevel  */
734503831d35Sstevel /* ARGSUSED */
734603831d35Sstevel static int
man_get_hostinfo(queue_t * wq,mblk_t * mp,caddr_t cp,cred_t * cr)734703831d35Sstevel man_get_hostinfo(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
734803831d35Sstevel {
734903831d35Sstevel 	manc_t	manc;
735003831d35Sstevel 	char	*ipaddr;
735103831d35Sstevel 	char	ipv6addr[INET6_ADDRSTRLEN];
735203831d35Sstevel 	int	i;
735303831d35Sstevel 	int	status;
735403831d35Sstevel 
735503831d35Sstevel 	if (!man_is_on_domain)
735603831d35Sstevel 		return (0);
735703831d35Sstevel 
735803831d35Sstevel 	if (status = man_get_iosram(&manc)) {
735903831d35Sstevel 		return (status);
736003831d35Sstevel 	}
736103831d35Sstevel 
7362*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_magic = 0x%x", manc.manc_magic);
7363*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_version = 0%d", manc.manc_version);
7364*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_csum = 0x%x", manc.manc_csum);
736503831d35Sstevel 
736603831d35Sstevel 	if (manc.manc_ip_type == AF_INET) {
736703831d35Sstevel 		in_addr_t	netnum;
736803831d35Sstevel 
7369*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_ip_type = AF_INET");
737003831d35Sstevel 
737103831d35Sstevel 		ipaddr = man_inet_ntoa(manc.manc_dom_ipaddr);
7372*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_dom_ipaddr = %s", ipaddr);
737303831d35Sstevel 
737403831d35Sstevel 		ipaddr = man_inet_ntoa(manc.manc_dom_ip_netmask);
7375*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_dom_ip_netmask = %s", ipaddr);
737603831d35Sstevel 
737703831d35Sstevel 		netnum = manc.manc_dom_ipaddr & manc.manc_dom_ip_netmask;
737803831d35Sstevel 		ipaddr = man_inet_ntoa(netnum);
7379*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_dom_ip_netnum = %s", ipaddr);
738003831d35Sstevel 
738103831d35Sstevel 		ipaddr = man_inet_ntoa(manc.manc_sc_ipaddr);
7382*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_sc_ipaddr = %s", ipaddr);
738303831d35Sstevel 
738403831d35Sstevel 	} else if (manc.manc_ip_type == AF_INET6) {
738503831d35Sstevel 
7386*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_ip_type = AF_INET6");
738703831d35Sstevel 
738803831d35Sstevel 		(void) inet_ntop(AF_INET6, (void *)&manc.manc_dom_ipv6addr,
738903831d35Sstevel 		    ipv6addr, INET6_ADDRSTRLEN);
7390*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_dom_ipv6addr = %s", ipv6addr);
739103831d35Sstevel 
7392*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_dom_ipv6_netmask = %d",
739303831d35Sstevel 		    manc.manc_dom_ipv6_netmask.s6_addr[0]);
739403831d35Sstevel 
739503831d35Sstevel 		(void) inet_ntop(AF_INET6, (void *)&manc.manc_sc_ipv6addr,
739603831d35Sstevel 		    ipv6addr, INET6_ADDRSTRLEN);
7397*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_sc_ipv6addr = %s", ipv6addr);
739803831d35Sstevel 
739903831d35Sstevel 	} else {
740003831d35Sstevel 
7401*07d06da5SSurya Prakki 		(void) mi_mpprintf(mp, "manc_ip_type = NONE");
740203831d35Sstevel 	}
740303831d35Sstevel 
7404*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_dom_eaddr = %s",
740503831d35Sstevel 	    ether_sprintf(&manc.manc_dom_eaddr));
7406*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_sc_eaddr = %s",
740703831d35Sstevel 	    ether_sprintf(&manc.manc_sc_eaddr));
740803831d35Sstevel 
7409*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_iob_bitmap = 0x%x\tio boards = ",
741003831d35Sstevel 	    manc.manc_iob_bitmap);
741103831d35Sstevel 	for (i = 0; i < MAN_MAX_EXPANDERS; i++) {
741203831d35Sstevel 		if ((manc.manc_iob_bitmap >> i) & 0x1) {
7413*07d06da5SSurya Prakki 			(void) mi_mpprintf_nr(mp, "%d.1, ", i);
741403831d35Sstevel 		}
741503831d35Sstevel 	}
7416*07d06da5SSurya Prakki 	(void) mi_mpprintf(mp, "manc_golden_iob = %d", manc.manc_golden_iob);
741703831d35Sstevel 
741803831d35Sstevel 	return (0);
741903831d35Sstevel }
742003831d35Sstevel 
742103831d35Sstevel static char *
man_inet_ntoa(in_addr_t in)742203831d35Sstevel man_inet_ntoa(in_addr_t in)
742303831d35Sstevel {
742403831d35Sstevel 	static char b[18];
742503831d35Sstevel 	unsigned char *p;
742603831d35Sstevel 
742703831d35Sstevel 	p = (unsigned char *)&in;
742803831d35Sstevel 	(void) sprintf(b, "%d.%d.%d.%d", p[0], p[1], p[2], p[3]);
742903831d35Sstevel 	return (b);
743003831d35Sstevel }
743103831d35Sstevel 
743203831d35Sstevel /*
743303831d35Sstevel  * parameter value. cp points to the required parameter.
743403831d35Sstevel  */
743503831d35Sstevel /* ARGSUSED */
743603831d35Sstevel static int
man_param_get(queue_t * q,mblk_t * mp,caddr_t cp,cred_t * cr)743703831d35Sstevel man_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
743803831d35Sstevel {
743903831d35Sstevel 	param_t	*manpa = (param_t *)cp;
744003831d35Sstevel 
744103831d35Sstevel 	(void) mi_mpprintf(mp, "%u", manpa->param_val);
744203831d35Sstevel 	return (0);
744303831d35Sstevel }
744403831d35Sstevel 
744503831d35Sstevel /*
744603831d35Sstevel  * Sets the man parameter to the value in the param_register using
744703831d35Sstevel  * nd_load().
744803831d35Sstevel  */
744903831d35Sstevel /* ARGSUSED */
745003831d35Sstevel static int
man_param_set(queue_t * q,mblk_t * mp,char * value,caddr_t cp,cred_t * cr)745103831d35Sstevel man_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
745203831d35Sstevel {
745303831d35Sstevel 	char *end;
745403831d35Sstevel 	size_t new_value;
745503831d35Sstevel 	param_t	*manpa = (param_t *)cp;
745603831d35Sstevel 
745703831d35Sstevel 	new_value = mi_strtol(value, &end, 10);
745803831d35Sstevel 
745903831d35Sstevel 	if (end == value || new_value < manpa->param_min ||
746003831d35Sstevel 	    new_value > manpa->param_max) {
746103831d35Sstevel 			return (EINVAL);
746203831d35Sstevel 	}
746303831d35Sstevel 
746403831d35Sstevel 	manpa->param_val = new_value;
746503831d35Sstevel 
746603831d35Sstevel 	return (0);
746703831d35Sstevel 
746803831d35Sstevel }
746903831d35Sstevel 
747003831d35Sstevel /*
747103831d35Sstevel  * Free the Named Dispatch Table by calling man_nd_free
747203831d35Sstevel  */
747303831d35Sstevel static void
man_param_cleanup()747403831d35Sstevel man_param_cleanup()
747503831d35Sstevel {
747603831d35Sstevel 	if (man_ndlist != NULL)
747703831d35Sstevel 		nd_free(&man_ndlist);
747803831d35Sstevel }
747903831d35Sstevel 
748003831d35Sstevel /*
748103831d35Sstevel  * Free the table pointed to by 'ndp'
748203831d35Sstevel  */
748303831d35Sstevel static void
man_nd_free(caddr_t * nd_pparam)748403831d35Sstevel man_nd_free(caddr_t *nd_pparam)
748503831d35Sstevel {
748603831d35Sstevel 	ND	*nd;
748703831d35Sstevel 
748803831d35Sstevel 	if ((nd = (ND *)(*nd_pparam)) != NULL) {
748903831d35Sstevel 		if (nd->nd_tbl)
749003831d35Sstevel 			mi_free((char *)nd->nd_tbl);
749103831d35Sstevel 		mi_free((char *)nd);
749203831d35Sstevel 		*nd_pparam = NULL;
749303831d35Sstevel 	}
749403831d35Sstevel }
749503831d35Sstevel 
749603831d35Sstevel 
749703831d35Sstevel /*
749803831d35Sstevel  * man_kstat_update - update the statistics for a meta-interface.
749903831d35Sstevel  *
750003831d35Sstevel  *	ksp - kstats struct
750103831d35Sstevel  *	rw - flag indicating whether stats are to be read or written.
750203831d35Sstevel  *
750303831d35Sstevel  *	returns	0
750403831d35Sstevel  *
750503831d35Sstevel  * The destination specific kstat information is protected by the
750603831d35Sstevel  * perimeter lock, so we submit a work request to get the stats
750703831d35Sstevel  * updated (see man_do_kstats()), and then collect the results
750803831d35Sstevel  * when cv_signal'd. Note that we are doing cv_timedwait_sig()
750903831d35Sstevel  * as a precautionary measure only.
751003831d35Sstevel  */
751103831d35Sstevel static int
man_kstat_update(kstat_t * ksp,int rw)751203831d35Sstevel man_kstat_update(kstat_t *ksp, int rw)
751303831d35Sstevel {
751403831d35Sstevel 	man_t			*manp;		/* per instance data */
751503831d35Sstevel 	man_work_t		*wp;
751603831d35Sstevel 	int			status = 0;
751703831d35Sstevel 	kstat_named_t		*knp;
751803831d35Sstevel 	kstat_named_t		*man_knp;
751903831d35Sstevel 	int			i;
752003831d35Sstevel 
752103831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_kstat_update: %s\n", rw ? "KSTAT_WRITE" :
752203831d35Sstevel 	    "KSTAT_READ"));
752303831d35Sstevel 
752403831d35Sstevel 	mutex_enter(&man_lock);
752503831d35Sstevel 	manp = (man_t *)ksp->ks_private;
752603831d35Sstevel 	manp->man_refcnt++;
752703831d35Sstevel 
752803831d35Sstevel 	/*
752903831d35Sstevel 	 * If the driver has been configured, get kstats updated by inner
753003831d35Sstevel 	 * perimeter prior to retrieving.
753103831d35Sstevel 	 */
753203831d35Sstevel 	if (man_config_state == MAN_CONFIGURED) {
753303831d35Sstevel 		clock_t wait_status;
753403831d35Sstevel 
753503831d35Sstevel 		man_update_path_kstats(manp);
753603831d35Sstevel 		wp = man_work_alloc(MAN_WORK_KSTAT_UPDATE, KM_SLEEP);
753703831d35Sstevel 		wp->mw_arg.a_man_ppa = manp->man_meta_ppa;
753803831d35Sstevel 		wp->mw_flags = MAN_WFLAGS_CVWAITER;
753903831d35Sstevel 		man_work_add(man_iwork_q, wp);
754003831d35Sstevel 
7541d3d50737SRafael Vanoni 		wait_status = cv_reltimedwait_sig(&wp->mw_cv, &man_lock,
7542d3d50737SRafael Vanoni 		    drv_usectohz(manp->man_kstat_waittime), TR_CLOCK_TICK);
754303831d35Sstevel 
754403831d35Sstevel 		if (wp->mw_flags & MAN_WFLAGS_DONE) {
754503831d35Sstevel 			status = wp->mw_status;
754603831d35Sstevel 			man_work_free(wp);
754703831d35Sstevel 		} else {
754803831d35Sstevel 			ASSERT(wait_status <= 0);
754903831d35Sstevel 			wp->mw_flags &= ~MAN_WFLAGS_CVWAITER;
755003831d35Sstevel 			if (wait_status == 0)
755103831d35Sstevel 				status = EINTR;
755203831d35Sstevel 			else {
755303831d35Sstevel 				MAN_DBG(MAN_KSTAT, ("man_kstat_update: "
755403831d35Sstevel 				    "timedout, returning stale stats."));
755503831d35Sstevel 				status = 0;
755603831d35Sstevel 			}
755703831d35Sstevel 		}
755803831d35Sstevel 		if (status)
755903831d35Sstevel 			goto exit;
756003831d35Sstevel 	}
756103831d35Sstevel 
756203831d35Sstevel 	knp = (kstat_named_t *)ksp->ks_data;
756303831d35Sstevel 	man_knp = (kstat_named_t *)manp->man_ksp->ks_data;
756403831d35Sstevel 
756503831d35Sstevel 	if (rw == KSTAT_READ) {
756603831d35Sstevel 		for (i = 0; i < MAN_NUMSTATS; i++) {
756703831d35Sstevel 			knp[i].value.ui64 = man_knp[i].value.ui64;
756803831d35Sstevel 		}
756903831d35Sstevel 	} else {
757003831d35Sstevel 		for (i = 0; i < MAN_NUMSTATS; i++) {
757103831d35Sstevel 			man_knp[i].value.ui64 = knp[i].value.ui64;
757203831d35Sstevel 		}
757303831d35Sstevel 	}
757403831d35Sstevel 
757503831d35Sstevel exit:
757603831d35Sstevel 	manp->man_refcnt--;
757703831d35Sstevel 	mutex_exit(&man_lock);
757803831d35Sstevel 
757903831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_kstat_update: returns %d", status));
758003831d35Sstevel 
758103831d35Sstevel 	return (status);
758203831d35Sstevel }
758303831d35Sstevel 
758403831d35Sstevel /*
758503831d35Sstevel  * Sum destination kstats for all active paths for a given instance of the
758603831d35Sstevel  * MAN driver. Called with perimeter lock.
758703831d35Sstevel  */
758803831d35Sstevel static void
man_do_kstats(man_work_t * wp)758903831d35Sstevel man_do_kstats(man_work_t *wp)
759003831d35Sstevel {
759103831d35Sstevel 	man_t		*manp;
759203831d35Sstevel 	man_pg_t	*mpg;
759303831d35Sstevel 	man_path_t	*mp;
759403831d35Sstevel 
759503831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_do_kstats:"));
759603831d35Sstevel 
759703831d35Sstevel 	mutex_enter(&man_lock);
759803831d35Sstevel 	/*
759903831d35Sstevel 	 * Sync mp_last_knp for each path associated with the MAN instance.
760003831d35Sstevel 	 */
760103831d35Sstevel 	manp = (man_t *)ddi_get_soft_state(man_softstate,
760203831d35Sstevel 	    wp->mw_arg.a_man_ppa);
760303831d35Sstevel 	for (mpg = manp->man_pg; mpg != NULL; mpg = mpg->mpg_next) {
760403831d35Sstevel 
760503831d35Sstevel 		ASSERT(mpg->mpg_man_ppa == manp->man_meta_ppa);
760603831d35Sstevel 
760703831d35Sstevel 		if ((mp = man_find_active_path(mpg->mpg_pathp)) != NULL) {
760803831d35Sstevel 
760903831d35Sstevel 			MAN_DBG(MAN_KSTAT, ("\tkstat: path"));
761003831d35Sstevel 			MAN_DBGCALL(MAN_KSTAT, man_print_path(mp));
761103831d35Sstevel 
761203831d35Sstevel 			/*
761303831d35Sstevel 			 * We just to update the destination statistics here.
761403831d35Sstevel 			 */
761503831d35Sstevel 			man_sum_dests_kstats(mp->mp_last_knp, mpg);
761603831d35Sstevel 		}
761703831d35Sstevel 	}
761803831d35Sstevel 	mutex_exit(&man_lock);
761903831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_do_kstats: returns"));
762003831d35Sstevel }
762103831d35Sstevel 
762203831d35Sstevel /*
762303831d35Sstevel  * Sum device kstats for all active paths for a given instance of the
762403831d35Sstevel  * MAN driver. Called with man_lock.
762503831d35Sstevel  */
762603831d35Sstevel static void
man_update_path_kstats(man_t * manp)762703831d35Sstevel man_update_path_kstats(man_t *manp)
762803831d35Sstevel {
762903831d35Sstevel 	kstat_named_t	*man_knp;
763003831d35Sstevel 	man_pg_t	*mpg;
763103831d35Sstevel 	man_path_t	*mp;
763203831d35Sstevel 
763303831d35Sstevel 	ASSERT(MUTEX_HELD(&man_lock));
763403831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_update_path_kstats:"));
763503831d35Sstevel 
763603831d35Sstevel 	man_knp = (kstat_named_t *)manp->man_ksp->ks_data;
763703831d35Sstevel 
763803831d35Sstevel 	for (mpg = manp->man_pg; mpg != NULL; mpg = mpg->mpg_next) {
763903831d35Sstevel 
764003831d35Sstevel 		ASSERT(mpg->mpg_man_ppa == manp->man_meta_ppa);
764103831d35Sstevel 
764203831d35Sstevel 		if ((mp = man_find_active_path(mpg->mpg_pathp)) != NULL) {
764303831d35Sstevel 
764403831d35Sstevel 			man_update_dev_kstats(man_knp, mp);
764503831d35Sstevel 
764603831d35Sstevel 		}
764703831d35Sstevel 	}
764803831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_update_path_kstats: returns"));
764903831d35Sstevel }
765003831d35Sstevel 
765103831d35Sstevel /*
765203831d35Sstevel  * Update the device kstats.
765303831d35Sstevel  * As man_kstat_update() is called with kstat_chain_lock held,
765403831d35Sstevel  * we can safely update the statistics from the underlying driver here.
765503831d35Sstevel  */
765603831d35Sstevel static void
man_update_dev_kstats(kstat_named_t * man_knp,man_path_t * mp)765703831d35Sstevel man_update_dev_kstats(kstat_named_t *man_knp, man_path_t *mp)
765803831d35Sstevel {
765903831d35Sstevel 	kstat_t		*dev_ksp;
766003831d35Sstevel 	major_t		major;
766103831d35Sstevel 	int		instance;
766203831d35Sstevel 	char		buf[KSTAT_STRLEN];
766303831d35Sstevel 
766403831d35Sstevel 
766503831d35Sstevel 	major = mp->mp_device.mdev_major;
766603831d35Sstevel 	instance = mp->mp_device.mdev_ppa;
766703831d35Sstevel 	(void) sprintf(buf, "%s%d", ddi_major_to_name(major), instance);
766803831d35Sstevel 
766903831d35Sstevel 	dev_ksp = kstat_hold_byname(ddi_major_to_name(major), instance, buf,
767003831d35Sstevel 	    ALL_ZONES);
767103831d35Sstevel 	if (dev_ksp != NULL) {
767203831d35Sstevel 
767303831d35Sstevel 		KSTAT_ENTER(dev_ksp);
767403831d35Sstevel 		KSTAT_UPDATE(dev_ksp, KSTAT_READ);
767503831d35Sstevel 		man_sum_kstats(man_knp, dev_ksp, mp->mp_last_knp);
767603831d35Sstevel 		KSTAT_EXIT(dev_ksp);
767703831d35Sstevel 		kstat_rele(dev_ksp);
767803831d35Sstevel 
767903831d35Sstevel 	} else {
768003831d35Sstevel 		MAN_DBG(MAN_KSTAT,
768103831d35Sstevel 		    ("man_update_dev_kstats: no kstat data found for %s(%d,%d)",
768203831d35Sstevel 		    buf, major, instance));
768303831d35Sstevel 	}
768403831d35Sstevel }
768503831d35Sstevel 
768603831d35Sstevel static void
man_sum_dests_kstats(kstat_named_t * knp,man_pg_t * mpg)768703831d35Sstevel man_sum_dests_kstats(kstat_named_t *knp, man_pg_t *mpg)
768803831d35Sstevel {
768903831d35Sstevel 	int		i;
769003831d35Sstevel 	int		flags;
769103831d35Sstevel 	char		*statname;
769203831d35Sstevel 	manstr_t	*msp;
769303831d35Sstevel 	man_dest_t	*mdp;
769403831d35Sstevel 	uint64_t	switches = 0;
769503831d35Sstevel 	uint64_t	linkfails = 0;
769603831d35Sstevel 	uint64_t	linkstales = 0;
769703831d35Sstevel 	uint64_t	icmpv4probes = 0;
769803831d35Sstevel 	uint64_t	icmpv6probes = 0;
769903831d35Sstevel 
770003831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_sum_dests_kstats: mpg 0x%p", (void *)mpg));
770103831d35Sstevel 
770203831d35Sstevel 	for (msp = man_strup; msp != NULL; msp = msp->ms_next) {
770303831d35Sstevel 
770403831d35Sstevel 		if (!man_str_uses_pg(msp, mpg))
770503831d35Sstevel 			continue;
770603831d35Sstevel 
770703831d35Sstevel 		mdp = &msp->ms_dests[mpg->mpg_pg_id];
770803831d35Sstevel 
770903831d35Sstevel 		switches += mdp->md_switches;
771003831d35Sstevel 		linkfails += mdp->md_linkfails;
771103831d35Sstevel 		linkstales += mdp->md_linkstales;
771203831d35Sstevel 		icmpv4probes += mdp->md_icmpv4probes;
771303831d35Sstevel 		icmpv6probes += mdp->md_icmpv6probes;
771403831d35Sstevel 	}
771503831d35Sstevel 
771603831d35Sstevel 	for (i = 0; i < MAN_NUMSTATS; i++) {
771703831d35Sstevel 
771803831d35Sstevel 		statname = man_kstat_info[i].mk_name;
771903831d35Sstevel 		flags = man_kstat_info[i].mk_flags;
772003831d35Sstevel 
772103831d35Sstevel 		if (!(flags & MK_NOT_PHYSICAL))
772203831d35Sstevel 			continue;
772303831d35Sstevel 
772403831d35Sstevel 		if (strcmp(statname, "man_switches") == 0) {
772503831d35Sstevel 			knp[i].value.ui64 = switches;
772603831d35Sstevel 		} else if (strcmp(statname, "man_link_fails") == 0) {
772703831d35Sstevel 			knp[i].value.ui64 = linkfails;
772803831d35Sstevel 		} else if (strcmp(statname, "man_link_stales") == 0) {
772903831d35Sstevel 			knp[i].value.ui64 = linkstales;
773003831d35Sstevel 		} else if (strcmp(statname, "man_icmpv4_probes") == 0) {
773103831d35Sstevel 			knp[i].value.ui64 = icmpv4probes;
773203831d35Sstevel 		} else if (strcmp(statname, "man_icmpv6_probes") == 0) {
773303831d35Sstevel 			knp[i].value.ui64 = icmpv6probes;
773403831d35Sstevel 		}
773503831d35Sstevel 	}
773603831d35Sstevel 
773703831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_sum_dests_kstats: returns"));
773803831d35Sstevel }
773903831d35Sstevel 
774003831d35Sstevel /*
774103831d35Sstevel  * Initialize MAN named kstats in the space provided.
774203831d35Sstevel  */
774303831d35Sstevel static void
man_kstat_named_init(kstat_named_t * knp,int num_stats)774403831d35Sstevel man_kstat_named_init(kstat_named_t *knp, int num_stats)
774503831d35Sstevel {
774603831d35Sstevel 	int	i;
774703831d35Sstevel 
774803831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_kstat_named_init: knp(0x%p) num_stats = %d",
774903831d35Sstevel 	    (void *)knp, num_stats));
775003831d35Sstevel 
775103831d35Sstevel 	for (i = 0; i < num_stats; i++) {
775203831d35Sstevel 		kstat_named_init(&knp[i], man_kstat_info[i].mk_name,
775303831d35Sstevel 		    man_kstat_info[i].mk_type);
775403831d35Sstevel 	}
775503831d35Sstevel 
775603831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_kstat_named_init: returns"));
775703831d35Sstevel 
775803831d35Sstevel }
775903831d35Sstevel 
776003831d35Sstevel /*
776103831d35Sstevel  * man_kstat_byname - get a kernel stat value from its structure
776203831d35Sstevel  *
776303831d35Sstevel  *	ksp - kstat_t structure to play with
776403831d35Sstevel  *	s   - string to match names with
776503831d35Sstevel  *	res - in/out result data pointer
776603831d35Sstevel  *
776703831d35Sstevel  *	returns	- success - 1 (found)
776803831d35Sstevel  *		- failure - 0 (not found)
776903831d35Sstevel  */
777003831d35Sstevel static int
man_kstat_byname(kstat_t * ksp,char * s,kstat_named_t * res)777103831d35Sstevel man_kstat_byname(kstat_t *ksp, char *s, kstat_named_t *res)
777203831d35Sstevel {
777303831d35Sstevel 	int		found = 0;
777403831d35Sstevel 
777503831d35Sstevel 	MAN_DBG(MAN_KSTAT2, ("man_kstat_byname: GETTING %s\n", s));
777603831d35Sstevel 
777703831d35Sstevel 	if (ksp->ks_type == KSTAT_TYPE_NAMED) {
777803831d35Sstevel 		kstat_named_t *knp;
777903831d35Sstevel 
778003831d35Sstevel 		for (knp = KSTAT_NAMED_PTR(ksp);
778103831d35Sstevel 		    (caddr_t)knp < ((caddr_t)ksp->ks_data+ksp->ks_data_size);
778203831d35Sstevel 		    knp++) {
778303831d35Sstevel 
778403831d35Sstevel 			if (strcmp(s, knp->name) == NULL) {
778503831d35Sstevel 
778603831d35Sstevel 				res->data_type = knp->data_type;
778703831d35Sstevel 				res->value = knp->value;
778803831d35Sstevel 				found++;
778903831d35Sstevel 
779003831d35Sstevel 				MAN_DBG(MAN_KSTAT2, ("\t%s: %d\n", knp->name,
779103831d35Sstevel 				    (int)knp->value.ul));
779203831d35Sstevel 			}
779303831d35Sstevel 		}
779403831d35Sstevel 	} else {
779503831d35Sstevel 		MAN_DBG(MAN_KSTAT2, ("\tbad kstats type %d\n", ksp->ks_type));
779603831d35Sstevel 	}
779703831d35Sstevel 
779803831d35Sstevel 	/*
779903831d35Sstevel 	 * if getting a value but couldn't find the namestring, result = 0.
780003831d35Sstevel 	 */
780103831d35Sstevel 	if (!found) {
780203831d35Sstevel 		/*
780303831d35Sstevel 		 * a reasonable default
780403831d35Sstevel 		 */
780503831d35Sstevel 		res->data_type = KSTAT_DATA_ULONG;
780603831d35Sstevel 		res->value.l = 0;
780703831d35Sstevel 		MAN_DBG(MAN_KSTAT2, ("\tcouldn't find, using defaults\n"));
780803831d35Sstevel 	}
780903831d35Sstevel 
781003831d35Sstevel 	MAN_DBG(MAN_KSTAT2, ("man_kstat_byname: returns\n"));
781103831d35Sstevel 
781203831d35Sstevel 	return (found);
781303831d35Sstevel }
781403831d35Sstevel 
781503831d35Sstevel 
781603831d35Sstevel /*
781703831d35Sstevel  *
781803831d35Sstevel  * Accumulate MAN driver kstats from the incremental values of the underlying
781903831d35Sstevel  * physical interfaces.
782003831d35Sstevel  *
782103831d35Sstevel  * Parameters:
782203831d35Sstevel  *	sum_knp		- The named kstat area to put cumulative value,
782303831d35Sstevel  *			  NULL if we just want to sync next two params.
782403831d35Sstevel  *	phys_ksp	- Physical interface kstat_t pointer. Contains
782503831d35Sstevel  *			  more current counts.
782603831d35Sstevel  * 	phys_last_knp	- counts from the last time we were called for this
782703831d35Sstevel  *			  physical interface. Note that the name kstats
782803831d35Sstevel  *			  pointed to are actually in MAN format, but they
782903831d35Sstevel  *			  hold the mirrored physical devices last read
783003831d35Sstevel  *			  kstats.
783103831d35Sstevel  * Basic algorithm is:
783203831d35Sstevel  *
783303831d35Sstevel  * 	for each named kstat variable {
783403831d35Sstevel  *	    sum_knp[i] += (phys_ksp->ksp_data[i] - phys_last_knp[i]);
783503831d35Sstevel  *	    phys_last_knp[i] = phys_ksp->ksp_data[i];
783603831d35Sstevel  *	}
783703831d35Sstevel  *
783803831d35Sstevel  */
783903831d35Sstevel static void
man_sum_kstats(kstat_named_t * sum_knp,kstat_t * phys_ksp,kstat_named_t * phys_last_knp)784003831d35Sstevel man_sum_kstats(kstat_named_t *sum_knp, kstat_t *phys_ksp,
784103831d35Sstevel 	kstat_named_t *phys_last_knp)
784203831d35Sstevel {
784303831d35Sstevel 	char		*physname;
784403831d35Sstevel 	char		*physalias;
784503831d35Sstevel 	char		*statname;
784603831d35Sstevel 	kstat_named_t	phys_kn_entry;
784703831d35Sstevel 	uint64_t	delta64;
784803831d35Sstevel 	int		i;
784903831d35Sstevel 
785003831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_sum_kstats: sum_knp(0x%p) phys_ksp(0x%p)"
785103831d35Sstevel 	    " phys_last_knp(0x%p)\n", (void *)sum_knp, (void *)phys_ksp,
785203831d35Sstevel 	    (void *)phys_last_knp));
785303831d35Sstevel 
785403831d35Sstevel 	/*
785503831d35Sstevel 	 * Now for each entry in man_kstat_info, sum the named kstat.
785603831d35Sstevel 	 * Not that all MAN specific kstats will end up !found.
785703831d35Sstevel 	 */
785803831d35Sstevel 	for (i = 0; i < MAN_NUMSTATS; i++) {
785903831d35Sstevel 		int	found = 0;
786003831d35Sstevel 		int	flags = 0;
786103831d35Sstevel 
786203831d35Sstevel 		delta64 = 0;
786303831d35Sstevel 
786403831d35Sstevel 		statname = man_kstat_info[i].mk_name;
786503831d35Sstevel 		physname = man_kstat_info[i].mk_physname;
786603831d35Sstevel 		physalias = man_kstat_info[i].mk_physalias;
786703831d35Sstevel 		flags = man_kstat_info[i].mk_flags;
786803831d35Sstevel 
786903831d35Sstevel 		/*
787003831d35Sstevel 		 * Update MAN private kstats.
787103831d35Sstevel 		 */
787203831d35Sstevel 		if (flags & MK_NOT_PHYSICAL) {
787303831d35Sstevel 
787403831d35Sstevel 			kstat_named_t	*knp = phys_last_knp;
787503831d35Sstevel 
787603831d35Sstevel 			if (sum_knp == NULL)
787703831d35Sstevel 				continue;
787803831d35Sstevel 
787903831d35Sstevel 			if (strcmp(statname, "man_switches") == 0) {
788003831d35Sstevel 				sum_knp[i].value.ui64 = knp[i].value.ui64;
788103831d35Sstevel 			} else if (strcmp(statname, "man_link_fails") == 0) {
788203831d35Sstevel 				sum_knp[i].value.ui64 = knp[i].value.ui64;
788303831d35Sstevel 			} else if (strcmp(statname, "man_link_stales") == 0) {
788403831d35Sstevel 				sum_knp[i].value.ui64 = knp[i].value.ui64;
788503831d35Sstevel 			} else if (strcmp(statname, "man_icmpv4_probes") == 0) {
788603831d35Sstevel 				sum_knp[i].value.ui64 = knp[i].value.ui64;
788703831d35Sstevel 			} else if (strcmp(statname, "man_icmpv6_probes") == 0) {
788803831d35Sstevel 				sum_knp[i].value.ui64 = knp[i].value.ui64;
788903831d35Sstevel 			}
789003831d35Sstevel 
789103831d35Sstevel 			continue;	/* phys_ksp doesnt have this stat */
789203831d35Sstevel 		}
789303831d35Sstevel 
789403831d35Sstevel 		/*
789503831d35Sstevel 		 * first try it by the "official" name
789603831d35Sstevel 		 */
789703831d35Sstevel 		if (phys_ksp) {
789803831d35Sstevel 			if (man_kstat_byname(phys_ksp, physname,
789903831d35Sstevel 			    &phys_kn_entry)) {
790003831d35Sstevel 
790103831d35Sstevel 				found = 1;
790203831d35Sstevel 
790303831d35Sstevel 			} else if ((physalias) && (man_kstat_byname(phys_ksp,
790403831d35Sstevel 			    physalias, &phys_kn_entry))) {
790503831d35Sstevel 
790603831d35Sstevel 				found = 1;
790703831d35Sstevel 			}
790803831d35Sstevel 		}
790903831d35Sstevel 
791003831d35Sstevel 		if (!found) {
791103831d35Sstevel 			/*
791203831d35Sstevel 			 * clear up the "last" value, no change to the sum
791303831d35Sstevel 			 */
791403831d35Sstevel 			phys_last_knp[i].value.ui64 = 0;
791503831d35Sstevel 			continue;
791603831d35Sstevel 		}
791703831d35Sstevel 
791803831d35Sstevel 		/*
791903831d35Sstevel 		 * at this point, we should have the good underlying
792003831d35Sstevel 		 * kstat value stored in phys_kn_entry
792103831d35Sstevel 		 */
792203831d35Sstevel 		if (flags & MK_NOT_COUNTER) {
792303831d35Sstevel 			/*
792403831d35Sstevel 			 * it isn't a counter, so store the value and
792503831d35Sstevel 			 * move on (e.g. ifspeed)
792603831d35Sstevel 			 */
792703831d35Sstevel 			phys_last_knp[i].value = phys_kn_entry.value;
792803831d35Sstevel 			continue;
792903831d35Sstevel 		}
793003831d35Sstevel 
793103831d35Sstevel 		switch (phys_kn_entry.data_type) {
793203831d35Sstevel 		case KSTAT_DATA_UINT32:
793303831d35Sstevel 
793403831d35Sstevel 			/*
793503831d35Sstevel 			 * this handles 32-bit wrapping
793603831d35Sstevel 			 */
793703831d35Sstevel 			if (phys_kn_entry.value.ui32 <
793803831d35Sstevel 			    phys_last_knp[i].value.ui32) {
793903831d35Sstevel 
794003831d35Sstevel 				/*
794103831d35Sstevel 				 * we've wrapped!
794203831d35Sstevel 				 */
794303831d35Sstevel 				delta64 += (UINT_MAX -
794403831d35Sstevel 				    phys_last_knp[i].value.ui32);
794503831d35Sstevel 				phys_last_knp[i].value.ui32 = 0;
794603831d35Sstevel 			}
794703831d35Sstevel 
794803831d35Sstevel 			delta64 += phys_kn_entry.value.ui32 -
794903831d35Sstevel 			    phys_last_knp[i].value.ui32;
795003831d35Sstevel 			phys_last_knp[i].value.ui32 = phys_kn_entry.value.ui32;
795103831d35Sstevel 			break;
795203831d35Sstevel 
795303831d35Sstevel 		default:
795403831d35Sstevel 			/*
795503831d35Sstevel 			 * must be a 64-bit value, we ignore 64-bit
795603831d35Sstevel 			 * wraps, since they shouldn't ever happen
795703831d35Sstevel 			 * within the life of a machine (if we assume
795803831d35Sstevel 			 * machines don't stay up for more than a few
795903831d35Sstevel 			 * hundred years without a reboot...)
796003831d35Sstevel 			 */
796103831d35Sstevel 			delta64 = phys_kn_entry.value.ui64 -
796203831d35Sstevel 			    phys_last_knp[i].value.ui64;
796303831d35Sstevel 			phys_last_knp[i].value.ui64 = phys_kn_entry.value.ui64;
796403831d35Sstevel 		}
796503831d35Sstevel 
796603831d35Sstevel 		if (sum_knp != NULL) {
796703831d35Sstevel 			/*
796803831d35Sstevel 			 * now we need to save the value
796903831d35Sstevel 			 */
797003831d35Sstevel 			switch (sum_knp[i].data_type) {
797103831d35Sstevel 			case KSTAT_DATA_UINT32:
797203831d35Sstevel 				/* trunk down to 32 bits, possibly lossy */
797303831d35Sstevel 				sum_knp[i].value.ui32 += (uint32_t)delta64;
797403831d35Sstevel 				break;
797503831d35Sstevel 
797603831d35Sstevel 			default:
797703831d35Sstevel 				sum_knp[i].value.ui64 += delta64;
797803831d35Sstevel 				break;
797903831d35Sstevel 			}
798003831d35Sstevel 		}
798103831d35Sstevel 	}
798203831d35Sstevel 
798303831d35Sstevel 	MAN_DBG(MAN_KSTAT, ("man_sum_kstats: returns\n"));
798403831d35Sstevel }
798503831d35Sstevel 
798603831d35Sstevel 
798703831d35Sstevel #if defined(DEBUG)
798803831d35Sstevel 
798903831d35Sstevel 
799003831d35Sstevel static char *_ms_flags[] = {
799103831d35Sstevel 	"NONE",
799203831d35Sstevel 	"FAST", 	/* 0x1 */
799303831d35Sstevel 	"RAW",		/* 0x2 */
799403831d35Sstevel 	"ALLPHYS",	/* 0x4 */
799503831d35Sstevel 	"ALLMULTI",	/* 0x8 */
799603831d35Sstevel 	"ALLSAP",	/* 0x10 */
799703831d35Sstevel 	"CKSUM",	/* 0x20 */
799803831d35Sstevel 	"MULTI",	/* 0x40 */
799903831d35Sstevel 	"SERLPBK",	/* 0x80 */
800003831d35Sstevel 	"MACLPBK",	/* 0x100 */
800103831d35Sstevel 	"CLOSING",	/* 0x200 */
800203831d35Sstevel 	"CLOSE_DONE",	/* 0x400 */
800303831d35Sstevel 	"CONTROL"	/* 0x800 */
800403831d35Sstevel };
800503831d35Sstevel 
800603831d35Sstevel static void
man_print_msp(manstr_t * msp)800703831d35Sstevel man_print_msp(manstr_t *msp)
800803831d35Sstevel {
800903831d35Sstevel 	char	buf[512];
801003831d35Sstevel 	char	prbuf[512];
801103831d35Sstevel 	uint_t	flags;
801203831d35Sstevel 	int	i;
801303831d35Sstevel 
801403831d35Sstevel 	cmn_err(CE_CONT, "\tmsp(0x%p)\n", (void *)msp);
801503831d35Sstevel 
801603831d35Sstevel 	if (msp == NULL)
801703831d35Sstevel 		return;
801803831d35Sstevel 
801903831d35Sstevel 	cmn_err(CE_CONT, "\t%s%d SAP(0x%x):\n",
802003831d35Sstevel 	    ddi_major_to_name(msp->ms_meta_maj), msp->ms_meta_ppa,
802103831d35Sstevel 	    msp->ms_sap);
802203831d35Sstevel 
802303831d35Sstevel 	buf[0] = '\0';
802403831d35Sstevel 	prbuf[0] = '\0';
802503831d35Sstevel 	flags = msp->ms_flags;
802603831d35Sstevel 	for (i = 0; i < A_CNT(_ms_flags); i++) {
802703831d35Sstevel 		if ((flags >> i) & 0x1) {
8028*07d06da5SSurya Prakki 			(void) sprintf(buf, " %s |", _ms_flags[i+1]);
8029*07d06da5SSurya Prakki 			(void) strcat(prbuf, buf);
803003831d35Sstevel 		}
803103831d35Sstevel 	}
803203831d35Sstevel 	prbuf[strlen(prbuf) - 1] = '\0';
803303831d35Sstevel 	cmn_err(CE_CONT, "\tms_flags: %s\n", prbuf);
803403831d35Sstevel 
803503831d35Sstevel 	cmn_err(CE_CONT, "\tms_dlpistate: %s\n", dss[msp->ms_dlpistate]);
803603831d35Sstevel 
803703831d35Sstevel 	cmn_err(CE_CONT, "\tms_dl_mp: 0x%p\n", (void *)msp->ms_dl_mp);
803803831d35Sstevel 
803903831d35Sstevel 	cmn_err(CE_CONT, "\tms_manp: 0x%p\n", (void *)msp->ms_manp);
804003831d35Sstevel 
804103831d35Sstevel 	cmn_err(CE_CONT, "\tms_dests: 0x%p\n", (void *)msp->ms_dests);
804203831d35Sstevel 
804303831d35Sstevel }
804403831d35Sstevel 
804503831d35Sstevel static char *_md_state[] = {
804603831d35Sstevel 	"NOTPRESENT",		/* 0x0 */
804703831d35Sstevel 	"INITIALIZING",		/* 0x1 */
804803831d35Sstevel 	"READY",		/* 0x2 */
804903831d35Sstevel 	"PLUMBING",		/* 0x4 */
805003831d35Sstevel 	"CLOSING"		/* 0x8 */
805103831d35Sstevel };
805203831d35Sstevel 
805303831d35Sstevel static void
man_print_mdp(man_dest_t * mdp)805403831d35Sstevel man_print_mdp(man_dest_t *mdp)
805503831d35Sstevel {
805603831d35Sstevel 	uint_t		state;
805703831d35Sstevel 	int		i;
805803831d35Sstevel 	char		buf[64];
805903831d35Sstevel 	char		prbuf[512];
806003831d35Sstevel 
806103831d35Sstevel 	buf[0] = '\0';
806203831d35Sstevel 	prbuf[0] = '\0';
806303831d35Sstevel 
806403831d35Sstevel 	cmn_err(CE_CONT, "\tmdp(0x%p)\n", (void *)mdp);
806503831d35Sstevel 
806603831d35Sstevel 	if (mdp == NULL)
806703831d35Sstevel 		return;
806803831d35Sstevel 
806903831d35Sstevel 	cmn_err(CE_CONT, "\tmd_pg_id: %d\n", mdp->md_pg_id);
807003831d35Sstevel 	cmn_err(CE_CONT, "\tmd_dst_eaddr: %s\n",
807103831d35Sstevel 	    ether_sprintf(&mdp->md_dst_eaddr));
807203831d35Sstevel 	cmn_err(CE_CONT, "\tmd_src_eaddr: %s\n",
807303831d35Sstevel 	    ether_sprintf(&mdp->md_src_eaddr));
807403831d35Sstevel 	cmn_err(CE_CONT, "\tmd_dlpistate: %s", dss[mdp->md_dlpistate]);
807503831d35Sstevel 	cmn_err(CE_CONT, "\tmd_muxid: 0x%u", mdp->md_muxid);
807603831d35Sstevel 	cmn_err(CE_CONT, "\tmd_rcvcnt %lu md_lastrcvcnt %lu", mdp->md_rcvcnt,
807703831d35Sstevel 	    mdp->md_lastrcvcnt);
807803831d35Sstevel 
807903831d35Sstevel 	/*
808003831d35Sstevel 	 * Print out state as text.
808103831d35Sstevel 	 */
808203831d35Sstevel 	state = mdp->md_state;
808303831d35Sstevel 
808403831d35Sstevel 	if (state == 0) {
8085*07d06da5SSurya Prakki 		(void) strcat(prbuf, _md_state[0]);
808603831d35Sstevel 	} else {
808703831d35Sstevel 
808803831d35Sstevel 		for (i = 0; i < A_CNT(_md_state); i++) {
808903831d35Sstevel 			if ((state >> i) & 0x1)  {
8090*07d06da5SSurya Prakki 				(void) sprintf(buf, " %s |", _md_state[i+1]);
8091*07d06da5SSurya Prakki 				(void) strcat(prbuf, buf);
809203831d35Sstevel 			}
809303831d35Sstevel 		}
809403831d35Sstevel 		prbuf[strlen(prbuf) -1] = '\0';
809503831d35Sstevel 	}
809603831d35Sstevel 	cmn_err(CE_CONT, "\tmd_state: %s", prbuf);
809703831d35Sstevel 
809803831d35Sstevel 	cmn_err(CE_CONT, "\tmd_device:\n");
809903831d35Sstevel 	man_print_dev(&mdp->md_device);
810003831d35Sstevel 
810103831d35Sstevel }
810203831d35Sstevel 
810303831d35Sstevel static void
man_print_man(man_t * manp)810403831d35Sstevel man_print_man(man_t *manp)
810503831d35Sstevel {
810603831d35Sstevel 	char	buf[512];
810703831d35Sstevel 	char	prbuf[512];
810803831d35Sstevel 
810903831d35Sstevel 	buf[0] = '\0';
811003831d35Sstevel 	prbuf[0] = '\0';
811103831d35Sstevel 
811203831d35Sstevel 	if (manp == NULL)
811303831d35Sstevel 		return;
811403831d35Sstevel 
811503831d35Sstevel 	if (ddi_major_to_name(manp->man_meta_major)) {
8116*07d06da5SSurya Prakki 		(void) sprintf(buf, "\t man_device: %s%d\n",
811703831d35Sstevel 		    ddi_major_to_name(manp->man_meta_major),
811803831d35Sstevel 		    manp->man_meta_ppa);
811903831d35Sstevel 	} else {
8120*07d06da5SSurya Prakki 		(void) sprintf(buf, "\t major: %d", manp->man_meta_major);
8121*07d06da5SSurya Prakki 		(void) sprintf(buf, "\t ppa: %d", manp->man_meta_ppa);
812203831d35Sstevel 	}
812303831d35Sstevel 
812403831d35Sstevel 	cmn_err(CE_CONT, "%s", buf);
812503831d35Sstevel 
812603831d35Sstevel }
812703831d35Sstevel 
812803831d35Sstevel static char *_mdev_state[] = {
812903831d35Sstevel 	"UNASSIGNED  ",
813003831d35Sstevel 	"ASSIGNED",
813103831d35Sstevel 	"ACTIVE",
813203831d35Sstevel 	"FAILED"
813303831d35Sstevel };
813403831d35Sstevel 
813503831d35Sstevel static void
man_print_dev(man_dev_t * mdevp)813603831d35Sstevel man_print_dev(man_dev_t *mdevp)
813703831d35Sstevel {
813803831d35Sstevel 	char	buf[512];
813903831d35Sstevel 	char	prbuf[512];
814003831d35Sstevel 	int	i;
814103831d35Sstevel 	uint_t	state;
814203831d35Sstevel 
814303831d35Sstevel 	buf[0] = '\0';
814403831d35Sstevel 	prbuf[0] = '\0';
814503831d35Sstevel 
814603831d35Sstevel 	if (mdevp == NULL)
814703831d35Sstevel 		return;
814803831d35Sstevel 
814903831d35Sstevel 	if (mdevp->mdev_major == 0) {
815003831d35Sstevel number:
8151*07d06da5SSurya Prakki 		(void) sprintf(buf, "\t mdev_major: %d\n", mdevp->mdev_major);
815203831d35Sstevel 	} else if (ddi_major_to_name(mdevp->mdev_major)) {
8153*07d06da5SSurya Prakki 		(void) sprintf(buf, "\t mdev_device: %s%d\n",
815403831d35Sstevel 		    ddi_major_to_name(mdevp->mdev_major),
815503831d35Sstevel 		    mdevp->mdev_ppa);
815603831d35Sstevel 	} else
815703831d35Sstevel 		goto number;
815803831d35Sstevel 
815903831d35Sstevel 	cmn_err(CE_CONT, "%s", buf);
816003831d35Sstevel 
816103831d35Sstevel 	cmn_err(CE_CONT, "\t mdev_exp_id: %d\n", mdevp->mdev_exp_id);
816203831d35Sstevel 
816303831d35Sstevel 	buf[0] = '\0';
816403831d35Sstevel 	prbuf[0] = '\0';
816503831d35Sstevel 	state = mdevp->mdev_state;
816603831d35Sstevel 
816703831d35Sstevel 	if (state == 0) {
8168*07d06da5SSurya Prakki 		(void) strcat(prbuf, _mdev_state[0]);
816903831d35Sstevel 	} else {
817003831d35Sstevel 		for (i = 0; i < A_CNT(_mdev_state); i++) {
817103831d35Sstevel 			if ((state >> i) & 0x1) {
8172*07d06da5SSurya Prakki 				(void) sprintf(buf, " %s |", _mdev_state[i+1]);
8173*07d06da5SSurya Prakki 				(void) strcat(prbuf, buf);
817403831d35Sstevel 			}
817503831d35Sstevel 		}
817603831d35Sstevel 	}
817703831d35Sstevel 
817803831d35Sstevel 	prbuf[strlen(prbuf) - 2] = '\0';
817903831d35Sstevel 
818003831d35Sstevel 	cmn_err(CE_CONT, "\t mdev_state: %s\n", prbuf);
818103831d35Sstevel 
818203831d35Sstevel }
818303831d35Sstevel 
818403831d35Sstevel static char *_mip_cmd[] = {
818503831d35Sstevel 	"MI_PATH_READ",
818603831d35Sstevel 	"MI_PATH_ASSIGN",
818703831d35Sstevel 	"MI_PATH_ACTIVATE",
818803831d35Sstevel 	"MI_PATH_DEACTIVATE",
818903831d35Sstevel 	"MI_PATH_UNASSIGN"
819003831d35Sstevel };
819103831d35Sstevel 
819203831d35Sstevel static void
man_print_mtp(mi_time_t * mtp)819303831d35Sstevel man_print_mtp(mi_time_t *mtp)
819403831d35Sstevel {
819503831d35Sstevel 	cmn_err(CE_CONT, "\tmtp(0x%p)\n", (void *)mtp);
819603831d35Sstevel 
819703831d35Sstevel 	if (mtp == NULL)
819803831d35Sstevel 		return;
819903831d35Sstevel 
820003831d35Sstevel 	cmn_err(CE_CONT, "\tmtp_instance: %d\n", mtp->mtp_man_ppa);
820103831d35Sstevel 
820203831d35Sstevel 	cmn_err(CE_CONT, "\tmtp_time: %d\n", mtp->mtp_time);
820303831d35Sstevel 
820403831d35Sstevel }
820503831d35Sstevel 
820603831d35Sstevel static void
man_print_mip(mi_path_t * mip)820703831d35Sstevel man_print_mip(mi_path_t *mip)
820803831d35Sstevel {
820903831d35Sstevel 	cmn_err(CE_CONT, "\tmip(0x%p)\n", (void *)mip);
821003831d35Sstevel 
821103831d35Sstevel 	if (mip == NULL)
821203831d35Sstevel 		return;
821303831d35Sstevel 
821403831d35Sstevel 	cmn_err(CE_CONT, "\tmip_pg_id: %d\n", mip->mip_pg_id);
821503831d35Sstevel 
821603831d35Sstevel 	cmn_err(CE_CONT, "\tmip_cmd: %s\n", _mip_cmd[mip->mip_cmd]);
821703831d35Sstevel 
821803831d35Sstevel 	cmn_err(CE_CONT, "\tmip_eaddr: %s\n", ether_sprintf(&mip->mip_eaddr));
821903831d35Sstevel 
822003831d35Sstevel 	cmn_err(CE_CONT, "\tmip_devs: 0x%p\n", (void *)mip->mip_devs);
822103831d35Sstevel 
822203831d35Sstevel 	cmn_err(CE_CONT, "\tmip_ndevs: %d\n", mip->mip_ndevs);
822303831d35Sstevel 
822403831d35Sstevel }
822503831d35Sstevel 
822603831d35Sstevel static void
man_print_mpg(man_pg_t * mpg)822703831d35Sstevel man_print_mpg(man_pg_t *mpg)
822803831d35Sstevel {
822903831d35Sstevel 	cmn_err(CE_CONT, "\tmpg(0x%p)\n", (void *)mpg);
823003831d35Sstevel 
823103831d35Sstevel 	if (mpg == NULL)
823203831d35Sstevel 		return;
823303831d35Sstevel 
823403831d35Sstevel 	cmn_err(CE_CONT, "\tmpg_next: 0x%p\n", (void *)mpg->mpg_next);
823503831d35Sstevel 
823603831d35Sstevel 	cmn_err(CE_CONT, "\tmpg_pg_id: %d\n", mpg->mpg_pg_id);
823703831d35Sstevel 
823803831d35Sstevel 	cmn_err(CE_CONT, "\tmpg_man_ppa: %d\n", mpg->mpg_man_ppa);
823903831d35Sstevel 
824003831d35Sstevel 	cmn_err(CE_CONT, "\tmpg_dst_eaddr: %s\n",
824103831d35Sstevel 	    ether_sprintf(&mpg->mpg_dst_eaddr));
824203831d35Sstevel 
824303831d35Sstevel 	cmn_err(CE_CONT, "\tmpg_pathp: 0x%p\n", (void *)mpg->mpg_pathp);
824403831d35Sstevel 
824503831d35Sstevel }
824603831d35Sstevel 
824703831d35Sstevel static char *_mw_flags[] = {
824803831d35Sstevel 	"NOWAITER",		/* 0x0 */
824903831d35Sstevel 	"CVWAITER",		/* 0x1 */
825003831d35Sstevel 	"QWAITER",		/* 0x2 */
825103831d35Sstevel 	"DONE"		/* 0x3 */
825203831d35Sstevel };
825303831d35Sstevel 
825403831d35Sstevel static void
man_print_work(man_work_t * wp)825503831d35Sstevel man_print_work(man_work_t *wp)
825603831d35Sstevel {
825703831d35Sstevel 	int 	i;
825803831d35Sstevel 
825903831d35Sstevel 	cmn_err(CE_CONT, "\twp(0x%p)\n\n", (void *)wp);
826003831d35Sstevel 
826103831d35Sstevel 	if (wp == NULL)
826203831d35Sstevel 		return;
826303831d35Sstevel 
826403831d35Sstevel 	cmn_err(CE_CONT, "\tmw_type: %s\n", _mw_type[wp->mw_type]);
826503831d35Sstevel 
826603831d35Sstevel 	cmn_err(CE_CONT, "\tmw_flags: ");
826703831d35Sstevel 	for (i = 0; i < A_CNT(_mw_flags); i++) {
826803831d35Sstevel 		if ((wp->mw_flags >> i) & 0x1)
826903831d35Sstevel 			cmn_err(CE_CONT, "%s", _mw_flags[i]);
827003831d35Sstevel 	}
827103831d35Sstevel 	cmn_err(CE_CONT, "\n");
827203831d35Sstevel 
827303831d35Sstevel 	cmn_err(CE_CONT, "\twp_status: %d\n", wp->mw_status);
827403831d35Sstevel 
827503831d35Sstevel 	cmn_err(CE_CONT, "\twp_arg: 0x%p\n", (void *)&wp->mw_arg);
827603831d35Sstevel 
827703831d35Sstevel 	cmn_err(CE_CONT, "\tmw_next: 0x%p\n", (void *)wp->mw_next);
827803831d35Sstevel 
827903831d35Sstevel 	cmn_err(CE_CONT, "\twp_q: 0x%p", (void *)wp->mw_q);
828003831d35Sstevel 
828103831d35Sstevel }
828203831d35Sstevel 
828303831d35Sstevel static void
man_print_path(man_path_t * mp)828403831d35Sstevel man_print_path(man_path_t *mp)
828503831d35Sstevel {
828603831d35Sstevel 	cmn_err(CE_CONT, "\tmp(0x%p)\n\n", (void *)mp);
828703831d35Sstevel 
828803831d35Sstevel 	if (mp == NULL)
828903831d35Sstevel 		return;
829003831d35Sstevel 
829103831d35Sstevel 	cmn_err(CE_CONT, "\tmp_device:");
829203831d35Sstevel 	man_print_dev(&mp->mp_device);
829303831d35Sstevel 
829403831d35Sstevel 	cmn_err(CE_CONT, "\tmp_next: 0x%p\n", (void *)mp->mp_next);
829503831d35Sstevel 
829603831d35Sstevel 	cmn_err(CE_CONT, "\tmp_last_knp: 0x%p\n", (void *)mp->mp_last_knp);
829703831d35Sstevel 
829803831d35Sstevel 	cmn_err(CE_CONT, "\tmp_lru: 0x%lx", mp->mp_lru);
829903831d35Sstevel 
830003831d35Sstevel }
830103831d35Sstevel 
830203831d35Sstevel void *
man_dbg_kzalloc(int line,size_t size,int kmflags)830303831d35Sstevel man_dbg_kzalloc(int line, size_t size, int kmflags)
830403831d35Sstevel {
830503831d35Sstevel 	void *tmp;
830603831d35Sstevel 
830703831d35Sstevel 	tmp = kmem_zalloc(size, kmflags);
830803831d35Sstevel 	MAN_DBG(MAN_KMEM, ("0x%p %lu\tzalloc'd @ %d\n", (void *)tmp,
830903831d35Sstevel 	    size, line));
831003831d35Sstevel 
831103831d35Sstevel 	return (tmp);
831203831d35Sstevel 
831303831d35Sstevel }
831403831d35Sstevel 
831503831d35Sstevel void
man_dbg_kfree(int line,void * buf,size_t size)831603831d35Sstevel man_dbg_kfree(int line, void *buf, size_t size)
831703831d35Sstevel {
831803831d35Sstevel 
831903831d35Sstevel 	MAN_DBG(MAN_KMEM, ("0x%p %lu\tfree'd @ %d\n", (void *)buf, size, line));
832003831d35Sstevel 
832103831d35Sstevel 	kmem_free(buf, size);
832203831d35Sstevel 
832303831d35Sstevel }
832403831d35Sstevel 
832503831d35Sstevel #endif  /* DEBUG */
8326