/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/strsubr.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ksynch.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/strsun.h>
#include <sys/dlpi.h>
#include <sys/ethernet.h>
#include <net/if.h>
#include <sys/varargs.h>
#include <sys/machsystm.h>
#include <sys/modctl.h>
#include <sys/modhash.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/taskq.h>
#include <sys/note.h>
#include <sys/mach_descrip.h>
#include <sys/mdeg.h>
#include <sys/ldc.h>
#include <sys/vsw_fdb.h>
#include <sys/vsw.h>
#include <sys/vio_mailbox.h>
#include <sys/vnet_mailbox.h>
#include <sys/vnet_common.h>
#include <sys/vio_util.h>
#include <sys/sdt.h>

/*
 * Function prototypes.
 */
static	int vsw_attach(dev_info_t *, ddi_attach_cmd_t);
static	int vsw_detach(dev_info_t *, ddi_detach_cmd_t);
static	int vsw_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static	void vsw_get_md_properties(vsw_t *vswp);
static	int vsw_get_physaddr(vsw_t *);
static	int vsw_setup_layer2(vsw_t *);
static	int vsw_setup_layer3(vsw_t *);

/* MAC Ring table functions. */
static void vsw_mac_ring_tbl_init(vsw_t *vswp);
static void vsw_mac_ring_tbl_destroy(vsw_t *vswp);
static void vsw_queue_worker(vsw_mac_ring_t *rrp);
static void vsw_queue_stop(vsw_queue_t *vqp);
static vsw_queue_t *vsw_queue_create();
static void vsw_queue_destroy(vsw_queue_t *vqp);

/* MAC layer routines */
static mac_resource_handle_t vsw_mac_ring_add_cb(void *arg,
		mac_resource_t *mrp);
static	int vsw_get_hw_maddr(vsw_t *);
static	int vsw_set_hw(vsw_t *, vsw_port_t *);
static	int vsw_set_hw_promisc(vsw_t *, vsw_port_t *);
static	int vsw_unset_hw(vsw_t *, vsw_port_t *);
static	int vsw_unset_hw_promisc(vsw_t *, vsw_port_t *);
static	int vsw_reconfig_hw(vsw_t *);
static int vsw_mac_attach(vsw_t *vswp);
static void vsw_mac_detach(vsw_t *vswp);

static void vsw_rx_queue_cb(void *, mac_resource_handle_t, mblk_t *);
static void vsw_rx_cb(void *, mac_resource_handle_t, mblk_t *);
static mblk_t *vsw_tx_msg(vsw_t *, mblk_t *);
static int vsw_mac_register(vsw_t *);
static int vsw_mac_unregister(vsw_t *);
static int vsw_m_stat(void *, uint_t, uint64_t *);
static void vsw_m_stop(void *arg);
static int vsw_m_start(void *arg);
static int vsw_m_unicst(void *arg, const uint8_t *);
static int vsw_m_multicst(void *arg, boolean_t, const uint8_t *);
static int vsw_m_promisc(void *arg, boolean_t);
static mblk_t *vsw_m_tx(void *arg, mblk_t *);

/* MDEG routines */
static	void vsw_mdeg_register(vsw_t *vswp);
static	void vsw_mdeg_unregister(vsw_t *vswp);
static	int vsw_mdeg_cb(void *cb_argp, mdeg_result_t *);

/* Port add/deletion routines */
static	int vsw_port_add(vsw_t *vswp, md_t *mdp, mde_cookie_t *node);
static	int vsw_port_attach(vsw_t *vswp, int p_instance,
	uint64_t *ldcids, int nids, struct ether_addr *macaddr);
static	int vsw_detach_ports(vsw_t *vswp);
static	int vsw_port_detach(vsw_t *vswp, int p_instance);
static	int vsw_port_delete(vsw_port_t *port);
static	int vsw_ldc_attach(vsw_port_t *port, uint64_t ldc_id);
static	int vsw_ldc_detach(vsw_port_t *port, uint64_t ldc_id);
static	int vsw_init_ldcs(vsw_port_t *port);
static	int vsw_uninit_ldcs(vsw_port_t *port);
static	int vsw_ldc_init(vsw_ldc_t *ldcp);
static	int vsw_ldc_uninit(vsw_ldc_t *ldcp);
static	int vsw_drain_ldcs(vsw_port_t *port);
static	int vsw_drain_port_taskq(vsw_port_t *port);
static	void vsw_marker_task(void *);
static	vsw_port_t *vsw_lookup_port(vsw_t *vswp, int p_instance);
static	int vsw_plist_del_node(vsw_t *, vsw_port_t *port);

/* Interrupt routines */
static	uint_t vsw_ldc_cb(uint64_t cb, caddr_t arg);

/* Handshake routines */
static	void vsw_restart_ldc(vsw_ldc_t *);
static	void vsw_restart_handshake(vsw_ldc_t *);
static	void vsw_handle_reset(vsw_ldc_t *);
static	int vsw_check_flag(vsw_ldc_t *, int, uint64_t);
static	void vsw_next_milestone(vsw_ldc_t *);
static	int vsw_supported_version(vio_ver_msg_t *);

/* Data processing routines */
static void vsw_process_pkt(void *);
static void vsw_dispatch_ctrl_task(vsw_ldc_t *, void *, vio_msg_tag_t);
static void vsw_process_ctrl_pkt(void *);
static void vsw_process_ctrl_ver_pkt(vsw_ldc_t *, void *);
static void vsw_process_ctrl_attr_pkt(vsw_ldc_t *, void *);
static void vsw_process_ctrl_mcst_pkt(vsw_ldc_t *, void *);
static void vsw_process_ctrl_dring_reg_pkt(vsw_ldc_t *, void *);
static void vsw_process_ctrl_dring_unreg_pkt(vsw_ldc_t *, void *);
static void vsw_process_ctrl_rdx_pkt(vsw_ldc_t *, void *);
static void vsw_process_data_pkt(vsw_ldc_t *, void *, vio_msg_tag_t);
static void vsw_process_data_dring_pkt(vsw_ldc_t *, void *);
static void vsw_process_data_raw_pkt(vsw_ldc_t *, void *);
static void vsw_process_data_ibnd_pkt(vsw_ldc_t *, void *);
static void vsw_process_err_pkt(vsw_ldc_t *, void *, vio_msg_tag_t);

/* Switching/data transmit routines */
static	void vsw_switch_l2_frame(vsw_t *vswp, mblk_t *mp, int caller,
	    vsw_port_t *port, mac_resource_handle_t);
static	void vsw_switch_l3_frame(vsw_t *vswp, mblk_t *mp, int caller,
	    vsw_port_t *port, mac_resource_handle_t);
static	int vsw_forward_all(vsw_t *vswp, mblk_t *mp, int caller,
	    vsw_port_t *port);
static	int vsw_forward_grp(vsw_t *vswp, mblk_t *mp, int caller,
	    vsw_port_t *port);
static	int vsw_portsend(vsw_port_t *, mblk_t *);
static	int vsw_dringsend(vsw_ldc_t *, mblk_t *);
static	int vsw_descrsend(vsw_ldc_t *, mblk_t *);

/* Packet creation routines */
static void vsw_send_ver(void *);
static void vsw_send_attr(vsw_ldc_t *);
static vio_dring_reg_msg_t *vsw_create_dring_info_pkt(vsw_ldc_t *);
static void vsw_send_dring_info(vsw_ldc_t *);
static void vsw_send_rdx(vsw_ldc_t *);

static void vsw_send_msg(vsw_ldc_t *, void *, int);

/* Forwarding database (FDB) routines */
static	int vsw_add_fdb(vsw_t *vswp, vsw_port_t *port);
static	int vsw_del_fdb(vsw_t *vswp, vsw_port_t *port);
static	vsw_port_t *vsw_lookup_fdb(vsw_t *vswp, struct ether_header *);
static	int vsw_add_rem_mcst(vnet_mcast_msg_t *, vsw_port_t *);
static	int vsw_add_mcst(vsw_t *, uint8_t, uint64_t, void *);
static	int vsw_del_mcst(vsw_t *, uint8_t, uint64_t, void *);
static	void vsw_del_addr(uint8_t, void *, uint64_t);
static	void vsw_del_mcst_port(vsw_port_t *);
static	void vsw_del_mcst_vsw(vsw_t *);

/* Dring routines */
static dring_info_t *vsw_create_dring(vsw_ldc_t *);
static void vsw_create_privring(vsw_ldc_t *);
static int vsw_setup_ring(vsw_ldc_t *ldcp, dring_info_t *dp);
static int vsw_dring_find_free_desc(dring_info_t *, vsw_private_desc_t **,
    int *);
static dring_info_t *vsw_ident2dring(lane_t *, uint64_t);

static void vsw_set_lane_attr(vsw_t *, lane_t *);
static int vsw_check_attr(vnet_attr_msg_t *, vsw_port_t *);
static int vsw_dring_match(dring_info_t *dp, vio_dring_reg_msg_t *msg);
static int vsw_mem_cookie_match(ldc_mem_cookie_t *, ldc_mem_cookie_t *);
static int vsw_check_dring_info(vio_dring_reg_msg_t *);

/* Misc support routines */
static	caddr_t vsw_print_ethaddr(uint8_t *addr, char *ebuf);
static void vsw_free_lane_resources(vsw_ldc_t *, uint64_t);
static int vsw_free_ring(dring_info_t *);


/* Debugging routines */
static void dump_flags(uint64_t);
static void display_state(void);
static void display_lane(lane_t *);
static void display_ring(dring_info_t *);

int	vsw_num_handshakes = 3;		/* # of handshake attempts */
int	vsw_wretries = 100;		/* # of write attempts */
int	vsw_chain_len = 150;		/* max # of mblks in msg chain */
int	vsw_desc_delay = 0;		/* delay in us */
int	vsw_read_attempts = 5;		/* # of reads of descriptor */

uint32_t	vsw_mblk_size = VSW_MBLK_SIZE;
uint32_t	vsw_num_mblks = VSW_NUM_MBLKS;


/*
 * mode specific frame switching function
 */
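/*
 * (Set to vsw_switch_l2_frame() or vsw_switch_l3_frame() by
 * vsw_setup_layer2()/vsw_setup_layer3(), according to the switching
 * mode selected at attach time.)
 */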
void		(*vsw_switch_frame)(vsw_t *, mblk_t *, int, vsw_port_t *,
			mac_resource_handle_t);

static	mac_callbacks_t	vsw_m_callbacks = {
	0,
	vsw_m_stat,
	vsw_m_start,
	vsw_m_stop,
	vsw_m_promisc,
	vsw_m_multicst,
	vsw_m_unicst,
	vsw_m_tx,
	NULL,
	NULL,
	NULL
};

static	struct	cb_ops	vsw_cb_ops = {
	nulldev,			/* cb_open */
	nulldev,			/* cb_close */
	nodev,				/* cb_strategy */
	nodev,				/* cb_print */
	nodev,				/* cb_dump */
	nodev,				/* cb_read */
	nodev,				/* cb_write */
	nodev,				/* cb_ioctl */
	nodev,				/* cb_devmap */
	nodev,				/* cb_mmap */
	nodev,				/* cb_segmap */
	nochpoll,			/* cb_chpoll */
	ddi_prop_op,			/* cb_prop_op */
	NULL,				/* cb_stream */
	D_MP,				/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};

static	struct	dev_ops	vsw_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	vsw_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	vsw_attach,		/* devo_attach */
	vsw_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&vsw_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	ddi_power		/* devo_power */
};

extern	struct	mod_ops	mod_driverops;
static struct modldrv vswmodldrv = {
	&mod_driverops,
	"sun4v Virtual Switch Driver %I%",
	&vsw_ops,
};

#define	LDC_ENTER_LOCK(ldcp)	\
				mutex_enter(&((ldcp)->ldc_cblock));\
				mutex_enter(&((ldcp)->ldc_txlock));
#define	LDC_EXIT_LOCK(ldcp)	\
				mutex_exit(&((ldcp)->ldc_txlock));\
				mutex_exit(&((ldcp)->ldc_cblock));
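
/*
 * Lock ordering: the channel callback lock (ldc_cblock) is always
 * acquired before the transmit lock (ldc_txlock), and the two are
 * released in the reverse order.
 */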

/* Driver soft state ptr  */
static void	*vsw_state;

/*
 * Linked list of "vsw_t" structures - one per instance.
 */
vsw_t		*vsw_head = NULL;
krwlock_t	vsw_rw;

/*
 * Property names
 */
static char vdev_propname[] = "virtual-device";
static char vsw_propname[] = "virtual-network-switch";
static char physdev_propname[] = "vsw-phys-dev";
static char smode_propname[] = "vsw-switch-mode";
static char macaddr_propname[] = "local-mac-address";
static char remaddr_propname[] = "remote-mac-address";
static char ldcids_propname[] = "ldc-ids";
static char chan_propname[] = "channel-endpoint";
static char id_propname[] = "id";
static char reg_propname[] = "reg";

/* supported versions */
static	ver_sup_t	vsw_versions[] = { {1, 0} };

/*
 * Matching criteria passed to the MDEG to register interest
 * in changes to 'virtual-device-port' nodes identified by their
 * 'id' property.
 */
static md_prop_match_t vport_prop_match[] = {
	{ MDET_PROP_VAL,    "id"   },
	{ MDET_LIST_END,    NULL    }
};

static mdeg_node_match_t vport_match = { "virtual-device-port",
						vport_prop_match };

/*
 * Specification of an MD node passed to the MDEG to filter any
 * 'vport' nodes that do not belong to the specified node. This
 * template is copied for each vsw instance and filled in with
 * the appropriate 'cfg-handle' value before being passed to the MDEG.
 */
static mdeg_prop_spec_t vsw_prop_template[] = {
	{ MDET_PROP_STR,    "name",		vsw_propname },
	{ MDET_PROP_VAL,    "cfg-handle",	NULL	},
	{ MDET_LIST_END,    NULL,		NULL	}
};

#define	VSW_SET_MDEG_PROP_INST(specp, val)	(specp)[1].ps_val = (val);
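
/*
 * Typical usage (a sketch only; the actual copy is performed by
 * vsw_mdeg_register(), whose body is outside this section):
 *
 *	mdeg_prop_spec_t	*pspecp;
 *
 *	pspecp = kmem_alloc(sizeof (vsw_prop_template), KM_SLEEP);
 *	bcopy(vsw_prop_template, pspecp, sizeof (vsw_prop_template));
 *	VSW_SET_MDEG_PROP_INST(pspecp, cfghdl);
 *	...pspecp and vport_match are then handed to the MDEG...
 */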

/*
 * From /etc/system enable/disable thread per ring. This is a mode
 * selection that is done at vsw driver attach time.
 */
boolean_t vsw_multi_ring_enable = B_FALSE;
int vsw_mac_rx_rings = VSW_MAC_RX_RINGS;
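
/*
 * For example, adding the following lines to /etc/system (values are
 * read at the next boot) enables a worker thread per receive ring:
 *
 *	set vsw:vsw_multi_ring_enable = 1
 *	set vsw:vsw_mac_rx_rings = <number of rings>
 */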

/*
 * Print debug messages - set to 0x1f to enable all msgs
 * or 0x0 to turn all off.
 */
int vswdbg = 0x0;
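/* e.g. "set vsw:vswdbg = 0x1f" in /etc/system enables all debug output */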

/*
 * debug levels:
 * 0x01:	Function entry/exit tracing
 * 0x02:	Internal function messages
 * 0x04:	Verbose internal messages
 * 0x08:	Warning messages
 * 0x10:	Error messages
 */

static void
vswdebug(vsw_t *vswp, const char *fmt, ...)
{
	char buf[512];
	va_list ap;

	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);

	if (vswp == NULL)
		cmn_err(CE_CONT, "%s\n", buf);
	else
		cmn_err(CE_CONT, "vsw%d: %s\n", vswp->instance, buf);
}

/*
 * For the moment the state dump routines have their own
 * private flag.
 */
#define	DUMP_STATE	0

#if DUMP_STATE

#define	DUMP_TAG(tag) \
{			\
	D1(NULL, "DUMP_TAG: type 0x%llx", (tag).vio_msgtype); \
	D1(NULL, "DUMP_TAG: stype 0x%llx", (tag).vio_subtype);	\
	D1(NULL, "DUMP_TAG: senv 0x%llx", (tag).vio_subtype_env);	\
}

#define	DUMP_TAG_PTR(tag) \
{			\
	D1(NULL, "DUMP_TAG: type 0x%llx", (tag)->vio_msgtype); \
	D1(NULL, "DUMP_TAG: stype 0x%llx", (tag)->vio_subtype);	\
	D1(NULL, "DUMP_TAG: senv 0x%llx", (tag)->vio_subtype_env);	\
}

#define	DUMP_FLAGS(flags) dump_flags(flags);
#define	DISPLAY_STATE()	display_state()

#else

#define	DUMP_TAG(tag)
#define	DUMP_TAG_PTR(tag)
#define	DUMP_FLAGS(state)
#define	DISPLAY_STATE()

#endif	/* DUMP_STATE */

#ifdef DEBUG

#define	D1		\
if (vswdbg & 0x01)	\
	vswdebug

#define	D2		\
if (vswdbg & 0x02)	\
	vswdebug

#define	D3		\
if (vswdbg & 0x04)	\
	vswdebug

#define	DWARN		\
if (vswdbg & 0x08)	\
	vswdebug

#define	DERR		\
if (vswdbg & 0x10)	\
	vswdebug

#else

#define	DERR		if (0)	vswdebug
#define	DWARN		if (0)	vswdebug
#define	D1		if (0)	vswdebug
#define	D2		if (0)	vswdebug
#define	D3		if (0)	vswdebug

#endif	/* DEBUG */

static struct modlinkage modlinkage = {
	MODREV_1,
	&vswmodldrv,
	NULL
};

int
_init(void)
{
	int status;

	rw_init(&vsw_rw, NULL, RW_DRIVER, NULL);

	status = ddi_soft_state_init(&vsw_state, sizeof (vsw_t), 1);
	if (status != 0) {
		return (status);
	}

	mac_init_ops(&vsw_ops, "vsw");
	status = mod_install(&modlinkage);
	if (status != 0) {
		ddi_soft_state_fini(&vsw_state);
	}
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status != 0)
		return (status);
	mac_fini_ops(&vsw_ops);
	ddi_soft_state_fini(&vsw_state);

	rw_destroy(&vsw_rw);

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
vsw_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	vsw_t		*vswp;
	int		instance, i;
	char		hashname[MAXNAMELEN];
	char		qname[TASKQ_NAMELEN];
	int		rv = 1;
	enum		{ PROG_init = 0x00,
				PROG_if_lock = 0x01,
				PROG_fdb = 0x02,
				PROG_mfdb = 0x04,
				PROG_report_dev = 0x08,
				PROG_plist = 0x10,
				PROG_taskq = 0x20}
			progress;
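	/*
	 * Each PROG_* bit is set once the corresponding setup step has
	 * completed, so the vsw_attach_fail path below only needs to
	 * undo the work that was actually done.
	 */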

	progress = PROG_init;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* nothing to do for this non-device */
		return (DDI_SUCCESS);
	case DDI_PM_RESUME:
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);
	if (ddi_soft_state_zalloc(vsw_state, instance) != DDI_SUCCESS) {
		DERR(NULL, "vsw%d: ddi_soft_state_zalloc failed", instance);
		return (DDI_FAILURE);
	}
	vswp = ddi_get_soft_state(vsw_state, instance);

	if (vswp == NULL) {
		DERR(NULL, "vsw%d: ddi_get_soft_state failed", instance);
		goto vsw_attach_fail;
	}

	vswp->dip = dip;
	vswp->instance = instance;
	ddi_set_driver_private(dip, (caddr_t)vswp);

	rw_init(&vswp->if_lockrw, NULL, RW_DRIVER, NULL);
	progress |= PROG_if_lock;

	/*
	 * Get the various properties such as physical device name
	 * (vsw-phys-dev), switch mode etc from the MD.
	 */
	vsw_get_md_properties(vswp);

	/* setup the unicast forwarding database  */
	(void) snprintf(hashname, MAXNAMELEN, "vsw_unicst_table-%d",
							vswp->instance);
	D2(vswp, "creating unicast hash table (%s)...", hashname);
	vswp->fdb = mod_hash_create_ptrhash(hashname, VSW_NCHAINS,
		mod_hash_null_valdtor, sizeof (void *));

	progress |= PROG_fdb;

	/* setup the multicast forwarding database */
	(void) snprintf(hashname, MAXNAMELEN, "vsw_mcst_table-%d",
							vswp->instance);
	D2(vswp, "creating multicast hash table (%s)...", hashname);
	rw_init(&vswp->mfdbrw, NULL, RW_DRIVER, NULL);
	vswp->mfdb = mod_hash_create_ptrhash(hashname, VSW_NCHAINS,
			mod_hash_null_valdtor, sizeof (void *));

	progress |= PROG_mfdb;

	/*
	 * create lock protecting list of multicast addresses
	 * which could come via m_multicst() entry point when plumbed.
	 */
	mutex_init(&vswp->mca_lock, NULL, MUTEX_DRIVER, NULL);
	vswp->mcap = NULL;

	ddi_report_dev(vswp->dip);

	progress |= PROG_report_dev;

	WRITE_ENTER(&vsw_rw);
	vswp->next = vsw_head;
	vsw_head = vswp;
	RW_EXIT(&vsw_rw);

	/* setup the port list */
	rw_init(&vswp->plist.lockrw, NULL, RW_DRIVER, NULL);
	vswp->plist.head = NULL;

	progress |= PROG_plist;

	/*
	 * Create the taskq which will process all the VIO
	 * control messages.
	 */
	(void) snprintf(qname, TASKQ_NAMELEN, "vsw_taskq%d", vswp->instance);
	if ((vswp->taskq_p = ddi_taskq_create(vswp->dip, qname, 1,
					TASKQ_DEFAULTPRI, 0)) == NULL) {
		cmn_err(CE_WARN, "Unable to create task queue");
		goto vsw_attach_fail;
	}

	progress |= PROG_taskq;

	/* select best switching mode */
	for (i = 0; i < vswp->smode_num; i++) {
		vswp->smode_idx = i;
		switch (vswp->smode[i]) {
		case VSW_LAYER2:
		case VSW_LAYER2_PROMISC:
			rv = vsw_setup_layer2(vswp);
			break;

		case VSW_LAYER3:
			rv = vsw_setup_layer3(vswp);
			break;

		default:
			DERR(vswp, "unknown switch mode");
			rv = 1;
			break;
		}

		if (rv == 0)
			break;
	}

	if (rv == 1) {
		cmn_err(CE_WARN, "Unable to setup switching mode");
		goto vsw_attach_fail;
	}

	D2(vswp, "Operating in mode %d", vswp->smode[vswp->smode_idx]);

	/*
	 * Register with the MAC layer as a network device so
	 * we can be plumbed if desired.
	 *
	 * Do this in both layer 2 and layer 3 mode.
	 */
	vswp->if_state &= ~VSW_IF_UP;
	if (vswp->mdprops & (VSW_MD_MACADDR | VSW_DEV_MACADDR)) {
		if (vsw_mac_register(vswp) != 0) {
			cmn_err(CE_WARN, "Unable to register as provider "
				"with MAC layer, continuing with attach");
		}
	}
6641ae08745Sheppo 
665d10e4ef2Snarayan 	/* prevent auto-detaching */
666d10e4ef2Snarayan 	if (ddi_prop_update_int(DDI_DEV_T_NONE, vswp->dip,
667d10e4ef2Snarayan 				DDI_NO_AUTODETACH, 1) != DDI_SUCCESS) {
668d10e4ef2Snarayan 		cmn_err(CE_NOTE, "Unable to set \"%s\" property for "
669d10e4ef2Snarayan 			"instance %u", DDI_NO_AUTODETACH, instance);
670d10e4ef2Snarayan 	}
671d10e4ef2Snarayan 
6721ae08745Sheppo 	/*
6731ae08745Sheppo 	 * Now we have everything setup, register for MD change
6741ae08745Sheppo 	 * events.
6751ae08745Sheppo 	 */
6761ae08745Sheppo 	vsw_mdeg_register(vswp);
6771ae08745Sheppo 
6781ae08745Sheppo 	return (DDI_SUCCESS);
6791ae08745Sheppo 
6801ae08745Sheppo vsw_attach_fail:
6811ae08745Sheppo 	DERR(NULL, "vsw_attach: failed");
6821ae08745Sheppo 
6831ae08745Sheppo 	if (progress & PROG_taskq)
6841ae08745Sheppo 		ddi_taskq_destroy(vswp->taskq_p);
6851ae08745Sheppo 
6861ae08745Sheppo 	if (progress & PROG_plist)
6871ae08745Sheppo 		rw_destroy(&vswp->plist.lockrw);
6881ae08745Sheppo 
6891ae08745Sheppo 	if (progress & PROG_report_dev) {
6901ae08745Sheppo 		ddi_remove_minor_node(dip, NULL);
6911ae08745Sheppo 		mutex_destroy(&vswp->mca_lock);
6921ae08745Sheppo 	}
6931ae08745Sheppo 
6941ae08745Sheppo 	if (progress & PROG_mfdb) {
6951ae08745Sheppo 		mod_hash_destroy_hash(vswp->mfdb);
6961ae08745Sheppo 		vswp->mfdb = NULL;
6971ae08745Sheppo 		rw_destroy(&vswp->mfdbrw);
6981ae08745Sheppo 	}
6991ae08745Sheppo 
7001ae08745Sheppo 	if (progress & PROG_fdb) {
7011ae08745Sheppo 		mod_hash_destroy_hash(vswp->fdb);
7021ae08745Sheppo 		vswp->fdb = NULL;
7031ae08745Sheppo 	}
7041ae08745Sheppo 
7051ae08745Sheppo 	if (progress & PROG_if_lock)
7061ae08745Sheppo 		rw_destroy(&vswp->if_lockrw);
7071ae08745Sheppo 
7081ae08745Sheppo 	ddi_soft_state_free(vsw_state, instance);
7091ae08745Sheppo 	return (DDI_FAILURE);
7101ae08745Sheppo }
7111ae08745Sheppo 
7121ae08745Sheppo static int
7131ae08745Sheppo vsw_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7141ae08745Sheppo {
715d10e4ef2Snarayan 	vio_mblk_pool_t		*poolp, *npoolp;
7161ae08745Sheppo 	vsw_t			**vswpp, *vswp;
7171ae08745Sheppo 	int 			instance;
7181ae08745Sheppo 
7191ae08745Sheppo 	instance = ddi_get_instance(dip);
7201ae08745Sheppo 	vswp = ddi_get_soft_state(vsw_state, instance);
7211ae08745Sheppo 
7221ae08745Sheppo 	if (vswp == NULL) {
7231ae08745Sheppo 		return (DDI_FAILURE);
7241ae08745Sheppo 	}
7251ae08745Sheppo 
7261ae08745Sheppo 	switch (cmd) {
7271ae08745Sheppo 	case DDI_DETACH:
7281ae08745Sheppo 		break;
7291ae08745Sheppo 	case DDI_SUSPEND:
7301ae08745Sheppo 	case DDI_PM_SUSPEND:
7311ae08745Sheppo 	default:
7321ae08745Sheppo 		return (DDI_FAILURE);
7331ae08745Sheppo 	}
7341ae08745Sheppo 
7351ae08745Sheppo 	D2(vswp, "detaching instance %d", instance);
7361ae08745Sheppo 
737e1ebb9ecSlm66018 	if (vswp->mdprops & (VSW_MD_MACADDR | VSW_DEV_MACADDR)) {
7381ae08745Sheppo 		if (vsw_mac_unregister(vswp) != 0) {
7391ae08745Sheppo 			cmn_err(CE_WARN, "Unable to detach from MAC layer");
7401ae08745Sheppo 			return (DDI_FAILURE);
7411ae08745Sheppo 		}
7421ae08745Sheppo 		rw_destroy(&vswp->if_lockrw);
743d10e4ef2Snarayan 	}
7441ae08745Sheppo 
7451ae08745Sheppo 	vsw_mdeg_unregister(vswp);
7461ae08745Sheppo 
747e1ebb9ecSlm66018 	/* remove mac layer callback */
748e1ebb9ecSlm66018 	if ((vswp->mh != NULL) && (vswp->mrh != NULL)) {
749e1ebb9ecSlm66018 		mac_rx_remove(vswp->mh, vswp->mrh);
750e1ebb9ecSlm66018 		vswp->mrh = NULL;
7511ae08745Sheppo 	}
7521ae08745Sheppo 
7531ae08745Sheppo 	if (vsw_detach_ports(vswp) != 0) {
7541ae08745Sheppo 		cmn_err(CE_WARN, "Unable to detach ports");
7551ae08745Sheppo 		return (DDI_FAILURE);
7561ae08745Sheppo 	}
7571ae08745Sheppo 
7581ae08745Sheppo 	/*
759e1ebb9ecSlm66018 	 * Now that the ports have been deleted, stop and close
760e1ebb9ecSlm66018 	 * the physical device.
761e1ebb9ecSlm66018 	 */
762e1ebb9ecSlm66018 	if (vswp->mh != NULL) {
7637636cb21Slm66018 		if (vswp->mstarted)
764e1ebb9ecSlm66018 			mac_stop(vswp->mh);
7657636cb21Slm66018 		if (vswp->mresources)
7667636cb21Slm66018 			mac_resource_set(vswp->mh, NULL, NULL);
767e1ebb9ecSlm66018 		mac_close(vswp->mh);
768e1ebb9ecSlm66018 
769e1ebb9ecSlm66018 		vswp->mh = NULL;
770e1ebb9ecSlm66018 		vswp->txinfo = NULL;
771e1ebb9ecSlm66018 	}
772e1ebb9ecSlm66018 
773e1ebb9ecSlm66018 	/*
774d10e4ef2Snarayan 	 * Destroy any free pools that may still exist.
775d10e4ef2Snarayan 	 */
776d10e4ef2Snarayan 	poolp = vswp->rxh;
777d10e4ef2Snarayan 	while (poolp != NULL) {
778d10e4ef2Snarayan 		npoolp = vswp->rxh = poolp->nextp;
779d10e4ef2Snarayan 		if (vio_destroy_mblks(poolp) != 0) {
780d10e4ef2Snarayan 			vswp->rxh = poolp;
781d10e4ef2Snarayan 			return (DDI_FAILURE);
782d10e4ef2Snarayan 		}
783d10e4ef2Snarayan 		poolp = npoolp;
784d10e4ef2Snarayan 	}
785d10e4ef2Snarayan 
786d10e4ef2Snarayan 	/*
7871ae08745Sheppo 	 * Remove this instance from any entries it may be on in
7881ae08745Sheppo 	 * the hash table by using the list of addresses maintained
7891ae08745Sheppo 	 * in the vsw_t structure.
7901ae08745Sheppo 	 */
7911ae08745Sheppo 	vsw_del_mcst_vsw(vswp);
7921ae08745Sheppo 
7931ae08745Sheppo 	vswp->mcap = NULL;
7941ae08745Sheppo 	mutex_destroy(&vswp->mca_lock);
7951ae08745Sheppo 
	/*
	 * By now any pending tasks have finished and the underlying
	 * LDCs have been destroyed, so it's safe to delete the control
	 * message taskq.
	 */
	if (vswp->taskq_p != NULL)
		ddi_taskq_destroy(vswp->taskq_p);

	/*
	 * At this stage all the data pointers in the hash table
	 * should be NULL, as all the ports have been removed and will
	 * have deleted themselves from the port lists which the data
	 * pointers point to. Hence we can destroy the table using the
	 * default destructors.
	 */
	D2(vswp, "vsw_detach: destroying hash tables..");
	mod_hash_destroy_hash(vswp->fdb);
	vswp->fdb = NULL;

	WRITE_ENTER(&vswp->mfdbrw);
	mod_hash_destroy_hash(vswp->mfdb);
	vswp->mfdb = NULL;
	RW_EXIT(&vswp->mfdbrw);
	rw_destroy(&vswp->mfdbrw);

	ddi_remove_minor_node(dip, NULL);

	rw_destroy(&vswp->plist.lockrw);
	WRITE_ENTER(&vsw_rw);
	for (vswpp = &vsw_head; *vswpp; vswpp = &(*vswpp)->next) {
		if (*vswpp == vswp) {
			*vswpp = vswp->next;
			break;
		}
	}
	RW_EXIT(&vsw_rw);
	ddi_soft_state_free(vsw_state, instance);

	return (DDI_SUCCESS);
}

static int
vsw_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip))

	vsw_t	*vswp = NULL;
	dev_t	dev = (dev_t)arg;
	int	instance;

	instance = getminor(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((vswp = ddi_get_soft_state(vsw_state, instance)) == NULL) {
			*result = NULL;
			return (DDI_FAILURE);
		}
		*result = vswp->dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	default:
		*result = NULL;
		return (DDI_FAILURE);
	}
}

/*
 * Get the properties from our MD node.
 */
static void
vsw_get_md_properties(vsw_t *vswp)
{
	md_t		*mdp = NULL;
	int		num_nodes = 0;
	int		len = 0, listsz = 0;
	int		num_vdev = 0;
	int		i, idx;
	boolean_t	found_node = B_FALSE;
	char		*smode = NULL;
	char		*curr_mode = NULL;
	char		*physname = NULL;
	char		*node_name = NULL;
	char		*dev;
	uint64_t 	macaddr = 0;
	uint64_t	md_inst, obp_inst;
	mde_cookie_t	*listp = NULL;
	mde_cookie_t	rootnode;

	D1(vswp, "%s: enter", __func__);

	/*
	 * Further down we compare the obp 'reg' property to the
	 * 'cfg-handle' property in the vsw MD node to determine
	 * if the node refers to this particular instance. So if
	 * we can't read the obp value then there is no point
	 * in proceeding further.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, vswp->dip,
			DDI_PROP_DONTPASS, reg_propname) != 1) {
		cmn_err(CE_WARN, "Unable to read %s property "
			"from OBP device node", reg_propname);
		return;
	}

	obp_inst = ddi_prop_get_int(DDI_DEV_T_ANY, vswp->dip,
		DDI_PROP_DONTPASS, reg_propname, 0);

	D2(vswp, "%s: obp_inst 0x%llx", __func__, obp_inst);

	if ((mdp = md_get_handle()) == NULL) {
		DERR(vswp, "%s: unable to init MD", __func__);
		return;
	}

	if ((num_nodes = md_node_count(mdp)) <= 0) {
		DERR(vswp, "%s: invalid number of  nodes found %d",
			__func__, num_nodes);
		(void) md_fini_handle(mdp);
		return;
	}

	D2(vswp, "%s: %d nodes in total in MD", __func__, num_nodes);

	/* allocate enough space for node list */
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* Get the list of virtual devices */
	num_vdev = md_scan_dag(mdp, rootnode,
		md_find_name(mdp, vdev_propname),
		md_find_name(mdp, "fwd"), listp);

	if (num_vdev <= 0) {
		DERR(vswp, "%s: didn't find any virtual-device nodes in MD",
			__func__);
		goto md_prop_exit;
	}

	D2(vswp, "%s: %d virtual-device nodes found", __func__, num_vdev);

	/* Look for the virtual switch nodes in the list */
	for (idx = 0; idx < num_vdev; idx++) {
		if (md_get_prop_str(mdp, listp[idx],
				"name", &node_name) != 0) {
			DERR(vswp, "%s: unable to get node name", __func__);
			continue;

		}

		if (strcmp(node_name, vsw_propname) == 0) {
			/* Virtual switch node */
			if (md_get_prop_val(mdp, listp[idx],
				"cfg-handle", &md_inst) != 0) {
				DERR(vswp, "%s: unable to get cfg-handle from"
					" node %d", __func__, idx);
				goto md_prop_exit;
			} else if (md_inst == obp_inst) {
				D2(vswp, "%s: found matching node (%d)"
					" 0x%llx == 0x%llx", __func__, idx,
					md_inst, obp_inst);
				found_node = B_TRUE;
				break;
			}
		}
	}

	if (!found_node) {
		DWARN(vswp, "%s: couldn't find correct vsw node", __func__);
		goto md_prop_exit;
	}

	/*
	 * Now, having found the correct node, get the various properties.
	 */

	if (md_get_prop_data(mdp, listp[idx], physdev_propname,
				(uint8_t **)(&physname), &len) != 0) {
		cmn_err(CE_WARN, "%s: unable to get name(s) of physical "
			"device(s) from MD", __func__);
	} else if ((strlen(physname) + 1) > LIFNAMSIZ) {
		cmn_err(CE_WARN, "%s is too long a device name", physname);
	} else {
		(void) strncpy(vswp->physname, physname, strlen(physname) + 1);
		vswp->mdprops |= VSW_MD_PHYSNAME;
		D2(vswp, "%s: using first device specified (%s)",
			__func__, vswp->physname);
	}

#ifdef DEBUG
	/*
	 * As a temporary measure to aid testing we check to see if there
	 * is a vsw.conf file present. If there is we use the value of the
	 * vsw_physname property in the file as the name of the physical
	 * device, overriding the value from the MD.
	 *
	 * There may be multiple devices listed, but for the moment
	 * we just use the first one.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, vswp->dip, 0,
		"vsw_physname", &dev) == DDI_PROP_SUCCESS) {
		if ((strlen(dev) + 1) > LIFNAMSIZ) {
			cmn_err(CE_WARN, "%s is too long a device name", dev);
		} else {
			cmn_err(CE_NOTE, "%s: using device name (%s) from "
				"config file", __func__, dev);

			(void) strncpy(vswp->physname, dev, strlen(dev) + 1);
			vswp->mdprops |= VSW_MD_PHYSNAME;
		}

		ddi_prop_free(dev);

	}
#endif

	/* mac address for vswitch device itself */
	if (md_get_prop_val(mdp, listp[idx],
			macaddr_propname, &macaddr) != 0) {
		cmn_err(CE_WARN, "!Unable to get MAC address from MD");

		/*
		 * Fallback to using the mac address of the physical
		 * device.
		 */
		if (vsw_get_physaddr(vswp) == 0) {
			cmn_err(CE_NOTE, "!Using MAC address from physical "
				"device (%s)", vswp->physname);
		}
	} else {
		READ_ENTER(&vswp->if_lockrw);
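		/*
		 * The MD supplies the address as a single 64-bit value;
		 * unpack it below into the 6-byte Ethernet address, with
		 * the least significant byte of the value ending up as
		 * the last octet of the address.
		 */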
		for (i = ETHERADDRL - 1; i >= 0; i--) {
			vswp->if_addr.ether_addr_octet[i] = macaddr & 0xFF;
			macaddr >>= 8;
		}
		RW_EXIT(&vswp->if_lockrw);
		vswp->mdprops |= VSW_MD_MACADDR;
	}

	/*
	 * Get the switch-mode property. The modes are listed in
	 * decreasing order of preference, i.e. the preferred mode is
	 * the first item in the list.
	 */
	len = 0;
	vswp->smode_num = 0;
	if (md_get_prop_data(mdp, listp[idx], smode_propname,
				(uint8_t **)(&smode), &len) != 0) {
		/*
		 * Unable to get switch-mode property from MD, nothing
		 * more we can do.
		 */
		cmn_err(CE_WARN, "!unable to get switch mode property");
		goto md_prop_exit;
	}

	curr_mode = smode;
	/*
	 * Modes of operation:
	 * 'switched'	 - layer 2 switching, underlying HW in
	 *			programmed mode.
	 * 'promiscuous' - layer 2 switching, underlying HW in
	 *			promiscuous mode.
	 * 'routed'	 - layer 3 (i.e. IP) routing, underlying HW
	 *			in non-promiscuous mode.
	 */
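	/*
	 * The property value is a packed list of NUL-terminated strings
	 * (len bytes in total); walk it one mode at a time below.
	 */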
	while ((curr_mode < (smode + len)) && (vswp->smode_num < NUM_SMODES)) {
		D2(vswp, "%s: curr_mode = [%s]", __func__, curr_mode);
		if (strcmp(curr_mode, "switched") == 0) {
			vswp->smode[vswp->smode_num++] = VSW_LAYER2;
		} else if (strcmp(curr_mode, "promiscuous") == 0) {
			vswp->smode[vswp->smode_num++] = VSW_LAYER2_PROMISC;
		} else if (strcmp(curr_mode, "routed") == 0) {
			vswp->smode[vswp->smode_num++] = VSW_LAYER3;
		} else {
			cmn_err(CE_WARN, "Unknown switch mode %s, setting to"
				" default switched mode", curr_mode);
			vswp->smode[vswp->smode_num++] = VSW_LAYER2;
		}
		curr_mode += strlen(curr_mode) + 1;
	}

	D2(vswp, "%d switching modes specified", vswp->smode_num);

	if (vswp->smode_num > 0)
		vswp->mdprops |= VSW_MD_SMODE;

md_prop_exit:
	(void) md_fini_handle(mdp);

	kmem_free(listp, listsz);

	D1(vswp, "%s: exit", __func__);
}

/*
 * Get the mac address of the physical device.
 *
 * Returns 0 on success, 1 on failure.
 */
static int
vsw_get_physaddr(vsw_t *vswp)
{
	mac_handle_t	mh;
	char		drv[LIFNAMSIZ];
	uint_t		ddi_instance;

	D1(vswp, "%s: enter", __func__);

	if (ddi_parse(vswp->physname, drv, &ddi_instance) != DDI_SUCCESS)
		return (1);

	if (mac_open(vswp->physname, ddi_instance, &mh) != 0) {
		cmn_err(CE_WARN, "!mac_open %s failed", vswp->physname);
		return (1);
	}

	READ_ENTER(&vswp->if_lockrw);
	mac_unicst_get(mh, vswp->if_addr.ether_addr_octet);
	RW_EXIT(&vswp->if_lockrw);

	mac_close(mh);

	vswp->mdprops |= VSW_DEV_MACADDR;

	D1(vswp, "%s: exit", __func__);

	return (0);
}

/*
 * Check to see if the card supports the setting of multiple unicast
 * addresses.
 *
 * Returns 0 if card supports the programming of multiple unicast addresses
 * and there are free address slots available, otherwise returns 1.
 */
static int
vsw_get_hw_maddr(vsw_t *vswp)
{
	D1(vswp, "%s: enter", __func__);

	if (vswp->mh == NULL) {
		return (1);
	}

	if (!mac_capab_get(vswp->mh, MAC_CAPAB_MULTIADDRESS, &vswp->maddr)) {
		DWARN(vswp, "Unable to get capabilities of"
			" underlying device (%s)", vswp->physname);
		return (1);
	}

	if (vswp->maddr.maddr_naddrfree == 0) {
		cmn_err(CE_WARN,
			"!device %s has no free unicast address slots",
			vswp->physname);
		return (1);
	}

	D2(vswp, "%s: %d addrs : %d free", __func__,
		vswp->maddr.maddr_naddr, vswp->maddr.maddr_naddrfree);

	D1(vswp, "%s: exit", __func__);

	return (0);
}

/*
 * Setup for layer 2 switching.
 *
 * Returns 0 on success, 1 on failure.
 */
static int
vsw_setup_layer2(vsw_t *vswp)
{
	D1(vswp, "%s: enter", __func__);

	vsw_switch_frame = vsw_switch_l2_frame;

	/*
	 * Attempt to link into the MAC layer so we can get
	 * and send packets out over the physical adapter.
	 */
	if (vswp->mdprops & VSW_MD_PHYSNAME) {
		if (vsw_mac_attach(vswp) != 0) {
			/*
			 * Registration with the MAC layer has failed,
			 * so return 1 so that the caller can fall back to
			 * the next preferred switching method.
			 */
1192e1ebb9ecSlm66018 			cmn_err(CE_WARN, "!Unable to join as MAC layer "
1193e1ebb9ecSlm66018 				"client");
1194e1ebb9ecSlm66018 			return (1);
11951ae08745Sheppo 		}
1196e1ebb9ecSlm66018 
1197e1ebb9ecSlm66018 		if (vswp->smode[vswp->smode_idx] == VSW_LAYER2) {
1198e1ebb9ecSlm66018 			/*
1199e1ebb9ecSlm66018 			 * Verify that underlying device can support multiple
1200e1ebb9ecSlm66018 			 * unicast mac addresses, and has free capacity.
1201e1ebb9ecSlm66018 			 */
1202e1ebb9ecSlm66018 			if (vsw_get_hw_maddr(vswp) != 0) {
1203e1ebb9ecSlm66018 				cmn_err(CE_WARN, "!unable to setup switching");
1204e1ebb9ecSlm66018 				vsw_mac_detach(vswp);
1205e1ebb9ecSlm66018 				return (1);
1206e1ebb9ecSlm66018 			}
1207e1ebb9ecSlm66018 		}
1208e1ebb9ecSlm66018 
12091ae08745Sheppo 	} else {
1210e1ebb9ecSlm66018 		/*
1211e1ebb9ecSlm66018 		 * No physical device name found in MD which is
1212e1ebb9ecSlm66018 		 * required for layer 2.
1213e1ebb9ecSlm66018 		 */
1214e1ebb9ecSlm66018 		cmn_err(CE_WARN, "!no physical device name specified");
1215e1ebb9ecSlm66018 		return (1);
12161ae08745Sheppo 	}
12171ae08745Sheppo 
12181ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
12191ae08745Sheppo 
1220e1ebb9ecSlm66018 	return (0);
12211ae08745Sheppo }
12221ae08745Sheppo 
12231ae08745Sheppo static int
12241ae08745Sheppo vsw_setup_layer3(vsw_t *vswp)
12251ae08745Sheppo {
12261ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
12271ae08745Sheppo 
12281ae08745Sheppo 	D2(vswp, "%s: operating in layer 3 mode", __func__);
12291ae08745Sheppo 	vsw_switch_frame = vsw_switch_l3_frame;
12301ae08745Sheppo 
12311ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
12321ae08745Sheppo 
12331ae08745Sheppo 	return (0);
12341ae08745Sheppo }
12351ae08745Sheppo 
12361ae08745Sheppo /*
12371ae08745Sheppo  * Link into the MAC layer to gain access to the services provided by
12381ae08745Sheppo  * the underlying physical device driver (which should also have
12391ae08745Sheppo  * registered with the MAC layer).
12401ae08745Sheppo  *
12411ae08745Sheppo  * Only when in layer 2 mode.
12421ae08745Sheppo  */
12431ae08745Sheppo static int
12441ae08745Sheppo vsw_mac_attach(vsw_t *vswp)
12451ae08745Sheppo {
1246ba2e4443Sseb 	char	drv[LIFNAMSIZ];
1247ba2e4443Sseb 	uint_t	ddi_instance;
1248ba2e4443Sseb 
12497636cb21Slm66018 	D1(vswp, "%s: enter", __func__);
12501ae08745Sheppo 
12511ae08745Sheppo 	vswp->mh = NULL;
12521ae08745Sheppo 	vswp->mrh = NULL;
12537636cb21Slm66018 	vswp->mstarted = B_FALSE;
12547636cb21Slm66018 	vswp->mresources = B_FALSE;
12551ae08745Sheppo 
12561ae08745Sheppo 	ASSERT(vswp->mdprops & VSW_MD_PHYSNAME);
12571ae08745Sheppo 
1258ba2e4443Sseb 	if (ddi_parse(vswp->physname, drv, &ddi_instance) != DDI_SUCCESS) {
1259ba2e4443Sseb 		cmn_err(CE_WARN, "invalid device name: %s", vswp->physname);
1260ba2e4443Sseb 		goto mac_fail_exit;
1261ba2e4443Sseb 	}
1262ba2e4443Sseb 	if ((mac_open(vswp->physname, ddi_instance, &vswp->mh)) != 0) {
12631ae08745Sheppo 		cmn_err(CE_WARN, "mac_open %s failed", vswp->physname);
12641ae08745Sheppo 		goto mac_fail_exit;
12651ae08745Sheppo 	}
12661ae08745Sheppo 
12677636cb21Slm66018 	ASSERT(vswp->mh != NULL);
12687636cb21Slm66018 
12691ae08745Sheppo 	D2(vswp, "vsw_mac_attach: using device %s", vswp->physname);
12701ae08745Sheppo 
12717636cb21Slm66018 	if (vsw_multi_ring_enable) {
12727636cb21Slm66018 		vsw_mac_ring_tbl_init(vswp);
12731ae08745Sheppo 
12747636cb21Slm66018 		/*
12757636cb21Slm66018 		 * Register our receive callback.
12767636cb21Slm66018 		 */
12777636cb21Slm66018 		vswp->mrh = mac_rx_add(vswp->mh,
12787636cb21Slm66018 			vsw_rx_queue_cb, (void *)vswp);
12797636cb21Slm66018 
12807636cb21Slm66018 		/*
12817636cb21Slm66018 		 * Register our mac resource callback.
12827636cb21Slm66018 		 */
12837636cb21Slm66018 		mac_resource_set(vswp->mh, vsw_mac_ring_add_cb, (void *)vswp);
12847636cb21Slm66018 		vswp->mresources = B_TRUE;
12857636cb21Slm66018 
12867636cb21Slm66018 		/*
12877636cb21Slm66018 		 * Get the ring resources available to us from
12887636cb21Slm66018 		 * the mac below us.
12897636cb21Slm66018 		 */
12907636cb21Slm66018 		mac_resources(vswp->mh);
12917636cb21Slm66018 	} else {
12927636cb21Slm66018 		/*
12937636cb21Slm66018 		 * Just register our rx callback function
12947636cb21Slm66018 		 */
12957636cb21Slm66018 		vswp->mrh = mac_rx_add(vswp->mh, vsw_rx_cb, (void *)vswp);
12967636cb21Slm66018 	}
12977636cb21Slm66018 
12987636cb21Slm66018 	ASSERT(vswp->mrh != NULL);
12997636cb21Slm66018 
13007636cb21Slm66018 	/* Get the MAC tx fn */
13011ae08745Sheppo 	vswp->txinfo = mac_tx_get(vswp->mh);
13021ae08745Sheppo 
13031ae08745Sheppo 	/* start the interface */
13041ae08745Sheppo 	if (mac_start(vswp->mh) != 0) {
13051ae08745Sheppo 		cmn_err(CE_WARN, "could not start mac interface");
13061ae08745Sheppo 		goto mac_fail_exit;
13071ae08745Sheppo 	}
13081ae08745Sheppo 
13097636cb21Slm66018 	vswp->mstarted = B_TRUE;
13107636cb21Slm66018 
13117636cb21Slm66018 	D1(vswp, "%s: exit", __func__);
13121ae08745Sheppo 	return (0);
13131ae08745Sheppo 
13141ae08745Sheppo mac_fail_exit:
13157636cb21Slm66018 	vsw_mac_detach(vswp);
13161ae08745Sheppo 
13177636cb21Slm66018 	D1(vswp, "%s: exit", __func__);
13181ae08745Sheppo 	return (1);
13191ae08745Sheppo }
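
/*
 * Illustrative sketch, not compiled into the driver: vsw_mac_attach()
 * tears down any partially created state itself (via vsw_mac_detach()
 * on its failure path), so a caller only needs to test the return
 * value. The wrapper name below is hypothetical.
 */
#if 0
static int
vsw_phys_link_example(vsw_t *vswp)
{
	if (vsw_mac_attach(vswp) != 0)
		return (1);	/* attach already cleaned up after itself */
	return (0);
}
#endif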
13201ae08745Sheppo 
13211ae08745Sheppo static void
13221ae08745Sheppo vsw_mac_detach(vsw_t *vswp)
13231ae08745Sheppo {
13241ae08745Sheppo 	D1(vswp, "vsw_mac_detach: enter");
13251ae08745Sheppo 
13267636cb21Slm66018 	ASSERT(vswp != NULL);
13277636cb21Slm66018 	ASSERT(vswp->mh != NULL);
13287636cb21Slm66018 
13297636cb21Slm66018 	if (vsw_multi_ring_enable) {
13307636cb21Slm66018 		vsw_mac_ring_tbl_destroy(vswp);
13317636cb21Slm66018 	}
13327636cb21Slm66018 
13337636cb21Slm66018 	if (vswp->mstarted)
13347636cb21Slm66018 		mac_stop(vswp->mh);
13351ae08745Sheppo 	if (vswp->mrh != NULL)
13361ae08745Sheppo 		mac_rx_remove(vswp->mh, vswp->mrh);
13377636cb21Slm66018 	if (vswp->mresources)
13387636cb21Slm66018 		mac_resource_set(vswp->mh, NULL, NULL);
13391ae08745Sheppo 	mac_close(vswp->mh);
13401ae08745Sheppo 
13411ae08745Sheppo 	vswp->mrh = NULL;
13421ae08745Sheppo 	vswp->mh = NULL;
13431ae08745Sheppo 	vswp->txinfo = NULL;
13447636cb21Slm66018 	vswp->mstarted = B_FALSE;
13451ae08745Sheppo 
13461ae08745Sheppo 	D1(vswp, "vsw_mac_detach: exit");
13471ae08745Sheppo }
13481ae08745Sheppo 
13491ae08745Sheppo /*
1350e1ebb9ecSlm66018  * Depending on the mode specified, and on the capabilities and
1351e1ebb9ecSlm66018  * capacity of the underlying device, set up the physical device.
13521ae08745Sheppo  *
1353e1ebb9ecSlm66018  * If in layer 3 mode, then do nothing.
1354e1ebb9ecSlm66018  *
1355e1ebb9ecSlm66018  * If in layer 2 programmed mode, attempt to program the unicast address
1356e1ebb9ecSlm66018  * associated with the port into the physical device. If this is not
1357e1ebb9ecSlm66018  * possible, due to resource exhaustion or simply because the device does
1358e1ebb9ecSlm66018  * not support multiple unicast addresses, then, if required, fall back to
1359e1ebb9ecSlm66018  * putting the card into promisc mode.
1360e1ebb9ecSlm66018  *
1361e1ebb9ecSlm66018  * If in promisc mode then simply set the card into promisc mode.
1362e1ebb9ecSlm66018  *
1363e1ebb9ecSlm66018  * Returns 0 on success, 1 on failure.
13641ae08745Sheppo  */
1365e1ebb9ecSlm66018 static int
1366e1ebb9ecSlm66018 vsw_set_hw(vsw_t *vswp, vsw_port_t *port)
13671ae08745Sheppo {
1368e1ebb9ecSlm66018 	mac_multi_addr_t	mac_addr;
1369e1ebb9ecSlm66018 	void			*mah;
1370e1ebb9ecSlm66018 	int			err;
13711ae08745Sheppo 
1372e1ebb9ecSlm66018 	D1(vswp, "%s: enter", __func__);
1373e1ebb9ecSlm66018 
1374e1ebb9ecSlm66018 	if (vswp->smode[vswp->smode_idx] == VSW_LAYER3)
1375e1ebb9ecSlm66018 		return (0);
1376e1ebb9ecSlm66018 
1377e1ebb9ecSlm66018 	if (vswp->smode[vswp->smode_idx] == VSW_LAYER2_PROMISC) {
1378e1ebb9ecSlm66018 		return (vsw_set_hw_promisc(vswp, port));
1379e1ebb9ecSlm66018 	}
1380e1ebb9ecSlm66018 
1381e1ebb9ecSlm66018 	if (vswp->maddr.maddr_handle == NULL)
1382e1ebb9ecSlm66018 		return (1);
1383e1ebb9ecSlm66018 
1384e1ebb9ecSlm66018 	mah = vswp->maddr.maddr_handle;
1385e1ebb9ecSlm66018 
1386e1ebb9ecSlm66018 	/*
1387e1ebb9ecSlm66018 	 * Attempt to program the unicast address into the HW.
1388e1ebb9ecSlm66018 	 */
1389e1ebb9ecSlm66018 	mac_addr.mma_addrlen = ETHERADDRL;
1390e1ebb9ecSlm66018 	ether_copy(&port->p_macaddr, &mac_addr.mma_addr);
1391e1ebb9ecSlm66018 
1392e1ebb9ecSlm66018 	err = vswp->maddr.maddr_add(mah, &mac_addr);
1393e1ebb9ecSlm66018 	if (err != 0) {
1394e1ebb9ecSlm66018 		cmn_err(CE_WARN, "!failed to program addr "
1395e1ebb9ecSlm66018 			"%x:%x:%x:%x:%x:%x for port %d into device %s "
1396e1ebb9ecSlm66018 			": err %d", port->p_macaddr.ether_addr_octet[0],
1397e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[1],
1398e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[2],
1399e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[3],
1400e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[4],
1401e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[5],
1402e1ebb9ecSlm66018 			port->p_instance, vswp->physname, err);
1403e1ebb9ecSlm66018 
1404e1ebb9ecSlm66018 		/*
1405e1ebb9ecSlm66018 		 * Mark that an attempt should be made to re-config
1406e1ebb9ecSlm66018 		 * sometime in the future if a port is deleted.
1407e1ebb9ecSlm66018 		 */
1408e1ebb9ecSlm66018 		vswp->recfg_reqd = B_TRUE;
1409e1ebb9ecSlm66018 
1410e1ebb9ecSlm66018 		/*
1411e1ebb9ecSlm66018 		 * Only 1 mode specified, nothing more to do.
1412e1ebb9ecSlm66018 		 */
1413e1ebb9ecSlm66018 		if (vswp->smode_num == 1)
1414e1ebb9ecSlm66018 			return (err);
1415e1ebb9ecSlm66018 
1416e1ebb9ecSlm66018 		/*
1417e1ebb9ecSlm66018 		 * If promiscuous was the next mode specified, try to
1418e1ebb9ecSlm66018 		 * set the card into that mode.
1419e1ebb9ecSlm66018 		 */
1420e1ebb9ecSlm66018 		if ((vswp->smode_idx <= (vswp->smode_num - 2)) &&
1421e1ebb9ecSlm66018 			(vswp->smode[vswp->smode_idx + 1]
1422e1ebb9ecSlm66018 					== VSW_LAYER2_PROMISC)) {
1423e1ebb9ecSlm66018 			vswp->smode_idx += 1;
1424e1ebb9ecSlm66018 			return (vsw_set_hw_promisc(vswp, port));
1425e1ebb9ecSlm66018 		}
1426e1ebb9ecSlm66018 		return (err);
1427e1ebb9ecSlm66018 	}
1428e1ebb9ecSlm66018 
1429e1ebb9ecSlm66018 	port->addr_slot = mac_addr.mma_slot;
1430e1ebb9ecSlm66018 	port->addr_set = VSW_ADDR_HW;
1431e1ebb9ecSlm66018 
1432e1ebb9ecSlm66018 	D2(vswp, "programmed addr %x:%x:%x:%x:%x:%x for port %d "
1433e1ebb9ecSlm66018 		"into slot %d of device %s",
1434e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[0],
1435e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[1],
1436e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[2],
1437e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[3],
1438e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[4],
1439e1ebb9ecSlm66018 		port->p_macaddr.ether_addr_octet[5],
1440e1ebb9ecSlm66018 		port->p_instance, port->addr_slot, vswp->physname);
1441e1ebb9ecSlm66018 
1442e1ebb9ecSlm66018 	D1(vswp, "%s: exit", __func__);
1443e1ebb9ecSlm66018 
1444e1ebb9ecSlm66018 	return (0);
1445e1ebb9ecSlm66018 }
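
/*
 * Illustrative sketch, not compiled into the driver: the smode[] array
 * drives the fallback described above. With two modes configured as
 * below, a failed maddr_add() in vsw_set_hw() advances smode_idx and
 * retries in promiscuous mode; the values shown are only an example.
 */
#if 0
	vswp->smode_num = 2;
	vswp->smode_idx = 0;
	vswp->smode[0] = VSW_LAYER2;
	vswp->smode[1] = VSW_LAYER2_PROMISC;

	if (vsw_set_hw(vswp, port) != 0)
		cmn_err(CE_WARN, "!unable to set up port %d in HW",
		    port->p_instance);
#endif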
1446e1ebb9ecSlm66018 
1447e1ebb9ecSlm66018 /*
1448e1ebb9ecSlm66018  * If in layer 3 mode do nothing.
1449e1ebb9ecSlm66018  *
1450e1ebb9ecSlm66018  * If in layer 2 switched mode remove the address from the physical
1451e1ebb9ecSlm66018  * device.
1452e1ebb9ecSlm66018  *
1453e1ebb9ecSlm66018  * If in layer 2 promiscuous mode disable promisc mode.
1454e1ebb9ecSlm66018  *
1455e1ebb9ecSlm66018  * Returns 0 on success.
1456e1ebb9ecSlm66018  */
1457e1ebb9ecSlm66018 static int
1458e1ebb9ecSlm66018 vsw_unset_hw(vsw_t *vswp, vsw_port_t *port)
1459e1ebb9ecSlm66018 {
1460e1ebb9ecSlm66018 	int		err;
1461e1ebb9ecSlm66018 	void		*mah;
1462e1ebb9ecSlm66018 
1463e1ebb9ecSlm66018 	D1(vswp, "%s: enter", __func__);
1464e1ebb9ecSlm66018 
1465e1ebb9ecSlm66018 	if (vswp->smode[vswp->smode_idx] == VSW_LAYER3)
1466e1ebb9ecSlm66018 		return (0);
1467e1ebb9ecSlm66018 
1468e1ebb9ecSlm66018 	if (port->addr_set == VSW_ADDR_PROMISC) {
1469e1ebb9ecSlm66018 		return (vsw_unset_hw_promisc(vswp, port));
1470e1ebb9ecSlm66018 	}
1471e1ebb9ecSlm66018 
1472e1ebb9ecSlm66018 	if (port->addr_set == VSW_ADDR_HW) {
1473e1ebb9ecSlm66018 		if (vswp->mh == NULL)
1474e1ebb9ecSlm66018 			return (1);
1475e1ebb9ecSlm66018 
1476e1ebb9ecSlm66018 		if (vswp->maddr.maddr_handle == NULL)
1477e1ebb9ecSlm66018 			return (1);
1478e1ebb9ecSlm66018 
1479e1ebb9ecSlm66018 		mah = vswp->maddr.maddr_handle;
1480e1ebb9ecSlm66018 
1481e1ebb9ecSlm66018 		err = vswp->maddr.maddr_remove(mah, port->addr_slot);
1482e1ebb9ecSlm66018 		if (err != 0) {
1483e1ebb9ecSlm66018 			cmn_err(CE_WARN, "!Unable to remove addr "
1484e1ebb9ecSlm66018 				"%x:%x:%x:%x:%x:%x for port %d from device %s"
1485e1ebb9ecSlm66018 				" : (err %d)",
1486e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[0],
1487e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[1],
1488e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[2],
1489e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[3],
1490e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[4],
1491e1ebb9ecSlm66018 				port->p_macaddr.ether_addr_octet[5],
1492e1ebb9ecSlm66018 				port->p_instance, vswp->physname, err);
1493e1ebb9ecSlm66018 			return (err);
1494e1ebb9ecSlm66018 		}
1495e1ebb9ecSlm66018 
1496e1ebb9ecSlm66018 		port->addr_set = VSW_ADDR_UNSET;
1497e1ebb9ecSlm66018 
1498e1ebb9ecSlm66018 		D2(vswp, "removed addr %x:%x:%x:%x:%x:%x for "
1499e1ebb9ecSlm66018 			"port %d from device %s",
1500e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[0],
1501e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[1],
1502e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[2],
1503e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[3],
1504e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[4],
1505e1ebb9ecSlm66018 			port->p_macaddr.ether_addr_octet[5],
1506e1ebb9ecSlm66018 			port->p_instance, vswp->physname);
1507e1ebb9ecSlm66018 	}
1508e1ebb9ecSlm66018 
1509e1ebb9ecSlm66018 	D1(vswp, "%s: exit", __func__);
1510e1ebb9ecSlm66018 	return (0);
1511e1ebb9ecSlm66018 }
1512e1ebb9ecSlm66018 
1513e1ebb9ecSlm66018 /*
1514e1ebb9ecSlm66018  * Set network card into promisc mode.
1515e1ebb9ecSlm66018  *
1516e1ebb9ecSlm66018  * Returns 0 on success, 1 on failure.
1517e1ebb9ecSlm66018  */
1518e1ebb9ecSlm66018 static int
1519e1ebb9ecSlm66018 vsw_set_hw_promisc(vsw_t *vswp, vsw_port_t *port)
1520e1ebb9ecSlm66018 {
1521e1ebb9ecSlm66018 	D1(vswp, "%s: enter", __func__);
1522e1ebb9ecSlm66018 
1523e1ebb9ecSlm66018 	if (vswp->mh == NULL)
1524e1ebb9ecSlm66018 		return (1);
1525e1ebb9ecSlm66018 
1526e1ebb9ecSlm66018 	if (vswp->promisc_cnt++ == 0) {
1527e1ebb9ecSlm66018 		if (mac_promisc_set(vswp->mh, B_TRUE, MAC_DEVPROMISC) != 0) {
1528e1ebb9ecSlm66018 			vswp->promisc_cnt--;
1529e1ebb9ecSlm66018 			return (1);
1530e1ebb9ecSlm66018 		}
1531e1ebb9ecSlm66018 		cmn_err(CE_NOTE, "!switching device %s into promiscuous mode",
1532e1ebb9ecSlm66018 				vswp->physname);
1533e1ebb9ecSlm66018 	}
1534e1ebb9ecSlm66018 	port->addr_set = VSW_ADDR_PROMISC;
1535e1ebb9ecSlm66018 
1536e1ebb9ecSlm66018 	D1(vswp, "%s: exit", __func__);
1537e1ebb9ecSlm66018 
1538e1ebb9ecSlm66018 	return (0);
1539e1ebb9ecSlm66018 }
1540e1ebb9ecSlm66018 
1541e1ebb9ecSlm66018 /*
1542e1ebb9ecSlm66018  * Turn off promiscuous mode on network card.
1543e1ebb9ecSlm66018  *
1544e1ebb9ecSlm66018  * Returns 0 on success, 1 on failure.
1545e1ebb9ecSlm66018  */
1546e1ebb9ecSlm66018 static int
1547e1ebb9ecSlm66018 vsw_unset_hw_promisc(vsw_t *vswp, vsw_port_t *port)
1548e1ebb9ecSlm66018 {
1549e1ebb9ecSlm66018 	vsw_port_list_t 	*plist = &vswp->plist;
1550e1ebb9ecSlm66018 
1551e1ebb9ecSlm66018 	D1(vswp, "%s: enter", __func__);
1552e1ebb9ecSlm66018 
1553e1ebb9ecSlm66018 	if (vswp->mh == NULL)
1554e1ebb9ecSlm66018 		return (1);
1555e1ebb9ecSlm66018 
1556e1ebb9ecSlm66018 	ASSERT(port->addr_set == VSW_ADDR_PROMISC);
1557e1ebb9ecSlm66018 
1558e1ebb9ecSlm66018 	if (--vswp->promisc_cnt == 0) {
1559e1ebb9ecSlm66018 		if (mac_promisc_set(vswp->mh, B_FALSE, MAC_DEVPROMISC) != 0) {
1560e1ebb9ecSlm66018 			vswp->promisc_cnt++;
1561e1ebb9ecSlm66018 			return (1);
1562e1ebb9ecSlm66018 		}
1563e1ebb9ecSlm66018 
1564e1ebb9ecSlm66018 		/*
1565e1ebb9ecSlm66018 		 * We are exiting promisc mode either because we had
1566e1ebb9ecSlm66018 		 * only failed over into promisc mode from switched mode
1567e1ebb9ecSlm66018 		 * due to HW resource issues, or because the user wanted
1568e1ebb9ecSlm66018 		 * the card in promisc mode for all the ports and the
1569e1ebb9ecSlm66018 		 * last port is now being deleted. Tweak the message
1570e1ebb9ecSlm66018 		 * accordingly.
1571e1ebb9ecSlm66018 		 */
1572e1ebb9ecSlm66018 		if (plist->num_ports != 0) {
1573e1ebb9ecSlm66018 			cmn_err(CE_NOTE, "!switching device %s back to "
1574e1ebb9ecSlm66018 				"programmed mode", vswp->physname);
15751ae08745Sheppo 		} else {
1576e1ebb9ecSlm66018 			cmn_err(CE_NOTE, "!switching device %s out of "
1577e1ebb9ecSlm66018 				"promiscuous mode", vswp->physname);
15781ae08745Sheppo 		}
15791ae08745Sheppo 	}
1580e1ebb9ecSlm66018 	port->addr_set = VSW_ADDR_UNSET;
1581e1ebb9ecSlm66018 
1582e1ebb9ecSlm66018 	D1(vswp, "%s: exit", __func__);
1583e1ebb9ecSlm66018 	return (0);
1584e1ebb9ecSlm66018 }
1585e1ebb9ecSlm66018 
1586e1ebb9ecSlm66018 /*
1587e1ebb9ecSlm66018  * Determine whether or not we are operating in our preferred
1588e1ebb9ecSlm66018  * mode and, if not, whether the physical resources now allow us
1589e1ebb9ecSlm66018  * to operate in it.
1590e1ebb9ecSlm66018  *
1591e1ebb9ecSlm66018  * Should only be invoked after the port which is being deleted has
1592e1ebb9ecSlm66018  * been removed from the port list.
1593e1ebb9ecSlm66018  */
1594e1ebb9ecSlm66018 static int
1595e1ebb9ecSlm66018 vsw_reconfig_hw(vsw_t *vswp)
1596e1ebb9ecSlm66018 {
1597e1ebb9ecSlm66018 	vsw_port_list_t 	*plist = &vswp->plist;
1598e1ebb9ecSlm66018 	mac_multi_addr_t	mac_addr;
1599e1ebb9ecSlm66018 	vsw_port_t		*tp;
1600e1ebb9ecSlm66018 	void			*mah;
1601e1ebb9ecSlm66018 	int			rv = 0;
1602e1ebb9ecSlm66018 	int			s_idx;
1603e1ebb9ecSlm66018 
1604e1ebb9ecSlm66018 	D1(vswp, "%s: enter", __func__);
1605e1ebb9ecSlm66018 
1606e1ebb9ecSlm66018 	if (vswp->maddr.maddr_handle == NULL)
1607e1ebb9ecSlm66018 		return (1);
1608e1ebb9ecSlm66018 
1609e1ebb9ecSlm66018 	/*
1610e1ebb9ecSlm66018 	 * Check if there are now sufficient HW resources to
1611e1ebb9ecSlm66018 	 * attempt a re-config.
1612e1ebb9ecSlm66018 	 */
1613e1ebb9ecSlm66018 	if (plist->num_ports > vswp->maddr.maddr_naddrfree)
1614e1ebb9ecSlm66018 		return (1);
1615e1ebb9ecSlm66018 
1616e1ebb9ecSlm66018 	/*
1617e1ebb9ecSlm66018 	 * If we are in layer 2 (i.e. switched), or would like to be
1618e1ebb9ecSlm66018 	 * in layer 2, then check if any ports need to be programmed
1619e1ebb9ecSlm66018 	 * into the HW.
1620e1ebb9ecSlm66018 	 *
1621e1ebb9ecSlm66018 	 * This can happen in two cases - switched was specified as
1622e1ebb9ecSlm66018 	 * the preferred mode of operation but we exhausted the HW
1623e1ebb9ecSlm66018 	 * resources and so failed over to the next specified mode,
1624e1ebb9ecSlm66018 	 * or switched was the only mode specified so after HW
1625e1ebb9ecSlm66018 	 * resources were exhausted there was nothing more we
1626e1ebb9ecSlm66018 	 * could do.
1627e1ebb9ecSlm66018 	 */
1628e1ebb9ecSlm66018 	if (vswp->smode_idx > 0)
1629e1ebb9ecSlm66018 		s_idx = vswp->smode_idx - 1;
1630e1ebb9ecSlm66018 	else
1631e1ebb9ecSlm66018 		s_idx = vswp->smode_idx;
1632e1ebb9ecSlm66018 
1633e1ebb9ecSlm66018 	if (vswp->smode[s_idx] == VSW_LAYER2) {
1634e1ebb9ecSlm66018 		mah = vswp->maddr.maddr_handle;
1635e1ebb9ecSlm66018 
1636e1ebb9ecSlm66018 		D2(vswp, "%s: attempting reconfig..", __func__);
1637e1ebb9ecSlm66018 
1638e1ebb9ecSlm66018 		/*
1639e1ebb9ecSlm66018 		 * Scan the port list for any port whose address has not
1640e1ebb9ecSlm66018 		 * been programmed in HW - there should be a max of one.
1641e1ebb9ecSlm66018 		 */
1642e1ebb9ecSlm66018 		for (tp = plist->head; tp != NULL; tp = tp->p_next) {
1643e1ebb9ecSlm66018 			if (tp->addr_set != VSW_ADDR_HW) {
1644e1ebb9ecSlm66018 				mac_addr.mma_addrlen = ETHERADDRL;
1645e1ebb9ecSlm66018 				ether_copy(&tp->p_macaddr, &mac_addr.mma_addr);
1646e1ebb9ecSlm66018 
1647e1ebb9ecSlm66018 				rv = vswp->maddr.maddr_add(mah, &mac_addr);
1648e1ebb9ecSlm66018 				if (rv != 0) {
1649e1ebb9ecSlm66018 					DWARN(vswp, "Error setting addr in "
1650e1ebb9ecSlm66018 						"HW for port %d err %d",
1651e1ebb9ecSlm66018 						tp->p_instance, rv);
1652e1ebb9ecSlm66018 					goto reconfig_err_exit;
1653e1ebb9ecSlm66018 				}
1654e1ebb9ecSlm66018 				tp->addr_slot = mac_addr.mma_slot;
1655e1ebb9ecSlm66018 
1656e1ebb9ecSlm66018 				D2(vswp, "re-programmed port %d "
1657e1ebb9ecSlm66018 					"addr %x:%x:%x:%x:%x:%x into slot %d"
1658e1ebb9ecSlm66018 					" of device %s", tp->p_instance,
1659e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[0],
1660e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[1],
1661e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[2],
1662e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[3],
1663e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[4],
1664e1ebb9ecSlm66018 					tp->p_macaddr.ether_addr_octet[5],
1665e1ebb9ecSlm66018 					tp->addr_slot, vswp->physname);
1666e1ebb9ecSlm66018 
1667e1ebb9ecSlm66018 				/*
1668e1ebb9ecSlm66018 				 * If up to now we had to put the card into
1669e1ebb9ecSlm66018 				 * promisc mode to see this address, we
1670e1ebb9ecSlm66018 				 * can now safely disable promisc mode.
1671e1ebb9ecSlm66018 				 */
1672e1ebb9ecSlm66018 				if (tp->addr_set == VSW_ADDR_PROMISC)
1673e1ebb9ecSlm66018 					(void) vsw_unset_hw_promisc(vswp, tp);
1674e1ebb9ecSlm66018 
1675e1ebb9ecSlm66018 				tp->addr_set = VSW_ADDR_HW;
1676e1ebb9ecSlm66018 			}
1677e1ebb9ecSlm66018 		}
1678e1ebb9ecSlm66018 
1679e1ebb9ecSlm66018 		/* no further re-config needed */
1680e1ebb9ecSlm66018 		vswp->recfg_reqd = B_FALSE;
1681e1ebb9ecSlm66018 
1682e1ebb9ecSlm66018 		vswp->smode_idx = s_idx;
1683e1ebb9ecSlm66018 
1684e1ebb9ecSlm66018 		return (0);
1685e1ebb9ecSlm66018 	}
1686e1ebb9ecSlm66018 
1687e1ebb9ecSlm66018 reconfig_err_exit:
1688e1ebb9ecSlm66018 	return (rv);
16891ae08745Sheppo }
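
/*
 * Illustrative sketch, not compiled into the driver: vsw_reconfig_hw()
 * is only attempted once a port has been removed and an earlier
 * vsw_set_hw() failure has left recfg_reqd set, as vsw_port_detach()
 * below does.
 */
#if 0
	if (vswp->recfg_reqd)
		(void) vsw_reconfig_hw(vswp);
#endif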
16901ae08745Sheppo 
16917636cb21Slm66018 static void
16927636cb21Slm66018 vsw_mac_ring_tbl_entry_init(vsw_t *vswp, vsw_mac_ring_t *ringp)
16937636cb21Slm66018 {
16947636cb21Slm66018 	ringp->ring_state = VSW_MAC_RING_FREE;
16957636cb21Slm66018 	ringp->ring_arg = NULL;
16967636cb21Slm66018 	ringp->ring_blank = NULL;
16977636cb21Slm66018 	ringp->ring_vqp = NULL;
16987636cb21Slm66018 	ringp->ring_vswp = vswp;
16997636cb21Slm66018 }
17007636cb21Slm66018 
17017636cb21Slm66018 static void
17027636cb21Slm66018 vsw_mac_ring_tbl_init(vsw_t *vswp)
17037636cb21Slm66018 {
17047636cb21Slm66018 	int		i;
17057636cb21Slm66018 
17067636cb21Slm66018 	mutex_init(&vswp->mac_ring_lock, NULL, MUTEX_DRIVER, NULL);
17077636cb21Slm66018 
17087636cb21Slm66018 	vswp->mac_ring_tbl_sz = vsw_mac_rx_rings;
17097636cb21Slm66018 	vswp->mac_ring_tbl  =
17107636cb21Slm66018 		kmem_alloc(vsw_mac_rx_rings * sizeof (vsw_mac_ring_t),
17117636cb21Slm66018 		KM_SLEEP);
17127636cb21Slm66018 
17137636cb21Slm66018 	for (i = 0; i < vswp->mac_ring_tbl_sz; i++)
17147636cb21Slm66018 		vsw_mac_ring_tbl_entry_init(vswp, &vswp->mac_ring_tbl[i]);
17157636cb21Slm66018 }
17167636cb21Slm66018 
17177636cb21Slm66018 static void
17187636cb21Slm66018 vsw_mac_ring_tbl_destroy(vsw_t *vswp)
17197636cb21Slm66018 {
17207636cb21Slm66018 	int	i;
17217636cb21Slm66018 
17227636cb21Slm66018 	mutex_enter(&vswp->mac_ring_lock);
17237636cb21Slm66018 	for (i = 0; i < vswp->mac_ring_tbl_sz; i++) {
17247636cb21Slm66018 		if (vswp->mac_ring_tbl[i].ring_state != VSW_MAC_RING_FREE) {
17257636cb21Slm66018 			/*
17267636cb21Slm66018 			 * Destroy the queue.
17277636cb21Slm66018 			 */
17287636cb21Slm66018 			vsw_queue_stop(vswp->mac_ring_tbl[i].ring_vqp);
17297636cb21Slm66018 			vsw_queue_destroy(vswp->mac_ring_tbl[i].ring_vqp);
17307636cb21Slm66018 
17317636cb21Slm66018 			/*
17327636cb21Slm66018 			 * Re-initialize the structure.
17337636cb21Slm66018 			 */
17347636cb21Slm66018 			vsw_mac_ring_tbl_entry_init(vswp,
17357636cb21Slm66018 				&vswp->mac_ring_tbl[i]);
17367636cb21Slm66018 		}
17377636cb21Slm66018 	}
17387636cb21Slm66018 	mutex_exit(&vswp->mac_ring_lock);
17397636cb21Slm66018 
17407636cb21Slm66018 	mutex_destroy(&vswp->mac_ring_lock);
17417636cb21Slm66018 	kmem_free(vswp->mac_ring_tbl,
17427636cb21Slm66018 		vswp->mac_ring_tbl_sz * sizeof (vsw_mac_ring_t));
17437636cb21Slm66018 	vswp->mac_ring_tbl_sz = 0;
17447636cb21Slm66018 }
17457636cb21Slm66018 
17467636cb21Slm66018 /*
17477636cb21Slm66018  * Handle resource add callbacks from the driver below.
17487636cb21Slm66018  */
17497636cb21Slm66018 static mac_resource_handle_t
17507636cb21Slm66018 vsw_mac_ring_add_cb(void *arg, mac_resource_t *mrp)
17517636cb21Slm66018 {
17527636cb21Slm66018 	vsw_t		*vswp = (vsw_t *)arg;
17537636cb21Slm66018 	mac_rx_fifo_t	*mrfp = (mac_rx_fifo_t *)mrp;
17547636cb21Slm66018 	vsw_mac_ring_t	*ringp;
17557636cb21Slm66018 	vsw_queue_t	*vqp;
17567636cb21Slm66018 	int		i;
17577636cb21Slm66018 
17587636cb21Slm66018 	ASSERT(vswp != NULL);
17597636cb21Slm66018 	ASSERT(mrp != NULL);
17607636cb21Slm66018 	ASSERT(vswp->mac_ring_tbl != NULL);
17617636cb21Slm66018 
17627636cb21Slm66018 	D1(vswp, "%s: enter", __func__);
17637636cb21Slm66018 
17647636cb21Slm66018 	/*
17657636cb21Slm66018 	 * Check to make sure we have the correct resource type.
17667636cb21Slm66018 	 */
17677636cb21Slm66018 	if (mrp->mr_type != MAC_RX_FIFO)
17687636cb21Slm66018 		return (NULL);
17697636cb21Slm66018 
17707636cb21Slm66018 	/*
17717636cb21Slm66018 	 * Find an open entry in the ring table.
17727636cb21Slm66018 	 */
17737636cb21Slm66018 	mutex_enter(&vswp->mac_ring_lock);
17747636cb21Slm66018 	for (i = 0; i < vswp->mac_ring_tbl_sz; i++) {
17757636cb21Slm66018 		ringp = &vswp->mac_ring_tbl[i];
17767636cb21Slm66018 
17777636cb21Slm66018 		/*
17787636cb21Slm66018 		 * Check for an empty slot; if one is found, set up the
17797636cb21Slm66018 		 * queue and worker thread.
17807636cb21Slm66018 		 */
17817636cb21Slm66018 		if (ringp->ring_state == VSW_MAC_RING_FREE) {
17827636cb21Slm66018 			/*
17837636cb21Slm66018 			 * Create the queue for this ring.
17847636cb21Slm66018 			 */
17857636cb21Slm66018 			vqp = vsw_queue_create();
17867636cb21Slm66018 
17877636cb21Slm66018 			/*
17887636cb21Slm66018 			 * Initialize the ring data structure.
17897636cb21Slm66018 			 */
17907636cb21Slm66018 			ringp->ring_vqp = vqp;
17917636cb21Slm66018 			ringp->ring_arg = mrfp->mrf_arg;
17927636cb21Slm66018 			ringp->ring_blank = mrfp->mrf_blank;
17937636cb21Slm66018 			ringp->ring_state = VSW_MAC_RING_INUSE;
17947636cb21Slm66018 
17957636cb21Slm66018 			/*
17967636cb21Slm66018 			 * Create the worker thread.
17977636cb21Slm66018 			 */
17987636cb21Slm66018 			vqp->vq_worker = thread_create(NULL, 0,
17997636cb21Slm66018 				vsw_queue_worker, ringp, 0, &p0,
18007636cb21Slm66018 				TS_RUN, minclsyspri);
18017636cb21Slm66018 			if (vqp->vq_worker == NULL) {
18027636cb21Slm66018 				vsw_queue_destroy(vqp);
18037636cb21Slm66018 				vsw_mac_ring_tbl_entry_init(vswp, ringp);
18047636cb21Slm66018 				ringp = NULL;
18057636cb21Slm66018 			}
18067636cb21Slm66018 
18077636cb21Slm66018 			mutex_exit(&vswp->mac_ring_lock);
18087636cb21Slm66018 			D1(vswp, "%s: exit", __func__);
18097636cb21Slm66018 			return ((mac_resource_handle_t)ringp);
18107636cb21Slm66018 		}
18117636cb21Slm66018 	}
18127636cb21Slm66018 	mutex_exit(&vswp->mac_ring_lock);
18137636cb21Slm66018 
18147636cb21Slm66018 	/*
18157636cb21Slm66018 	 * No slots in the ring table available.
18167636cb21Slm66018 	 */
18177636cb21Slm66018 	D1(vswp, "%s: exit", __func__);
18187636cb21Slm66018 	return (NULL);
18197636cb21Slm66018 }
18207636cb21Slm66018 
18217636cb21Slm66018 static void
18227636cb21Slm66018 vsw_queue_stop(vsw_queue_t *vqp)
18237636cb21Slm66018 {
18247636cb21Slm66018 	mutex_enter(&vqp->vq_lock);
18257636cb21Slm66018 
18267636cb21Slm66018 	if (vqp->vq_state == VSW_QUEUE_RUNNING) {
18277636cb21Slm66018 		vqp->vq_state = VSW_QUEUE_STOP;
18287636cb21Slm66018 		cv_signal(&vqp->vq_cv);
18297636cb21Slm66018 
18307636cb21Slm66018 		while (vqp->vq_state != VSW_QUEUE_DRAINED)
18317636cb21Slm66018 			cv_wait(&vqp->vq_cv, &vqp->vq_lock);
18327636cb21Slm66018 	}
18337636cb21Slm66018 
18347636cb21Slm66018 	mutex_exit(&vqp->vq_lock);
18357636cb21Slm66018 }
18367636cb21Slm66018 
18377636cb21Slm66018 static vsw_queue_t *
18387636cb21Slm66018 vsw_queue_create()
18397636cb21Slm66018 {
18407636cb21Slm66018 	vsw_queue_t *vqp;
18417636cb21Slm66018 
18427636cb21Slm66018 	vqp = kmem_zalloc(sizeof (vsw_queue_t), KM_SLEEP);
18437636cb21Slm66018 
18447636cb21Slm66018 	mutex_init(&vqp->vq_lock, NULL, MUTEX_DRIVER, NULL);
18457636cb21Slm66018 	cv_init(&vqp->vq_cv, NULL, CV_DRIVER, NULL);
18467636cb21Slm66018 	vqp->vq_first = NULL;
18477636cb21Slm66018 	vqp->vq_last = NULL;
18487636cb21Slm66018 	vqp->vq_state = VSW_QUEUE_STOP;
18497636cb21Slm66018 
18507636cb21Slm66018 	return (vqp);
18517636cb21Slm66018 }
18527636cb21Slm66018 
18537636cb21Slm66018 static void
18547636cb21Slm66018 vsw_queue_destroy(vsw_queue_t *vqp)
18557636cb21Slm66018 {
18567636cb21Slm66018 	cv_destroy(&vqp->vq_cv);
18577636cb21Slm66018 	mutex_destroy(&vqp->vq_lock);
18587636cb21Slm66018 	kmem_free(vqp, sizeof (vsw_queue_t));
18597636cb21Slm66018 }
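
/*
 * Illustrative sketch, not compiled into the driver: the queue life
 * cycle used by the ring table code above - create a queue, start a
 * worker on it, and later stop and destroy it when the ring entry is
 * torn down. 'ringp' is the vsw_mac_ring_t entry the worker services.
 */
#if 0
	vsw_queue_t	*vqp = vsw_queue_create();

	vqp->vq_worker = thread_create(NULL, 0, vsw_queue_worker, ringp,
	    0, &p0, TS_RUN, minclsyspri);

	/* later, on teardown */
	vsw_queue_stop(vqp);		/* waits until VSW_QUEUE_DRAINED */
	vsw_queue_destroy(vqp);
#endif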
18607636cb21Slm66018 
18617636cb21Slm66018 static void
18627636cb21Slm66018 vsw_queue_worker(vsw_mac_ring_t *rrp)
18637636cb21Slm66018 {
18647636cb21Slm66018 	mblk_t		*mp;
18657636cb21Slm66018 	vsw_queue_t	*vqp = rrp->ring_vqp;
18667636cb21Slm66018 	vsw_t		*vswp = rrp->ring_vswp;
18677636cb21Slm66018 
18687636cb21Slm66018 	mutex_enter(&vqp->vq_lock);
18697636cb21Slm66018 
18707636cb21Slm66018 	ASSERT(vqp->vq_state == VSW_QUEUE_STOP);
18717636cb21Slm66018 
18727636cb21Slm66018 	/*
18737636cb21Slm66018 	 * Set the state to running, since the thread is now active.
18747636cb21Slm66018 	 */
18757636cb21Slm66018 	vqp->vq_state = VSW_QUEUE_RUNNING;
18767636cb21Slm66018 
18777636cb21Slm66018 	while (vqp->vq_state == VSW_QUEUE_RUNNING) {
18787636cb21Slm66018 		/*
18797636cb21Slm66018 		 * Wait for work to do, or for the state to change
18807636cb21Slm66018 		 * to not running.
18817636cb21Slm66018 		 */
18827636cb21Slm66018 		while ((vqp->vq_state == VSW_QUEUE_RUNNING) &&
18837636cb21Slm66018 				(vqp->vq_first == NULL)) {
18847636cb21Slm66018 			cv_wait(&vqp->vq_cv, &vqp->vq_lock);
18857636cb21Slm66018 		}
18867636cb21Slm66018 
18877636cb21Slm66018 		/*
18887636cb21Slm66018 		 * Process packets that we received from the interface.
18897636cb21Slm66018 		 */
18907636cb21Slm66018 		if (vqp->vq_first != NULL) {
18917636cb21Slm66018 			mp = vqp->vq_first;
18927636cb21Slm66018 
18937636cb21Slm66018 			vqp->vq_first = NULL;
18947636cb21Slm66018 			vqp->vq_last = NULL;
18957636cb21Slm66018 
18967636cb21Slm66018 			mutex_exit(&vqp->vq_lock);
18977636cb21Slm66018 
18987636cb21Slm66018 			/* switch the chain of packets received */
18997636cb21Slm66018 			vsw_switch_frame(vswp, mp, VSW_PHYSDEV, NULL, NULL);
19007636cb21Slm66018 
19017636cb21Slm66018 			mutex_enter(&vqp->vq_lock);
19027636cb21Slm66018 		}
19037636cb21Slm66018 	}
19047636cb21Slm66018 
19057636cb21Slm66018 	/*
19067636cb21Slm66018 	 * We are drained; signal that we are done.
19077636cb21Slm66018 	 */
19087636cb21Slm66018 	vqp->vq_state = VSW_QUEUE_DRAINED;
19097636cb21Slm66018 	cv_signal(&vqp->vq_cv);
19107636cb21Slm66018 
19117636cb21Slm66018 	/*
19127636cb21Slm66018 	 * Drop the lock before exiting the thread.
19137636cb21Slm66018 	 */
19147636cb21Slm66018 	mutex_exit(&vqp->vq_lock);
19157636cb21Slm66018 
19167636cb21Slm66018 	/*
19177636cb21Slm66018 	 * Exit the thread
19187636cb21Slm66018 	 */
19197636cb21Slm66018 	thread_exit();
19207636cb21Slm66018 }
19217636cb21Slm66018 
19227636cb21Slm66018 /*
19237636cb21Slm66018  * static void
19247636cb21Slm66018  * vsw_rx_queue_cb() - Receive callback routine when
19257636cb21Slm66018  *	vsw_multi_ring_enable is non-zero.  Queue the packets
19267636cb21Slm66018  *	to a packet queue for a worker thread to process.
19277636cb21Slm66018  */
19287636cb21Slm66018 static void
19297636cb21Slm66018 vsw_rx_queue_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp)
19307636cb21Slm66018 {
19317636cb21Slm66018 	vsw_mac_ring_t	*ringp = (vsw_mac_ring_t *)mrh;
19327636cb21Slm66018 	vsw_t		*vswp = (vsw_t *)arg;
19337636cb21Slm66018 	vsw_queue_t	*vqp;
19347636cb21Slm66018 	mblk_t		*bp, *last;
19357636cb21Slm66018 
19367636cb21Slm66018 	ASSERT(mrh != NULL);
19377636cb21Slm66018 	ASSERT(vswp != NULL);
19387636cb21Slm66018 	ASSERT(mp != NULL);
19397636cb21Slm66018 
19407636cb21Slm66018 	D1(vswp, "%s: enter", __func__);
19417636cb21Slm66018 
19427636cb21Slm66018 	/*
19437636cb21Slm66018 	 * Find the last element in the mblk chain.
19447636cb21Slm66018 	 */
19457636cb21Slm66018 	bp = mp;
19467636cb21Slm66018 	do {
19477636cb21Slm66018 		last = bp;
19487636cb21Slm66018 		bp = bp->b_next;
19497636cb21Slm66018 	} while (bp != NULL);
19507636cb21Slm66018 
19517636cb21Slm66018 	/* Get the queue for the packets */
19527636cb21Slm66018 	vqp = ringp->ring_vqp;
19537636cb21Slm66018 
19547636cb21Slm66018 	/*
19557636cb21Slm66018 	 * Grab the lock so we can queue the packets.
19567636cb21Slm66018 	 */
19577636cb21Slm66018 	mutex_enter(&vqp->vq_lock);
19587636cb21Slm66018 
19597636cb21Slm66018 	if (vqp->vq_state != VSW_QUEUE_RUNNING) {
19607636cb21Slm66018 		freemsg(mp);
19617636cb21Slm66018 		goto vsw_rx_queue_cb_exit;
19627636cb21Slm66018 	}
19637636cb21Slm66018 
19647636cb21Slm66018 	/*
19657636cb21Slm66018 	 * Add the mblk chain to the queue.  If there
19667636cb21Slm66018 	 * are already mblks in the queue, then add the
19677636cb21Slm66018 	 * new chain to the end.
19687636cb21Slm66018 	 */
19697636cb21Slm66018 	if (vqp->vq_first == NULL)
19707636cb21Slm66018 		vqp->vq_first = mp;
19717636cb21Slm66018 	else
19727636cb21Slm66018 		vqp->vq_last->b_next = mp;
19737636cb21Slm66018 
19747636cb21Slm66018 	vqp->vq_last = last;
19757636cb21Slm66018 
19767636cb21Slm66018 	/*
19777636cb21Slm66018 	 * Signal the worker thread that there is work to
19787636cb21Slm66018 	 * do.
19797636cb21Slm66018 	 */
19807636cb21Slm66018 	cv_signal(&vqp->vq_cv);
19817636cb21Slm66018 
19827636cb21Slm66018 	/*
19837636cb21Slm66018 	 * Let go of the lock and exit.
19847636cb21Slm66018 	 */
19857636cb21Slm66018 vsw_rx_queue_cb_exit:
19867636cb21Slm66018 	mutex_exit(&vqp->vq_lock);
19877636cb21Slm66018 	D1(vswp, "%s: exit", __func__);
19887636cb21Slm66018 }
19897636cb21Slm66018 
19901ae08745Sheppo /*
19911ae08745Sheppo  * receive callback routine. Invoked by MAC layer when there
19921ae08745Sheppo  * are pkts being passed up from physical device.
19931ae08745Sheppo  *
19941ae08745Sheppo  * PERF: It may be more efficient when the card is in promisc
19951ae08745Sheppo  * mode to check the dest address of the pkts here (against
19961ae08745Sheppo  * the FDB) rather than checking later. Needs to be investigated.
19971ae08745Sheppo  */
19981ae08745Sheppo static void
19991ae08745Sheppo vsw_rx_cb(void *arg, mac_resource_handle_t mrh, mblk_t *mp)
20001ae08745Sheppo {
20011ae08745Sheppo 	_NOTE(ARGUNUSED(mrh))
20021ae08745Sheppo 
20031ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
20041ae08745Sheppo 
20051ae08745Sheppo 	ASSERT(vswp != NULL);
20061ae08745Sheppo 
20071ae08745Sheppo 	D1(vswp, "vsw_rx_cb: enter");
20081ae08745Sheppo 
20091ae08745Sheppo 	/* switch the chain of packets received */
20101ae08745Sheppo 	vsw_switch_frame(vswp, mp, VSW_PHYSDEV, NULL, NULL);
20111ae08745Sheppo 
20121ae08745Sheppo 	D1(vswp, "vsw_rx_cb: exit");
20131ae08745Sheppo }
20141ae08745Sheppo 
20151ae08745Sheppo /*
20161ae08745Sheppo  * Send a message out over the physical device via the MAC layer.
20171ae08745Sheppo  *
20181ae08745Sheppo  * Returns any mblks that it was unable to transmit.
20191ae08745Sheppo  */
20201ae08745Sheppo static mblk_t *
20211ae08745Sheppo vsw_tx_msg(vsw_t *vswp, mblk_t *mp)
20221ae08745Sheppo {
20231ae08745Sheppo 	const mac_txinfo_t	*mtp;
20241ae08745Sheppo 	mblk_t			*nextp;
20251ae08745Sheppo 
20261ae08745Sheppo 	if (vswp->mh == NULL) {
20271ae08745Sheppo 		DERR(vswp, "vsw_tx_msg: dropping pkts: no tx routine avail");
20281ae08745Sheppo 		return (mp);
20291ae08745Sheppo 	} else {
20301ae08745Sheppo 		for (;;) {
20311ae08745Sheppo 			nextp = mp->b_next;
20321ae08745Sheppo 			mp->b_next = NULL;
20331ae08745Sheppo 
20341ae08745Sheppo 			mtp = vswp->txinfo;
20351ae08745Sheppo 			if ((mp = mtp->mt_fn(mtp->mt_arg, mp)) != NULL) {
20361ae08745Sheppo 				mp->b_next = nextp;
20371ae08745Sheppo 				break;
20381ae08745Sheppo 			}
20391ae08745Sheppo 
20401ae08745Sheppo 			if ((mp = nextp) == NULL)
20411ae08745Sheppo 				break;
20421ae08745Sheppo 
20431ae08745Sheppo 		}
20441ae08745Sheppo 
20451ae08745Sheppo 	}
20461ae08745Sheppo 
20471ae08745Sheppo 	return (mp);
20481ae08745Sheppo }
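
/*
 * Illustrative sketch, not compiled into the driver: a caller of
 * vsw_tx_msg() must handle any chain that is handed back. Freeing it,
 * as below, is only one possible (hypothetical) policy.
 */
#if 0
	mblk_t	*resid;

	if ((resid = vsw_tx_msg(vswp, mp)) != NULL)
		freemsgchain(resid);
#endif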
20491ae08745Sheppo 
20501ae08745Sheppo /*
20511ae08745Sheppo  * Register with the MAC layer as a network device, so we
20521ae08745Sheppo  * can be plumbed if necessary.
20531ae08745Sheppo  */
20541ae08745Sheppo static int
20551ae08745Sheppo vsw_mac_register(vsw_t *vswp)
20561ae08745Sheppo {
2057ba2e4443Sseb 	mac_register_t	*macp;
2058ba2e4443Sseb 	int		rv;
20591ae08745Sheppo 
20601ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
20611ae08745Sheppo 
2062ba2e4443Sseb 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2063ba2e4443Sseb 		return (EINVAL);
2064ba2e4443Sseb 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
20651ae08745Sheppo 	macp->m_driver = vswp;
2066ba2e4443Sseb 	macp->m_dip = vswp->dip;
2067ba2e4443Sseb 	macp->m_src_addr = (uint8_t *)&vswp->if_addr;
2068ba2e4443Sseb 	macp->m_callbacks = &vsw_m_callbacks;
2069ba2e4443Sseb 	macp->m_min_sdu = 0;
2070ba2e4443Sseb 	macp->m_max_sdu = ETHERMTU;
2071ba2e4443Sseb 	rv = mac_register(macp, &vswp->if_mh);
2072ba2e4443Sseb 	mac_free(macp);
2073ba2e4443Sseb 	if (rv == 0)
2074ba2e4443Sseb 		vswp->if_state |= VSW_IF_REG;
20751ae08745Sheppo 
20761ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
20771ae08745Sheppo 
20781ae08745Sheppo 	return (rv);
20791ae08745Sheppo }
20801ae08745Sheppo 
20811ae08745Sheppo static int
20821ae08745Sheppo vsw_mac_unregister(vsw_t *vswp)
20831ae08745Sheppo {
20841ae08745Sheppo 	int		rv = 0;
20851ae08745Sheppo 
20861ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
20871ae08745Sheppo 
20881ae08745Sheppo 	WRITE_ENTER(&vswp->if_lockrw);
20891ae08745Sheppo 
2090ba2e4443Sseb 	if (vswp->if_state & VSW_IF_REG) {
2091ba2e4443Sseb 		rv = mac_unregister(vswp->if_mh);
20921ae08745Sheppo 		if (rv != 0) {
20931ae08745Sheppo 			DWARN(vswp, "%s: unable to unregister from MAC "
20941ae08745Sheppo 				"framework", __func__);
20951ae08745Sheppo 
20961ae08745Sheppo 			RW_EXIT(&vswp->if_lockrw);
20971ae08745Sheppo 			D1(vswp, "%s: fail exit", __func__);
20981ae08745Sheppo 			return (rv);
20991ae08745Sheppo 		}
21001ae08745Sheppo 
2101ba2e4443Sseb 		/* mark i/f as down and unregistered */
2102ba2e4443Sseb 		vswp->if_state &= ~(VSW_IF_UP | VSW_IF_REG);
21031ae08745Sheppo 	}
21041ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
21051ae08745Sheppo 
2106e1ebb9ecSlm66018 	vswp->mdprops &= ~(VSW_MD_MACADDR | VSW_DEV_MACADDR);
2107d10e4ef2Snarayan 
21081ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
21091ae08745Sheppo 
21101ae08745Sheppo 	return (rv);
21111ae08745Sheppo }
21121ae08745Sheppo 
2113ba2e4443Sseb static int
2114ba2e4443Sseb vsw_m_stat(void *arg, uint_t stat, uint64_t *val)
21151ae08745Sheppo {
21161ae08745Sheppo 	vsw_t			*vswp = (vsw_t *)arg;
21171ae08745Sheppo 
21181ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
21191ae08745Sheppo 
2120ba2e4443Sseb 	if (vswp->mh == NULL)
2121ba2e4443Sseb 		return (EINVAL);
21221ae08745Sheppo 
21231ae08745Sheppo 	/* return stats from underlying device */
2124ba2e4443Sseb 	*val = mac_stat_get(vswp->mh, stat);
2125ba2e4443Sseb 	return (0);
21261ae08745Sheppo }
21271ae08745Sheppo 
21281ae08745Sheppo static void
21291ae08745Sheppo vsw_m_stop(void *arg)
21301ae08745Sheppo {
21311ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
21321ae08745Sheppo 
21331ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
21341ae08745Sheppo 
21351ae08745Sheppo 	WRITE_ENTER(&vswp->if_lockrw);
21361ae08745Sheppo 	vswp->if_state &= ~VSW_IF_UP;
21371ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
21381ae08745Sheppo 
21391ae08745Sheppo 	D1(vswp, "%s: exit (state = %d)", __func__, vswp->if_state);
21401ae08745Sheppo }
21411ae08745Sheppo 
21421ae08745Sheppo static int
21431ae08745Sheppo vsw_m_start(void *arg)
21441ae08745Sheppo {
21451ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
21461ae08745Sheppo 
21471ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
21481ae08745Sheppo 
21491ae08745Sheppo 	WRITE_ENTER(&vswp->if_lockrw);
21501ae08745Sheppo 	vswp->if_state |= VSW_IF_UP;
21511ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
21521ae08745Sheppo 
21531ae08745Sheppo 	D1(vswp, "%s: exit (state = %d)", __func__, vswp->if_state);
21541ae08745Sheppo 	return (0);
21551ae08745Sheppo }
21561ae08745Sheppo 
21571ae08745Sheppo /*
21581ae08745Sheppo  * Change the local interface address.
21591ae08745Sheppo  */
21601ae08745Sheppo static int
21611ae08745Sheppo vsw_m_unicst(void *arg, const uint8_t *macaddr)
21621ae08745Sheppo {
21631ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
21641ae08745Sheppo 
21651ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
21661ae08745Sheppo 
21671ae08745Sheppo 	WRITE_ENTER(&vswp->if_lockrw);
21681ae08745Sheppo 	ether_copy(macaddr, &vswp->if_addr);
21691ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
21701ae08745Sheppo 
21711ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
21721ae08745Sheppo 
21731ae08745Sheppo 	return (0);
21741ae08745Sheppo }
21751ae08745Sheppo 
21761ae08745Sheppo static int
21771ae08745Sheppo vsw_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
21781ae08745Sheppo {
21791ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
21801ae08745Sheppo 	mcst_addr_t	*mcst_p = NULL;
21811ae08745Sheppo 	uint64_t	addr = 0x0;
2182e1ebb9ecSlm66018 	int		i, ret = 0;
21831ae08745Sheppo 
21841ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
21851ae08745Sheppo 
21861ae08745Sheppo 	/*
21871ae08745Sheppo 	 * Convert address into form that can be used
21881ae08745Sheppo 	 * as hash table key.
21891ae08745Sheppo 	 */
21901ae08745Sheppo 	for (i = 0; i < ETHERADDRL; i++) {
21911ae08745Sheppo 		addr = (addr << 8) | mca[i];
21921ae08745Sheppo 	}
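
	/*
	 * For example (purely illustrative): a multicast address of
	 * 01:00:5e:00:00:01 packs into addr = 0x01005e000001.
	 */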
21931ae08745Sheppo 
21941ae08745Sheppo 	D2(vswp, "%s: addr = 0x%llx", __func__, addr);
21951ae08745Sheppo 
21961ae08745Sheppo 	if (add) {
21971ae08745Sheppo 		D2(vswp, "%s: adding multicast", __func__);
21981ae08745Sheppo 		if (vsw_add_mcst(vswp, VSW_LOCALDEV, addr, NULL) == 0) {
21991ae08745Sheppo 			/*
22001ae08745Sheppo 			 * Update the list of multicast addresses
22011ae08745Sheppo 			 * contained within the vsw_t structure to
22021ae08745Sheppo 			 * include this new one.
22031ae08745Sheppo 			 */
22041ae08745Sheppo 			mcst_p = kmem_zalloc(sizeof (mcst_addr_t), KM_NOSLEEP);
22051ae08745Sheppo 			if (mcst_p == NULL) {
22061ae08745Sheppo 				DERR(vswp, "%s unable to alloc mem", __func__);
22071ae08745Sheppo 				return (1);
22081ae08745Sheppo 			}
22091ae08745Sheppo 			mcst_p->addr = addr;
22101ae08745Sheppo 
22111ae08745Sheppo 			mutex_enter(&vswp->mca_lock);
22121ae08745Sheppo 			mcst_p->nextp = vswp->mcap;
22131ae08745Sheppo 			vswp->mcap = mcst_p;
22141ae08745Sheppo 			mutex_exit(&vswp->mca_lock);
22151ae08745Sheppo 
22161ae08745Sheppo 			/*
22171ae08745Sheppo 			 * Call into the underlying driver to program the
22181ae08745Sheppo 			 * address into HW.
22191ae08745Sheppo 			 */
2220e1ebb9ecSlm66018 			if (vswp->mh != NULL) {
2221e1ebb9ecSlm66018 				ret = mac_multicst_add(vswp->mh, mca);
2222e1ebb9ecSlm66018 				if (ret != 0) {
2223e1ebb9ecSlm66018 					cmn_err(CE_WARN, "!unable to add "
2224e1ebb9ecSlm66018 						"multicast address");
2225e1ebb9ecSlm66018 					goto vsw_remove_addr;
2226e1ebb9ecSlm66018 				}
22271ae08745Sheppo 			}
22281ae08745Sheppo 		} else {
2229e1ebb9ecSlm66018 			cmn_err(CE_WARN, "!unable to add multicast address");
2230e1ebb9ecSlm66018 		}
2231e1ebb9ecSlm66018 		return (ret);
2232e1ebb9ecSlm66018 	}
2233e1ebb9ecSlm66018 
2234e1ebb9ecSlm66018 vsw_remove_addr:
2235e1ebb9ecSlm66018 
22361ae08745Sheppo 	D2(vswp, "%s: removing multicast", __func__);
22371ae08745Sheppo 	/*
22381ae08745Sheppo 	 * Remove the address from the hash table..
22391ae08745Sheppo 	 */
22401ae08745Sheppo 	if (vsw_del_mcst(vswp, VSW_LOCALDEV, addr, NULL) == 0) {
22411ae08745Sheppo 
22421ae08745Sheppo 		/*
22431ae08745Sheppo 		 * ..and then from the list maintained in the
22441ae08745Sheppo 		 * vsw_t structure.
22451ae08745Sheppo 		 */
22461ae08745Sheppo 		vsw_del_addr(VSW_LOCALDEV, vswp, addr);
22471ae08745Sheppo 
22481ae08745Sheppo 		if (vswp->mh != NULL)
22491ae08745Sheppo 			(void) mac_multicst_remove(vswp->mh, mca);
22501ae08745Sheppo 	}
22511ae08745Sheppo 
22521ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
22531ae08745Sheppo 
22541ae08745Sheppo 	return (0);
22551ae08745Sheppo }
22561ae08745Sheppo 
22571ae08745Sheppo static int
22581ae08745Sheppo vsw_m_promisc(void *arg, boolean_t on)
22591ae08745Sheppo {
22601ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
22611ae08745Sheppo 
22621ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
22631ae08745Sheppo 
22641ae08745Sheppo 	WRITE_ENTER(&vswp->if_lockrw);
22651ae08745Sheppo 	if (on)
22661ae08745Sheppo 		vswp->if_state |= VSW_IF_PROMISC;
22671ae08745Sheppo 	else
22681ae08745Sheppo 		vswp->if_state &= ~VSW_IF_PROMISC;
22691ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
22701ae08745Sheppo 
22711ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
22721ae08745Sheppo 
22731ae08745Sheppo 	return (0);
22741ae08745Sheppo }
22751ae08745Sheppo 
22761ae08745Sheppo static mblk_t *
22771ae08745Sheppo vsw_m_tx(void *arg, mblk_t *mp)
22781ae08745Sheppo {
22791ae08745Sheppo 	vsw_t		*vswp = (vsw_t *)arg;
22801ae08745Sheppo 
22811ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
22821ae08745Sheppo 
22831ae08745Sheppo 	vsw_switch_frame(vswp, mp, VSW_LOCALDEV, NULL, NULL);
22841ae08745Sheppo 
22851ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
22861ae08745Sheppo 
22871ae08745Sheppo 	return (NULL);
22881ae08745Sheppo }
22891ae08745Sheppo 
22901ae08745Sheppo /*
22911ae08745Sheppo  * Register for machine description (MD) updates.
22921ae08745Sheppo  */
22931ae08745Sheppo static void
22941ae08745Sheppo vsw_mdeg_register(vsw_t *vswp)
22951ae08745Sheppo {
22961ae08745Sheppo 	mdeg_prop_spec_t	*pspecp;
22971ae08745Sheppo 	mdeg_node_spec_t	*inst_specp;
22981ae08745Sheppo 	mdeg_handle_t		mdeg_hdl;
22991ae08745Sheppo 	size_t			templatesz;
23001ae08745Sheppo 	int			inst, rv;
23011ae08745Sheppo 
23021ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
23031ae08745Sheppo 
23041ae08745Sheppo 	inst = ddi_prop_get_int(DDI_DEV_T_ANY, vswp->dip,
23051ae08745Sheppo 		DDI_PROP_DONTPASS, reg_propname, -1);
23061ae08745Sheppo 	if (inst == -1) {
23071ae08745Sheppo 		DERR(vswp, "%s: unable to get %s property",
23081ae08745Sheppo 						__func__, reg_propname);
23091ae08745Sheppo 		return;
23101ae08745Sheppo 	}
23111ae08745Sheppo 
23121ae08745Sheppo 	D2(vswp, "%s: instance %d registering with mdeg", __func__, inst);
23131ae08745Sheppo 
23141ae08745Sheppo 	/*
23151ae08745Sheppo 	 * Allocate and initialize a per-instance copy
23161ae08745Sheppo 	 * of the global property spec array that will
23171ae08745Sheppo 	 * uniquely identify this vsw instance.
23181ae08745Sheppo 	 */
23191ae08745Sheppo 	templatesz = sizeof (vsw_prop_template);
23201ae08745Sheppo 	pspecp = kmem_zalloc(templatesz, KM_SLEEP);
23211ae08745Sheppo 
23221ae08745Sheppo 	bcopy(vsw_prop_template, pspecp, templatesz);
23231ae08745Sheppo 
23241ae08745Sheppo 	VSW_SET_MDEG_PROP_INST(pspecp, inst);
23251ae08745Sheppo 
23261ae08745Sheppo 	/* initialize the complete prop spec structure */
23271ae08745Sheppo 	inst_specp = kmem_zalloc(sizeof (mdeg_node_spec_t), KM_SLEEP);
23281ae08745Sheppo 	inst_specp->namep = "virtual-device";
23291ae08745Sheppo 	inst_specp->specp = pspecp;
23301ae08745Sheppo 
23311ae08745Sheppo 	/* perform the registration */
23321ae08745Sheppo 	rv = mdeg_register(inst_specp, &vport_match, vsw_mdeg_cb,
23331ae08745Sheppo 	    (void *)vswp, &mdeg_hdl);
23341ae08745Sheppo 
23351ae08745Sheppo 	if (rv != MDEG_SUCCESS) {
23361ae08745Sheppo 		DERR(vswp, "%s: mdeg_register failed (%d)\n", __func__, rv);
23371ae08745Sheppo 		kmem_free(inst_specp, sizeof (mdeg_node_spec_t));
23381ae08745Sheppo 		kmem_free(pspecp, templatesz);
23391ae08745Sheppo 		return;
23401ae08745Sheppo 	}
23411ae08745Sheppo 
23421ae08745Sheppo 	/* save off data that will be needed later */
23431ae08745Sheppo 	vswp->inst_spec = inst_specp;
23441ae08745Sheppo 	vswp->mdeg_hdl = mdeg_hdl;
23451ae08745Sheppo 
23461ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
23471ae08745Sheppo }
23481ae08745Sheppo 
23491ae08745Sheppo static void
23501ae08745Sheppo vsw_mdeg_unregister(vsw_t *vswp)
23511ae08745Sheppo {
23521ae08745Sheppo 	D1(vswp, "vsw_mdeg_unregister: enter");
23531ae08745Sheppo 
23541ae08745Sheppo 	(void) mdeg_unregister(vswp->mdeg_hdl);
23551ae08745Sheppo 
23561ae08745Sheppo 	if (vswp->inst_spec->specp != NULL) {
23571ae08745Sheppo 		(void) kmem_free(vswp->inst_spec->specp,
23581ae08745Sheppo 			sizeof (vsw_prop_template));
23591ae08745Sheppo 		vswp->inst_spec->specp = NULL;
23601ae08745Sheppo 	}
23611ae08745Sheppo 
23621ae08745Sheppo 	if (vswp->inst_spec != NULL) {
23631ae08745Sheppo 		(void) kmem_free(vswp->inst_spec,
23641ae08745Sheppo 			sizeof (mdeg_node_spec_t));
23651ae08745Sheppo 		vswp->inst_spec = NULL;
23661ae08745Sheppo 	}
23671ae08745Sheppo 
23681ae08745Sheppo 	D1(vswp, "vsw_mdeg_unregister: exit");
23691ae08745Sheppo }
23701ae08745Sheppo 
23711ae08745Sheppo static int
23721ae08745Sheppo vsw_mdeg_cb(void *cb_argp, mdeg_result_t *resp)
23731ae08745Sheppo {
23741ae08745Sheppo 	vsw_t		*vswp;
23751ae08745Sheppo 	int		idx;
23761ae08745Sheppo 	md_t		*mdp;
23771ae08745Sheppo 	mde_cookie_t	node;
23781ae08745Sheppo 	uint64_t	inst;
23791ae08745Sheppo 
23801ae08745Sheppo 	if (resp == NULL)
23811ae08745Sheppo 		return (MDEG_FAILURE);
23821ae08745Sheppo 
23831ae08745Sheppo 	vswp = (vsw_t *)cb_argp;
23841ae08745Sheppo 
23851ae08745Sheppo 	D1(vswp, "%s: added %d : removed %d : matched %d",
23861ae08745Sheppo 		__func__, resp->added.nelem, resp->removed.nelem,
23871ae08745Sheppo 		resp->match_prev.nelem);
23881ae08745Sheppo 
23891ae08745Sheppo 	/* process added ports */
23901ae08745Sheppo 	for (idx = 0; idx < resp->added.nelem; idx++) {
23911ae08745Sheppo 		mdp = resp->added.mdp;
23921ae08745Sheppo 		node = resp->added.mdep[idx];
23931ae08745Sheppo 
23941ae08745Sheppo 		D2(vswp, "%s: adding node(%d) 0x%lx", __func__, idx, node);
23951ae08745Sheppo 
23961ae08745Sheppo 		if (vsw_port_add(vswp, mdp, &node) != 0) {
23971ae08745Sheppo 			cmn_err(CE_WARN, "Unable to add new port (0x%lx)",
23981ae08745Sheppo 					node);
23991ae08745Sheppo 		}
24001ae08745Sheppo 	}
24011ae08745Sheppo 
24021ae08745Sheppo 	/* process removed ports */
24031ae08745Sheppo 	for (idx = 0; idx < resp->removed.nelem; idx++) {
24041ae08745Sheppo 		mdp = resp->removed.mdp;
24051ae08745Sheppo 		node = resp->removed.mdep[idx];
24061ae08745Sheppo 
24071ae08745Sheppo 		if (md_get_prop_val(mdp, node, id_propname, &inst)) {
24081ae08745Sheppo 			DERR(vswp, "%s: prop(%s) not found port(%d)",
24091ae08745Sheppo 				__func__, id_propname, idx);
24101ae08745Sheppo 			continue;
24111ae08745Sheppo 		}
24121ae08745Sheppo 
24131ae08745Sheppo 		D2(vswp, "%s: removing node(%d) 0x%lx", __func__, idx, node);
24141ae08745Sheppo 
24151ae08745Sheppo 		if (vsw_port_detach(vswp, inst) != 0) {
24161ae08745Sheppo 			cmn_err(CE_WARN, "Unable to remove port %ld", inst);
24171ae08745Sheppo 		}
24181ae08745Sheppo 	}
24191ae08745Sheppo 
24201ae08745Sheppo 	/*
24211ae08745Sheppo 	 * Currently no support for updating already active ports.
24221ae08745Sheppo 	 * So, ignore the match_curr and match_priv arrays for now.
24231ae08745Sheppo 	 * So, ignore the match_curr and match_prev arrays for now.
24241ae08745Sheppo 
24251ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
24261ae08745Sheppo 
24271ae08745Sheppo 	return (MDEG_SUCCESS);
24281ae08745Sheppo }
24291ae08745Sheppo 
24301ae08745Sheppo /*
24311ae08745Sheppo  * Add a new port to the system.
24321ae08745Sheppo  *
24331ae08745Sheppo  * Returns 0 on success, 1 on failure.
24341ae08745Sheppo  */
24351ae08745Sheppo int
24361ae08745Sheppo vsw_port_add(vsw_t *vswp, md_t *mdp, mde_cookie_t *node)
24371ae08745Sheppo {
24381ae08745Sheppo 	uint64_t		ldc_id;
24391ae08745Sheppo 	uint8_t			*addrp;
24401ae08745Sheppo 	int			i, addrsz;
24411ae08745Sheppo 	int			num_nodes = 0, nchan = 0;
24421ae08745Sheppo 	int			listsz = 0;
24431ae08745Sheppo 	mde_cookie_t		*listp = NULL;
24441ae08745Sheppo 	struct ether_addr	ea;
24451ae08745Sheppo 	uint64_t		macaddr;
24461ae08745Sheppo 	uint64_t		inst = 0;
24471ae08745Sheppo 	vsw_port_t		*port;
24481ae08745Sheppo 
24491ae08745Sheppo 	if (md_get_prop_val(mdp, *node, id_propname, &inst)) {
24501ae08745Sheppo 		DWARN(vswp, "%s: prop(%s) not found", __func__,
24511ae08745Sheppo 			id_propname);
24521ae08745Sheppo 		return (1);
24531ae08745Sheppo 	}
24541ae08745Sheppo 
24551ae08745Sheppo 	/*
24561ae08745Sheppo 	 * Find the channel endpoint node(s) (which should be under this
24571ae08745Sheppo 	 * port node) which contain the channel id(s).
24581ae08745Sheppo 	 */
24591ae08745Sheppo 	if ((num_nodes = md_node_count(mdp)) <= 0) {
24601ae08745Sheppo 		DERR(vswp, "%s: invalid number of nodes found (%d)",
24611ae08745Sheppo 			__func__, num_nodes);
24621ae08745Sheppo 		return (1);
24631ae08745Sheppo 	}
24641ae08745Sheppo 
24651ae08745Sheppo 	/* allocate enough space for node list */
24661ae08745Sheppo 	listsz = num_nodes * sizeof (mde_cookie_t);
24671ae08745Sheppo 	listp = kmem_zalloc(listsz, KM_SLEEP);
24681ae08745Sheppo 
24691ae08745Sheppo 	nchan = md_scan_dag(mdp, *node,
24701ae08745Sheppo 		md_find_name(mdp, chan_propname),
24711ae08745Sheppo 		md_find_name(mdp, "fwd"), listp);
24721ae08745Sheppo 
24731ae08745Sheppo 	if (nchan <= 0) {
24741ae08745Sheppo 		DWARN(vswp, "%s: no %s nodes found", __func__, chan_propname);
24751ae08745Sheppo 		kmem_free(listp, listsz);
24761ae08745Sheppo 		return (1);
24771ae08745Sheppo 	}
24781ae08745Sheppo 
24791ae08745Sheppo 	D2(vswp, "%s: %d %s nodes found", __func__, nchan, chan_propname);
24801ae08745Sheppo 
24811ae08745Sheppo 	/* use property from first node found */
24821ae08745Sheppo 	if (md_get_prop_val(mdp, listp[0], id_propname, &ldc_id)) {
24831ae08745Sheppo 		DWARN(vswp, "%s: prop(%s) not found\n", __func__,
24841ae08745Sheppo 			id_propname);
24851ae08745Sheppo 		kmem_free(listp, listsz);
24861ae08745Sheppo 		return (1);
24871ae08745Sheppo 	}
24881ae08745Sheppo 
24891ae08745Sheppo 	/* don't need list any more */
24901ae08745Sheppo 	kmem_free(listp, listsz);
24911ae08745Sheppo 
24921ae08745Sheppo 	D2(vswp, "%s: ldc_id 0x%llx", __func__, ldc_id);
24931ae08745Sheppo 
24941ae08745Sheppo 	/* read mac-address property */
24951ae08745Sheppo 	if (md_get_prop_data(mdp, *node, remaddr_propname,
24961ae08745Sheppo 					&addrp, &addrsz)) {
24971ae08745Sheppo 		DWARN(vswp, "%s: prop(%s) not found",
24981ae08745Sheppo 				__func__, remaddr_propname);
24991ae08745Sheppo 		return (1);
25001ae08745Sheppo 	}
25011ae08745Sheppo 
25021ae08745Sheppo 	if (addrsz < ETHERADDRL) {
25031ae08745Sheppo 		DWARN(vswp, "%s: invalid address size", __func__);
25041ae08745Sheppo 		return (1);
25051ae08745Sheppo 	}
25061ae08745Sheppo 
25071ae08745Sheppo 	macaddr = *((uint64_t *)addrp);
25081ae08745Sheppo 	D2(vswp, "%s: remote mac address 0x%llx", __func__, macaddr);
25091ae08745Sheppo 
25101ae08745Sheppo 	for (i = ETHERADDRL - 1; i >= 0; i--) {
25111ae08745Sheppo 		ea.ether_addr_octet[i] = macaddr & 0xFF;
25121ae08745Sheppo 		macaddr >>= 8;
25131ae08745Sheppo 	}
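
	/*
	 * For example (purely illustrative): an MD value of
	 * 0x00144ffb8888 unpacks to the ethernet address 00:14:4f:fb:88:88.
	 */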
25141ae08745Sheppo 
25151ae08745Sheppo 	if (vsw_port_attach(vswp, (int)inst, &ldc_id, 1, &ea) != 0) {
25161ae08745Sheppo 		DERR(vswp, "%s: failed to attach port", __func__);
25171ae08745Sheppo 		return (1);
25181ae08745Sheppo 	}
25191ae08745Sheppo 
25201ae08745Sheppo 	port = vsw_lookup_port(vswp, (int)inst);
25211ae08745Sheppo 
25221ae08745Sheppo 	/* just successfully created the port, so it should exist */
25231ae08745Sheppo 	ASSERT(port != NULL);
25241ae08745Sheppo 
25251ae08745Sheppo 	return (0);
25261ae08745Sheppo }
25271ae08745Sheppo 
25281ae08745Sheppo /*
25291ae08745Sheppo  * Attach the specified port.
25301ae08745Sheppo  *
25311ae08745Sheppo  * Returns 0 on success, 1 on failure.
25321ae08745Sheppo  */
25331ae08745Sheppo static int
25341ae08745Sheppo vsw_port_attach(vsw_t *vswp, int p_instance, uint64_t *ldcids, int nids,
25351ae08745Sheppo struct ether_addr *macaddr)
25361ae08745Sheppo {
25371ae08745Sheppo 	vsw_port_list_t		*plist = &vswp->plist;
25381ae08745Sheppo 	vsw_port_t		*port, **prev_port;
25391ae08745Sheppo 	int			i;
25401ae08745Sheppo 
25411ae08745Sheppo 	D1(vswp, "%s: enter : port %d", __func__, p_instance);
25421ae08745Sheppo 
25431ae08745Sheppo 	/* port already exists? */
25441ae08745Sheppo 	READ_ENTER(&plist->lockrw);
25451ae08745Sheppo 	for (port = plist->head; port != NULL; port = port->p_next) {
25461ae08745Sheppo 		if (port->p_instance == p_instance) {
25471ae08745Sheppo 			DWARN(vswp, "%s: port instance %d already attached",
25481ae08745Sheppo 				__func__, p_instance);
25491ae08745Sheppo 			RW_EXIT(&plist->lockrw);
25501ae08745Sheppo 			return (1);
25511ae08745Sheppo 		}
25521ae08745Sheppo 	}
25531ae08745Sheppo 	RW_EXIT(&plist->lockrw);
25541ae08745Sheppo 
25551ae08745Sheppo 	port = kmem_zalloc(sizeof (vsw_port_t), KM_SLEEP);
25561ae08745Sheppo 	port->p_vswp = vswp;
25571ae08745Sheppo 	port->p_instance = p_instance;
25581ae08745Sheppo 	port->p_ldclist.num_ldcs = 0;
25591ae08745Sheppo 	port->p_ldclist.head = NULL;
2560e1ebb9ecSlm66018 	port->addr_set = VSW_ADDR_UNSET;
25611ae08745Sheppo 
25621ae08745Sheppo 	rw_init(&port->p_ldclist.lockrw, NULL, RW_DRIVER, NULL);
25631ae08745Sheppo 
25641ae08745Sheppo 	mutex_init(&port->tx_lock, NULL, MUTEX_DRIVER, NULL);
25651ae08745Sheppo 	mutex_init(&port->mca_lock, NULL, MUTEX_DRIVER, NULL);
25661ae08745Sheppo 
25671ae08745Sheppo 	mutex_init(&port->ref_lock, NULL, MUTEX_DRIVER, NULL);
25681ae08745Sheppo 	cv_init(&port->ref_cv, NULL, CV_DRIVER, NULL);
25691ae08745Sheppo 
25701ae08745Sheppo 	mutex_init(&port->state_lock, NULL, MUTEX_DRIVER, NULL);
25711ae08745Sheppo 	cv_init(&port->state_cv, NULL, CV_DRIVER, NULL);
25721ae08745Sheppo 	port->state = VSW_PORT_INIT;
25731ae08745Sheppo 
25741ae08745Sheppo 	if (nids > VSW_PORT_MAX_LDCS) {
25751ae08745Sheppo 		D2(vswp, "%s: using first of %d ldc ids",
25761ae08745Sheppo 			__func__, nids);
25771ae08745Sheppo 		nids = VSW_PORT_MAX_LDCS;
25781ae08745Sheppo 	}
25791ae08745Sheppo 
25801ae08745Sheppo 	D2(vswp, "%s: %d nids", __func__, nids);
25811ae08745Sheppo 	for (i = 0; i < nids; i++) {
25821ae08745Sheppo 		D2(vswp, "%s: ldcid (%llx)", __func__, (uint64_t)ldcids[i]);
25831ae08745Sheppo 		if (vsw_ldc_attach(port, (uint64_t)ldcids[i]) != 0) {
25841ae08745Sheppo 			DERR(vswp, "%s: ldc_attach failed", __func__);
25851ae08745Sheppo 
25861ae08745Sheppo 			rw_destroy(&port->p_ldclist.lockrw);
25871ae08745Sheppo 
25881ae08745Sheppo 			cv_destroy(&port->ref_cv);
25891ae08745Sheppo 			mutex_destroy(&port->ref_lock);
25901ae08745Sheppo 
25911ae08745Sheppo 			cv_destroy(&port->state_cv);
25921ae08745Sheppo 			mutex_destroy(&port->state_lock);
25931ae08745Sheppo 
25941ae08745Sheppo 			mutex_destroy(&port->tx_lock);
25951ae08745Sheppo 			mutex_destroy(&port->mca_lock);
25961ae08745Sheppo 			kmem_free(port, sizeof (vsw_port_t));
25971ae08745Sheppo 			return (1);
25981ae08745Sheppo 		}
25991ae08745Sheppo 	}
26001ae08745Sheppo 
26011ae08745Sheppo 	ether_copy(macaddr, &port->p_macaddr);
26021ae08745Sheppo 
26031ae08745Sheppo 	WRITE_ENTER(&plist->lockrw);
26041ae08745Sheppo 
26051ae08745Sheppo 	/* create the fdb entry for this port/mac address */
26061ae08745Sheppo 	(void) vsw_add_fdb(vswp, port);
26071ae08745Sheppo 
2608e1ebb9ecSlm66018 	(void) vsw_set_hw(vswp, port);
2609e1ebb9ecSlm66018 
26101ae08745Sheppo 	/* link it into the list of ports for this vsw instance */
26111ae08745Sheppo 	prev_port = (vsw_port_t **)(&plist->head);
26121ae08745Sheppo 	port->p_next = *prev_port;
26131ae08745Sheppo 	*prev_port = port;
26141ae08745Sheppo 	plist->num_ports++;
26151ae08745Sheppo 	RW_EXIT(&plist->lockrw);
26161ae08745Sheppo 
26171ae08745Sheppo 	/*
26181ae08745Sheppo 	 * Initialise the port and any ldc's under it.
26191ae08745Sheppo 	 */
26201ae08745Sheppo 	(void) vsw_init_ldcs(port);
26211ae08745Sheppo 
26221ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
26231ae08745Sheppo 	return (0);
26241ae08745Sheppo }
26251ae08745Sheppo 
26261ae08745Sheppo /*
26271ae08745Sheppo  * Detach the specified port.
26281ae08745Sheppo  *
26291ae08745Sheppo  * Returns 0 on success, 1 on failure.
26301ae08745Sheppo  */
26311ae08745Sheppo static int
26321ae08745Sheppo vsw_port_detach(vsw_t *vswp, int p_instance)
26331ae08745Sheppo {
26341ae08745Sheppo 	vsw_port_t	*port = NULL;
26351ae08745Sheppo 	vsw_port_list_t	*plist = &vswp->plist;
26361ae08745Sheppo 
26371ae08745Sheppo 	D1(vswp, "%s: enter: port id %d", __func__, p_instance);
26381ae08745Sheppo 
26391ae08745Sheppo 	WRITE_ENTER(&plist->lockrw);
26401ae08745Sheppo 
26411ae08745Sheppo 	if ((port = vsw_lookup_port(vswp, p_instance)) == NULL) {
26421ae08745Sheppo 		RW_EXIT(&plist->lockrw);
26431ae08745Sheppo 		return (1);
26441ae08745Sheppo 	}
26451ae08745Sheppo 
26461ae08745Sheppo 	if (vsw_plist_del_node(vswp, port)) {
26471ae08745Sheppo 		RW_EXIT(&plist->lockrw);
26481ae08745Sheppo 		return (1);
26491ae08745Sheppo 	}
26501ae08745Sheppo 
2651e1ebb9ecSlm66018 	/* Remove address if it was programmed into HW. */
2652e1ebb9ecSlm66018 	(void) vsw_unset_hw(vswp, port);
2653e1ebb9ecSlm66018 
26541ae08745Sheppo 	/* Remove the fdb entry for this port/mac address */
26551ae08745Sheppo 	(void) vsw_del_fdb(vswp, port);
26561ae08745Sheppo 
26571ae08745Sheppo 	/* Remove any multicast addresses.. */
26581ae08745Sheppo 	vsw_del_mcst_port(port);
26591ae08745Sheppo 
26601ae08745Sheppo 	/*
2661e1ebb9ecSlm66018 	 * No longer need to hold writer lock on port list now
2662e1ebb9ecSlm66018 	 * that we have unlinked the target port from the list.
26631ae08745Sheppo 	 */
26641ae08745Sheppo 	RW_EXIT(&plist->lockrw);
26651ae08745Sheppo 
2666e1ebb9ecSlm66018 	READ_ENTER(&plist->lockrw);
2667e1ebb9ecSlm66018 
2668e1ebb9ecSlm66018 	if (vswp->recfg_reqd)
2669e1ebb9ecSlm66018 		(void) vsw_reconfig_hw(vswp);
2670e1ebb9ecSlm66018 
2671e1ebb9ecSlm66018 	RW_EXIT(&plist->lockrw);
2672e1ebb9ecSlm66018 
26731ae08745Sheppo 	if (vsw_port_delete(port)) {
26741ae08745Sheppo 		return (1);
26751ae08745Sheppo 	}
26761ae08745Sheppo 
26771ae08745Sheppo 	D1(vswp, "%s: exit: p_instance(%d)", __func__, p_instance);
26781ae08745Sheppo 	return (0);
26791ae08745Sheppo }
26801ae08745Sheppo 
26811ae08745Sheppo /*
26821ae08745Sheppo  * Detach all active ports.
26831ae08745Sheppo  *
26841ae08745Sheppo  * Returns 0 on success, 1 on failure.
26851ae08745Sheppo  */
26861ae08745Sheppo static int
26871ae08745Sheppo vsw_detach_ports(vsw_t *vswp)
26881ae08745Sheppo {
26891ae08745Sheppo 	vsw_port_list_t 	*plist = &vswp->plist;
26901ae08745Sheppo 	vsw_port_t		*port = NULL;
26911ae08745Sheppo 
26921ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
26931ae08745Sheppo 
26941ae08745Sheppo 	WRITE_ENTER(&plist->lockrw);
26951ae08745Sheppo 
26961ae08745Sheppo 	while ((port = plist->head) != NULL) {
26971ae08745Sheppo 		if (vsw_plist_del_node(vswp, port)) {
26981ae08745Sheppo 			DERR(vswp, "%s: Error deleting port %d"
26991ae08745Sheppo 				" from port list", __func__,
27001ae08745Sheppo 				port->p_instance);
27011ae08745Sheppo 			RW_EXIT(&plist->lockrw);
27021ae08745Sheppo 			return (1);
27031ae08745Sheppo 		}
27041ae08745Sheppo 
2705e1ebb9ecSlm66018 		/* Remove address if it was programmed into HW. */
2706e1ebb9ecSlm66018 		(void) vsw_unset_hw(vswp, port);
2707e1ebb9ecSlm66018 
27081ae08745Sheppo 		/* Remove the fdb entry for this port/mac address */
27091ae08745Sheppo 		(void) vsw_del_fdb(vswp, port);
27101ae08745Sheppo 
27111ae08745Sheppo 		/* Remove any multicast addresses.. */
27121ae08745Sheppo 		vsw_del_mcst_port(port);
27131ae08745Sheppo 
27141ae08745Sheppo 		/*
27151ae08745Sheppo 		 * No longer need to hold the lock on the port list
27161ae08745Sheppo 		 * now that we have unlinked the target port from the
27171ae08745Sheppo 		 * list.
27181ae08745Sheppo 		 */
27191ae08745Sheppo 		RW_EXIT(&plist->lockrw);
27201ae08745Sheppo 		if (vsw_port_delete(port)) {
27211ae08745Sheppo 			DERR(vswp, "%s: Error deleting port %d",
27221ae08745Sheppo 				__func__, port->p_instance);
27231ae08745Sheppo 			return (1);
27241ae08745Sheppo 		}
27251ae08745Sheppo 		WRITE_ENTER(&plist->lockrw);
27261ae08745Sheppo 	}
27271ae08745Sheppo 	RW_EXIT(&plist->lockrw);
27281ae08745Sheppo 
27291ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
27301ae08745Sheppo 
27311ae08745Sheppo 	return (0);
27321ae08745Sheppo }
27331ae08745Sheppo 
27341ae08745Sheppo /*
27351ae08745Sheppo  * Delete the specified port.
27361ae08745Sheppo  *
27371ae08745Sheppo  * Returns 0 on success, 1 on failure.
27381ae08745Sheppo  */
27391ae08745Sheppo static int
27401ae08745Sheppo vsw_port_delete(vsw_port_t *port)
27411ae08745Sheppo {
27421ae08745Sheppo 	vsw_ldc_list_t 		*ldcl;
27431ae08745Sheppo 	vsw_t			*vswp = port->p_vswp;
27441ae08745Sheppo 
27451ae08745Sheppo 	D1(vswp, "%s: enter : port id %d", __func__, port->p_instance);
27461ae08745Sheppo 
27471ae08745Sheppo 	(void) vsw_uninit_ldcs(port);
27481ae08745Sheppo 
27491ae08745Sheppo 	/*
27501ae08745Sheppo 	 * Wait for any pending ctrl msg tasks which reference this
27511ae08745Sheppo 	 * port to finish.
27521ae08745Sheppo 	 */
27531ae08745Sheppo 	if (vsw_drain_port_taskq(port))
27541ae08745Sheppo 		return (1);
27551ae08745Sheppo 
27561ae08745Sheppo 	/*
27571ae08745Sheppo 	 * Wait for port reference count to hit zero.
27581ae08745Sheppo 	 */
27591ae08745Sheppo 	mutex_enter(&port->ref_lock);
27601ae08745Sheppo 	while (port->ref_cnt != 0)
27611ae08745Sheppo 		cv_wait(&port->ref_cv, &port->ref_lock);
27621ae08745Sheppo 	mutex_exit(&port->ref_lock);
27631ae08745Sheppo 
27641ae08745Sheppo 	/*
27651ae08745Sheppo 	 * Wait for any active callbacks to finish
27661ae08745Sheppo 	 */
27671ae08745Sheppo 	if (vsw_drain_ldcs(port))
27681ae08745Sheppo 		return (1);
27691ae08745Sheppo 
27701ae08745Sheppo 	ldcl = &port->p_ldclist;
27711ae08745Sheppo 	WRITE_ENTER(&ldcl->lockrw);
27721ae08745Sheppo 	while (ldcl->num_ldcs > 0) {
27731ae08745Sheppo 		if (vsw_ldc_detach(port, ldcl->head->ldc_id) != 0) {
27741ae08745Sheppo 			cmn_err(CE_WARN, "unable to detach ldc %ld",
27751ae08745Sheppo 					ldcl->head->ldc_id);
27761ae08745Sheppo 			RW_EXIT(&ldcl->lockrw);
27771ae08745Sheppo 			return (1);
27781ae08745Sheppo 		}
27791ae08745Sheppo 	}
27801ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
27811ae08745Sheppo 
27821ae08745Sheppo 	rw_destroy(&port->p_ldclist.lockrw);
27831ae08745Sheppo 
27841ae08745Sheppo 	mutex_destroy(&port->mca_lock);
27851ae08745Sheppo 	mutex_destroy(&port->tx_lock);
27861ae08745Sheppo 	cv_destroy(&port->ref_cv);
27871ae08745Sheppo 	mutex_destroy(&port->ref_lock);
27881ae08745Sheppo 
27891ae08745Sheppo 	cv_destroy(&port->state_cv);
27901ae08745Sheppo 	mutex_destroy(&port->state_lock);
27911ae08745Sheppo 
27921ae08745Sheppo 	kmem_free(port, sizeof (vsw_port_t));
27931ae08745Sheppo 
27941ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
27951ae08745Sheppo 
27961ae08745Sheppo 	return (0);
27971ae08745Sheppo }
27981ae08745Sheppo 
27991ae08745Sheppo /*
28001ae08745Sheppo  * Attach a logical domain channel (ldc) under a specified port.
28011ae08745Sheppo  *
28021ae08745Sheppo  * Returns 0 on success, 1 on failure.
28031ae08745Sheppo  */
28041ae08745Sheppo static int
28051ae08745Sheppo vsw_ldc_attach(vsw_port_t *port, uint64_t ldc_id)
28061ae08745Sheppo {
28071ae08745Sheppo 	vsw_t 		*vswp = port->p_vswp;
28081ae08745Sheppo 	vsw_ldc_list_t *ldcl = &port->p_ldclist;
28091ae08745Sheppo 	vsw_ldc_t 	*ldcp = NULL;
28101ae08745Sheppo 	ldc_attr_t 	attr;
28111ae08745Sheppo 	ldc_status_t	istatus;
28121ae08745Sheppo 	int 		status = DDI_FAILURE;
2813d10e4ef2Snarayan 	int		rv;
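	/*
	 * Track how far through the setup below we get so that the
	 * failure path only tears down what was actually created.
	 */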
2814*3af08d82Slm66018 	enum		{ PROG_init = 0x0, PROG_mblks = 0x1,
2815*3af08d82Slm66018 				PROG_callback = 0x2}
2816*3af08d82Slm66018 			progress;
2817*3af08d82Slm66018 
2818*3af08d82Slm66018 	progress = PROG_init;
28191ae08745Sheppo 
28201ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
28211ae08745Sheppo 
28221ae08745Sheppo 	ldcp = kmem_zalloc(sizeof (vsw_ldc_t), KM_NOSLEEP);
28231ae08745Sheppo 	if (ldcp == NULL) {
28241ae08745Sheppo 		DERR(vswp, "%s: kmem_zalloc failed", __func__);
28251ae08745Sheppo 		return (1);
28261ae08745Sheppo 	}
28271ae08745Sheppo 	ldcp->ldc_id = ldc_id;
28281ae08745Sheppo 
2829d10e4ef2Snarayan 	/* allocate pool of receive mblks */
2830d10e4ef2Snarayan 	rv = vio_create_mblks(vsw_num_mblks, vsw_mblk_size, &(ldcp->rxh));
2831d10e4ef2Snarayan 	if (rv) {
2832d10e4ef2Snarayan 		DWARN(vswp, "%s: unable to create free mblk pool for"
2833d10e4ef2Snarayan 			" channel %ld (rv %d)", __func__, ldc_id, rv);
2834d10e4ef2Snarayan 		kmem_free(ldcp, sizeof (vsw_ldc_t));
2835d10e4ef2Snarayan 		return (1);
2836d10e4ef2Snarayan 	}
2837d10e4ef2Snarayan 
2838*3af08d82Slm66018 	progress |= PROG_mblks;
2839*3af08d82Slm66018 
28401ae08745Sheppo 	mutex_init(&ldcp->ldc_txlock, NULL, MUTEX_DRIVER, NULL);
28411ae08745Sheppo 	mutex_init(&ldcp->ldc_cblock, NULL, MUTEX_DRIVER, NULL);
28421ae08745Sheppo 	mutex_init(&ldcp->drain_cv_lock, NULL, MUTEX_DRIVER, NULL);
28431ae08745Sheppo 	cv_init(&ldcp->drain_cv, NULL, CV_DRIVER, NULL);
28441ae08745Sheppo 
28451ae08745Sheppo 	/* required for handshake with peer */
28461ae08745Sheppo 	ldcp->local_session = (uint64_t)ddi_get_lbolt();
28471ae08745Sheppo 	ldcp->peer_session = 0;
28481ae08745Sheppo 	ldcp->session_status = 0;
28491ae08745Sheppo 
28501ae08745Sheppo 	mutex_init(&ldcp->hss_lock, NULL, MUTEX_DRIVER, NULL);
28511ae08745Sheppo 	ldcp->hss_id = 1;	/* Initial handshake session id */
28521ae08745Sheppo 
28531ae08745Sheppo 	/* only set for outbound lane, inbound set by peer */
2854d10e4ef2Snarayan 	mutex_init(&ldcp->lane_in.seq_lock, NULL, MUTEX_DRIVER, NULL);
2855d10e4ef2Snarayan 	mutex_init(&ldcp->lane_out.seq_lock, NULL, MUTEX_DRIVER, NULL);
28561ae08745Sheppo 	vsw_set_lane_attr(vswp, &ldcp->lane_out);
28571ae08745Sheppo 
28581ae08745Sheppo 	attr.devclass = LDC_DEV_NT_SVC;
28591ae08745Sheppo 	attr.instance = ddi_get_instance(vswp->dip);
28601ae08745Sheppo 	attr.mode = LDC_MODE_UNRELIABLE;
2861e1ebb9ecSlm66018 	attr.mtu = VSW_LDC_MTU;
28621ae08745Sheppo 	status = ldc_init(ldc_id, &attr, &ldcp->ldc_handle);
28631ae08745Sheppo 	if (status != 0) {
28641ae08745Sheppo 		DERR(vswp, "%s(%lld): ldc_init failed, rv (%d)",
28651ae08745Sheppo 		    __func__, ldc_id, status);
2866d10e4ef2Snarayan 		goto ldc_attach_fail;
28671ae08745Sheppo 	}
28681ae08745Sheppo 
28691ae08745Sheppo 	status = ldc_reg_callback(ldcp->ldc_handle, vsw_ldc_cb, (caddr_t)ldcp);
28701ae08745Sheppo 	if (status != 0) {
28711ae08745Sheppo 		DERR(vswp, "%s(%lld): ldc_reg_callback failed, rv (%d)",
28721ae08745Sheppo 		    __func__, ldc_id, status);
28731ae08745Sheppo 		(void) ldc_fini(ldcp->ldc_handle);
2874d10e4ef2Snarayan 		goto ldc_attach_fail;
28751ae08745Sheppo 	}
28761ae08745Sheppo 
2877*3af08d82Slm66018 	progress |= PROG_callback;
2878*3af08d82Slm66018 
2879*3af08d82Slm66018 	mutex_init(&ldcp->status_lock, NULL, MUTEX_DRIVER, NULL);
28801ae08745Sheppo 
28811ae08745Sheppo 	if (ldc_status(ldcp->ldc_handle, &istatus) != 0) {
28821ae08745Sheppo 		DERR(vswp, "%s: ldc_status failed", __func__);
2883*3af08d82Slm66018 		mutex_destroy(&ldcp->status_lock);
2884*3af08d82Slm66018 		goto ldc_attach_fail;
28851ae08745Sheppo 	}
28861ae08745Sheppo 
28871ae08745Sheppo 	ldcp->ldc_status = istatus;
28881ae08745Sheppo 	ldcp->ldc_port = port;
28891ae08745Sheppo 	ldcp->ldc_vswp = vswp;
28901ae08745Sheppo 
28911ae08745Sheppo 	/* link it into the list of channels for this port */
28921ae08745Sheppo 	WRITE_ENTER(&ldcl->lockrw);
28931ae08745Sheppo 	ldcp->ldc_next = ldcl->head;
28941ae08745Sheppo 	ldcl->head = ldcp;
28951ae08745Sheppo 	ldcl->num_ldcs++;
28961ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
28971ae08745Sheppo 
28981ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
28991ae08745Sheppo 	return (0);
2900d10e4ef2Snarayan 
2901d10e4ef2Snarayan ldc_attach_fail:
2902d10e4ef2Snarayan 	mutex_destroy(&ldcp->ldc_txlock);
2903d10e4ef2Snarayan 	mutex_destroy(&ldcp->ldc_cblock);
2904d10e4ef2Snarayan 
2905d10e4ef2Snarayan 	cv_destroy(&ldcp->drain_cv);
2906d10e4ef2Snarayan 
2907*3af08d82Slm66018 	if (progress & PROG_callback) {
2908*3af08d82Slm66018 		(void) ldc_unreg_callback(ldcp->ldc_handle);
2909*3af08d82Slm66018 	}
2910*3af08d82Slm66018 
2911*3af08d82Slm66018 	if ((progress & PROG_mblks) && (ldcp->rxh != NULL)) {
2912d10e4ef2Snarayan 		if (vio_destroy_mblks(ldcp->rxh) != 0) {
2913d10e4ef2Snarayan 			/*
2914d10e4ef2Snarayan 			 * Something odd has happened, as the destroy
2915d10e4ef2Snarayan 			 * will only fail if some mblks have been allocated
2916d10e4ef2Snarayan 			 * from the pool already (which shouldn't happen)
2917d10e4ef2Snarayan 			 * and have not been returned.
2918d10e4ef2Snarayan 			 *
2919d10e4ef2Snarayan 			 * Add the pool pointer to a list maintained in
2920d10e4ef2Snarayan 			 * the device instance. Another attempt will be made
2921d10e4ef2Snarayan 			 * to free the pool when the device itself detaches.
2922d10e4ef2Snarayan 			 */
2923d10e4ef2Snarayan 			cmn_err(CE_WARN, "Creation of ldc channel %ld failed"
2924d10e4ef2Snarayan 				" and cannot destroy associated mblk pool",
2925d10e4ef2Snarayan 				ldc_id);
2926d10e4ef2Snarayan 			ldcp->rxh->nextp =  vswp->rxh;
2927d10e4ef2Snarayan 			vswp->rxh = ldcp->rxh;
2928d10e4ef2Snarayan 		}
2929d10e4ef2Snarayan 	}
2930d10e4ef2Snarayan 	mutex_destroy(&ldcp->drain_cv_lock);
2931d10e4ef2Snarayan 	mutex_destroy(&ldcp->hss_lock);
2932d10e4ef2Snarayan 
2933d10e4ef2Snarayan 	mutex_destroy(&ldcp->lane_in.seq_lock);
2934d10e4ef2Snarayan 	mutex_destroy(&ldcp->lane_out.seq_lock);
2935d10e4ef2Snarayan 	kmem_free(ldcp, sizeof (vsw_ldc_t));
2936d10e4ef2Snarayan 
2937d10e4ef2Snarayan 	return (1);
29381ae08745Sheppo }
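
/*
 * A minimal illustrative sketch, not part of this driver: the failure
 * path above (and vsw_ldc_detach() below) defers destruction of an
 * in-use mblk pool by chaining it onto vswp->rxh. The hypothetical
 * helper below shows how such a deferred list could be retried when
 * the device itself detaches.
 */
static void
vsw_destroy_deferred_rxpools(vsw_t *vswp)
{
	vio_mblk_pool_t	*vmplp, *nextp;

	while ((vmplp = vswp->rxh) != NULL) {
		nextp = vmplp->nextp;
		if (vio_destroy_mblks(vmplp) != 0) {
			/* pool still in use, leave it for a later attempt */
			break;
		}
		vswp->rxh = nextp;
	}
}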
29391ae08745Sheppo 
29401ae08745Sheppo /*
29411ae08745Sheppo  * Detach a logical domain channel (ldc) belonging to a
29421ae08745Sheppo  * particular port.
29431ae08745Sheppo  *
29441ae08745Sheppo  * Returns 0 on success, 1 on failure.
29451ae08745Sheppo  */
29461ae08745Sheppo static int
29471ae08745Sheppo vsw_ldc_detach(vsw_port_t *port, uint64_t ldc_id)
29481ae08745Sheppo {
29491ae08745Sheppo 	vsw_t 		*vswp = port->p_vswp;
29501ae08745Sheppo 	vsw_ldc_t 	*ldcp, **prev_ldcp;
29511ae08745Sheppo 	vsw_ldc_list_t	*ldcl = &port->p_ldclist;
29521ae08745Sheppo 	int 		rv;
29531ae08745Sheppo 
29541ae08745Sheppo 	prev_ldcp = &ldcl->head;
29551ae08745Sheppo 	for (; (ldcp = *prev_ldcp) != NULL; prev_ldcp = &ldcp->ldc_next) {
29561ae08745Sheppo 		if (ldcp->ldc_id == ldc_id) {
29571ae08745Sheppo 			break;
29581ae08745Sheppo 		}
29591ae08745Sheppo 	}
29601ae08745Sheppo 
29611ae08745Sheppo 	/* specified ldc id not found */
29621ae08745Sheppo 	if (ldcp == NULL) {
29631ae08745Sheppo 		DERR(vswp, "%s: ldcp = NULL", __func__);
29641ae08745Sheppo 		return (1);
29651ae08745Sheppo 	}
29661ae08745Sheppo 
29671ae08745Sheppo 	D2(vswp, "%s: detaching channel %lld", __func__, ldcp->ldc_id);
29681ae08745Sheppo 
29691ae08745Sheppo 	/*
29701ae08745Sheppo 	 * Before we can close the channel we must release any mapped
29711ae08745Sheppo 	 * resources (e.g. drings).
29721ae08745Sheppo 	 */
29731ae08745Sheppo 	vsw_free_lane_resources(ldcp, INBOUND);
29741ae08745Sheppo 	vsw_free_lane_resources(ldcp, OUTBOUND);
29751ae08745Sheppo 
29761ae08745Sheppo 	/*
29771ae08745Sheppo 	 * If the close fails we are in serious trouble, as we won't
29781ae08745Sheppo 	 * be able to delete the parent port.
29791ae08745Sheppo 	 */
29801ae08745Sheppo 	if ((rv = ldc_close(ldcp->ldc_handle)) != 0) {
29811ae08745Sheppo 		DERR(vswp, "%s: error %d closing channel %lld",
29821ae08745Sheppo 			__func__, rv, ldcp->ldc_id);
29831ae08745Sheppo 		return (1);
29841ae08745Sheppo 	}
29851ae08745Sheppo 
29861ae08745Sheppo 	(void) ldc_fini(ldcp->ldc_handle);
29871ae08745Sheppo 
29881ae08745Sheppo 	ldcp->ldc_status = LDC_INIT;
29891ae08745Sheppo 	ldcp->ldc_handle = NULL;
29901ae08745Sheppo 	ldcp->ldc_vswp = NULL;
2991d10e4ef2Snarayan 
2992d10e4ef2Snarayan 	if (ldcp->rxh != NULL) {
2993d10e4ef2Snarayan 		if (vio_destroy_mblks(ldcp->rxh)) {
2994d10e4ef2Snarayan 			/*
2995d10e4ef2Snarayan 			 * Most likely some mblks are still in use and
2996d10e4ef2Snarayan 			 * have not been returned to the pool. Add the pool
2997d10e4ef2Snarayan 			 * to the list maintained in the device instance.
2998d10e4ef2Snarayan 			 * Another attempt will be made to destroy the pool
2999d10e4ef2Snarayan 			 * when the device detaches.
3000d10e4ef2Snarayan 			 */
3001d10e4ef2Snarayan 			ldcp->rxh->nextp =  vswp->rxh;
3002d10e4ef2Snarayan 			vswp->rxh = ldcp->rxh;
3003d10e4ef2Snarayan 		}
3004d10e4ef2Snarayan 	}
3005d10e4ef2Snarayan 
3006*3af08d82Slm66018 	/* unlink it from the list */
3007*3af08d82Slm66018 	*prev_ldcp = ldcp->ldc_next;
3008*3af08d82Slm66018 	ldcl->num_ldcs--;
3009*3af08d82Slm66018 
30101ae08745Sheppo 	mutex_destroy(&ldcp->ldc_txlock);
30111ae08745Sheppo 	mutex_destroy(&ldcp->ldc_cblock);
30121ae08745Sheppo 	cv_destroy(&ldcp->drain_cv);
30131ae08745Sheppo 	mutex_destroy(&ldcp->drain_cv_lock);
30141ae08745Sheppo 	mutex_destroy(&ldcp->hss_lock);
3015d10e4ef2Snarayan 	mutex_destroy(&ldcp->lane_in.seq_lock);
3016d10e4ef2Snarayan 	mutex_destroy(&ldcp->lane_out.seq_lock);
3017*3af08d82Slm66018 	mutex_destroy(&ldcp->status_lock);
30181ae08745Sheppo 
30191ae08745Sheppo 	kmem_free(ldcp, sizeof (vsw_ldc_t));
30201ae08745Sheppo 
30211ae08745Sheppo 	return (0);
30221ae08745Sheppo }
30231ae08745Sheppo 
30241ae08745Sheppo /*
30251ae08745Sheppo  * Open and attempt to bring up the channel. Note that channel
30261ae08745Sheppo  * can only be brought up if peer has also opened channel.
30271ae08745Sheppo  *
30281ae08745Sheppo  * Returns 0 if the channel could be opened and brought up, otherwise
30291ae08745Sheppo  * returns 1.
30301ae08745Sheppo  */
30311ae08745Sheppo static int
30321ae08745Sheppo vsw_ldc_init(vsw_ldc_t *ldcp)
30331ae08745Sheppo {
30341ae08745Sheppo 	vsw_t 		*vswp = ldcp->ldc_vswp;
30351ae08745Sheppo 	ldc_status_t	istatus = 0;
30361ae08745Sheppo 	int		rv;
30371ae08745Sheppo 
30381ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
30391ae08745Sheppo 
30401ae08745Sheppo 	LDC_ENTER_LOCK(ldcp);
30411ae08745Sheppo 
30421ae08745Sheppo 	/* don't start at 0 in case clients don't like that */
30431ae08745Sheppo 	ldcp->next_ident = 1;
30441ae08745Sheppo 
30451ae08745Sheppo 	rv = ldc_open(ldcp->ldc_handle);
30461ae08745Sheppo 	if (rv != 0) {
30471ae08745Sheppo 		DERR(vswp, "%s: ldc_open failed: id(%lld) rv(%d)",
30481ae08745Sheppo 		    __func__, ldcp->ldc_id, rv);
30491ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
30501ae08745Sheppo 		return (1);
30511ae08745Sheppo 	}
30521ae08745Sheppo 
30531ae08745Sheppo 	if (ldc_status(ldcp->ldc_handle, &istatus) != 0) {
30541ae08745Sheppo 		DERR(vswp, "%s: unable to get status", __func__);
30551ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
30561ae08745Sheppo 		return (1);
30571ae08745Sheppo 
30581ae08745Sheppo 	} else if (istatus != LDC_OPEN && istatus != LDC_READY) {
30591ae08745Sheppo 		DERR(vswp, "%s: id (%lld) status(%d) is not OPEN/READY",
30601ae08745Sheppo 		    __func__, ldcp->ldc_id, istatus);
30611ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
30621ae08745Sheppo 		return (1);
30631ae08745Sheppo 	}
30641ae08745Sheppo 
3065*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
30661ae08745Sheppo 	ldcp->ldc_status = istatus;
3067*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
3068*3af08d82Slm66018 
30691ae08745Sheppo 	rv = ldc_up(ldcp->ldc_handle);
30701ae08745Sheppo 	if (rv != 0) {
30711ae08745Sheppo 		/*
30721ae08745Sheppo 		 * Not a fatal error for ldc_up() to fail, as peer
30731ae08745Sheppo 		 * end point may simply not be ready yet.
30741ae08745Sheppo 		 */
30751ae08745Sheppo 		D2(vswp, "%s: ldc_up err id(%lld) rv(%d)", __func__,
30761ae08745Sheppo 			ldcp->ldc_id, rv);
30771ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
30781ae08745Sheppo 		return (1);
30791ae08745Sheppo 	}
30801ae08745Sheppo 
30811ae08745Sheppo 	/*
30821ae08745Sheppo 	 * ldc_up() call is non-blocking so need to explicitly
30831ae08745Sheppo 	 * check channel status to see if in fact the channel
30841ae08745Sheppo 	 * is UP.
30851ae08745Sheppo 	 */
3086*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
3087*3af08d82Slm66018 	istatus = ldcp->ldc_status;
3088*3af08d82Slm66018 	if (ldc_status(ldcp->ldc_handle, &ldcp->ldc_status) != 0) {
30891ae08745Sheppo 		DERR(vswp, "%s: unable to get status", __func__);
3090*3af08d82Slm66018 		mutex_exit(&ldcp->status_lock);
30911ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
30921ae08745Sheppo 		return (1);
30931ae08745Sheppo 
30941ae08745Sheppo 	}
3095*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
30961ae08745Sheppo 	LDC_EXIT_LOCK(ldcp);
30971ae08745Sheppo 
3098*3af08d82Slm66018 	if ((istatus != LDC_UP) && (ldcp->ldc_status == LDC_UP)) {
3099*3af08d82Slm66018 		D2(vswp, "%s: channel %ld now UP (%ld)", __func__,
3100*3af08d82Slm66018 			ldcp->ldc_id, istatus);
3101*3af08d82Slm66018 		vsw_restart_handshake(ldcp);
3102*3af08d82Slm66018 	}
3103*3af08d82Slm66018 
31041ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
31051ae08745Sheppo 	return (0);
31061ae08745Sheppo }
31071ae08745Sheppo 
31081ae08745Sheppo /* disable callbacks on the channel */
31091ae08745Sheppo static int
31101ae08745Sheppo vsw_ldc_uninit(vsw_ldc_t *ldcp)
31111ae08745Sheppo {
31121ae08745Sheppo 	vsw_t	*vswp = ldcp->ldc_vswp;
31131ae08745Sheppo 	int	rv;
31141ae08745Sheppo 
31151ae08745Sheppo 	D1(vswp, "vsw_ldc_uninit: enter: id(%lx)\n", ldcp->ldc_id);
31161ae08745Sheppo 
31171ae08745Sheppo 	LDC_ENTER_LOCK(ldcp);
31181ae08745Sheppo 
31191ae08745Sheppo 	rv = ldc_set_cb_mode(ldcp->ldc_handle, LDC_CB_DISABLE);
31201ae08745Sheppo 	if (rv != 0) {
31211ae08745Sheppo 		DERR(vswp, "vsw_ldc_uninit(%lld): error disabling "
31221ae08745Sheppo 			"interrupts (rv = %d)\n", ldcp->ldc_id, rv);
31231ae08745Sheppo 		LDC_EXIT_LOCK(ldcp);
31241ae08745Sheppo 		return (1);
31251ae08745Sheppo 	}
31261ae08745Sheppo 
3127*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
31281ae08745Sheppo 	ldcp->ldc_status = LDC_INIT;
3129*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
31301ae08745Sheppo 
31311ae08745Sheppo 	LDC_EXIT_LOCK(ldcp);
31321ae08745Sheppo 
31331ae08745Sheppo 	D1(vswp, "vsw_ldc_uninit: exit: id(%lx)", ldcp->ldc_id);
31341ae08745Sheppo 
31351ae08745Sheppo 	return (0);
31361ae08745Sheppo }
31371ae08745Sheppo 
31381ae08745Sheppo static int
31391ae08745Sheppo vsw_init_ldcs(vsw_port_t *port)
31401ae08745Sheppo {
31411ae08745Sheppo 	vsw_ldc_list_t	*ldcl = &port->p_ldclist;
31421ae08745Sheppo 	vsw_ldc_t	*ldcp;
31431ae08745Sheppo 
31441ae08745Sheppo 	READ_ENTER(&ldcl->lockrw);
31451ae08745Sheppo 	ldcp =  ldcl->head;
31461ae08745Sheppo 	for (; ldcp  != NULL; ldcp = ldcp->ldc_next) {
31471ae08745Sheppo 		(void) vsw_ldc_init(ldcp);
31481ae08745Sheppo 	}
31491ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
31501ae08745Sheppo 
31511ae08745Sheppo 	return (0);
31521ae08745Sheppo }
31531ae08745Sheppo 
31541ae08745Sheppo static int
31551ae08745Sheppo vsw_uninit_ldcs(vsw_port_t *port)
31561ae08745Sheppo {
31571ae08745Sheppo 	vsw_ldc_list_t	*ldcl = &port->p_ldclist;
31581ae08745Sheppo 	vsw_ldc_t	*ldcp;
31591ae08745Sheppo 
31601ae08745Sheppo 	D1(NULL, "vsw_uninit_ldcs: enter\n");
31611ae08745Sheppo 
31621ae08745Sheppo 	READ_ENTER(&ldcl->lockrw);
31631ae08745Sheppo 	ldcp =  ldcl->head;
31641ae08745Sheppo 	for (; ldcp  != NULL; ldcp = ldcp->ldc_next) {
31651ae08745Sheppo 		(void) vsw_ldc_uninit(ldcp);
31661ae08745Sheppo 	}
31671ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
31681ae08745Sheppo 
31691ae08745Sheppo 	D1(NULL, "vsw_uninit_ldcs: exit\n");
31701ae08745Sheppo 
31711ae08745Sheppo 	return (0);
31721ae08745Sheppo }
31731ae08745Sheppo 
31741ae08745Sheppo /*
31751ae08745Sheppo  * Wait until the callback(s) associated with the ldcs under the specified
31761ae08745Sheppo  * port have completed.
31771ae08745Sheppo  *
31781ae08745Sheppo  * Prior to this function being invoked each channel under this port
31791ae08745Sheppo  * should have been quiesced via ldc_set_cb_mode(DISABLE).
31801ae08745Sheppo  *
31811ae08745Sheppo  * A short explanation of what we are doing below:
31821ae08745Sheppo  *
31831ae08745Sheppo  * The simplest approach would be to have a reference counter in
31841ae08745Sheppo  * the ldc structure which is incremented/decremented by the callbacks as
31851ae08745Sheppo  * they use the channel. The drain function could then simply disable any
31861ae08745Sheppo  * further callbacks and do a cv_wait for the ref to hit zero. Unfortunately
31871ae08745Sheppo  * there is a tiny window here - before the callback is able to get the lock
31881ae08745Sheppo  * on the channel it is interrupted and this function gets to execute. It
31891ae08745Sheppo  * sees that the ref count is zero and believes it is free to delete the
31901ae08745Sheppo  * associated data structures.
31911ae08745Sheppo  *
31921ae08745Sheppo  * We get around this by taking advantage of the fact that before the ldc
31931ae08745Sheppo  * framework invokes a callback it sets a flag to indicate that there is a
31941ae08745Sheppo  * callback active (or about to become active). If we attempt to
31951ae08745Sheppo  * unregister a callback while this active flag is set then the unregister
31961ae08745Sheppo  * will fail with EWOULDBLOCK.
31971ae08745Sheppo  *
31981ae08745Sheppo  * If the unregister fails we do a cv_timedwait. We will either be signaled
31991ae08745Sheppo  * by the callback as it is exiting (note we have to wait a short period to
32001ae08745Sheppo  * allow the callback to return fully to the ldc framework and for it to clear
32011ae08745Sheppo  * the active flag), or by the timer expiring. In either case we again attempt
32021ae08745Sheppo  * the unregister. We repeat this until we can successfully unregister the
32031ae08745Sheppo  * callback.
32041ae08745Sheppo  *
32051ae08745Sheppo  * The reason we use a cv_timedwait rather than a simple cv_wait is to catch
32061ae08745Sheppo  * the case where the callback has finished but the ldc framework has not yet
32071ae08745Sheppo  * cleared the active flag. In this case we would never get a cv_signal.
32081ae08745Sheppo  */
32091ae08745Sheppo static int
32101ae08745Sheppo vsw_drain_ldcs(vsw_port_t *port)
32111ae08745Sheppo {
32121ae08745Sheppo 	vsw_ldc_list_t	*ldcl = &port->p_ldclist;
32131ae08745Sheppo 	vsw_ldc_t	*ldcp;
32141ae08745Sheppo 	vsw_t		*vswp = port->p_vswp;
32151ae08745Sheppo 
32161ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
32171ae08745Sheppo 
32181ae08745Sheppo 	READ_ENTER(&ldcl->lockrw);
32191ae08745Sheppo 
32201ae08745Sheppo 	ldcp = ldcl->head;
32211ae08745Sheppo 
32221ae08745Sheppo 	for (; ldcp  != NULL; ldcp = ldcp->ldc_next) {
32231ae08745Sheppo 		/*
32241ae08745Sheppo 		 * If we can unregister the channel callback then we
32251ae08745Sheppo 		 * know that there is no callback either running or
32261ae08745Sheppo 		 * scheduled to run for this channel so move on to next
32271ae08745Sheppo 		 * channel in the list.
32281ae08745Sheppo 		 */
32291ae08745Sheppo 		mutex_enter(&ldcp->drain_cv_lock);
32301ae08745Sheppo 
32311ae08745Sheppo 		/* prompt active callbacks to quit */
32321ae08745Sheppo 		ldcp->drain_state = VSW_LDC_DRAINING;
32331ae08745Sheppo 
32341ae08745Sheppo 		if ((ldc_unreg_callback(ldcp->ldc_handle)) == 0) {
32351ae08745Sheppo 			D2(vswp, "%s: unreg callback for chan %ld", __func__,
32361ae08745Sheppo 				ldcp->ldc_id);
32371ae08745Sheppo 			mutex_exit(&ldcp->drain_cv_lock);
32381ae08745Sheppo 			continue;
32391ae08745Sheppo 		} else {
32401ae08745Sheppo 			/*
32411ae08745Sheppo 			 * If we end up here we know that either 1) a callback
32421ae08745Sheppo 			 * is currently executing, 2) is about to start (i.e.
32431ae08745Sheppo 			 * the ldc framework has set the active flag but
32441ae08745Sheppo 			 * has not actually invoked the callback yet, or 3)
32451ae08745Sheppo 			 * has finished and has returned to the ldc framework
32461ae08745Sheppo 			 * but the ldc framework has not yet cleared the
32471ae08745Sheppo 			 * active bit.
32481ae08745Sheppo 			 *
32491ae08745Sheppo 			 * Wait for it to finish.
32501ae08745Sheppo 			 */
32511ae08745Sheppo 			while (ldc_unreg_callback(ldcp->ldc_handle)
32521ae08745Sheppo 								== EWOULDBLOCK)
32531ae08745Sheppo 				(void) cv_timedwait(&ldcp->drain_cv,
32541ae08745Sheppo 					&ldcp->drain_cv_lock, lbolt + hz);
32551ae08745Sheppo 
32561ae08745Sheppo 			mutex_exit(&ldcp->drain_cv_lock);
32571ae08745Sheppo 			D2(vswp, "%s: unreg callback for chan %ld after "
32581ae08745Sheppo 				"timeout", __func__, ldcp->ldc_id);
32591ae08745Sheppo 		}
32601ae08745Sheppo 	}
32611ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
32621ae08745Sheppo 
32631ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
32641ae08745Sheppo 	return (0);
32651ae08745Sheppo }
32661ae08745Sheppo 
32671ae08745Sheppo /*
32681ae08745Sheppo  * Wait until all tasks which reference this port have completed.
32691ae08745Sheppo  *
32701ae08745Sheppo  * Prior to this function being invoked each channel under this port
32711ae08745Sheppo  * should have been quiesced via ldc_set_cb_mode(DISABLE).
32721ae08745Sheppo  */
32731ae08745Sheppo static int
32741ae08745Sheppo vsw_drain_port_taskq(vsw_port_t *port)
32751ae08745Sheppo {
32761ae08745Sheppo 	vsw_t		*vswp = port->p_vswp;
32771ae08745Sheppo 
32781ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
32791ae08745Sheppo 
32801ae08745Sheppo 	/*
32811ae08745Sheppo 	 * Mark the port as in the process of being detached, and
32821ae08745Sheppo 	 * dispatch a marker task to the queue so we know when all
32831ae08745Sheppo 	 * relevant tasks have completed.
32841ae08745Sheppo 	 */
32851ae08745Sheppo 	mutex_enter(&port->state_lock);
32861ae08745Sheppo 	port->state = VSW_PORT_DETACHING;
32871ae08745Sheppo 
32881ae08745Sheppo 	if ((vswp->taskq_p == NULL) ||
32891ae08745Sheppo 		(ddi_taskq_dispatch(vswp->taskq_p, vsw_marker_task,
32901ae08745Sheppo 			port, DDI_NOSLEEP) != DDI_SUCCESS)) {
32911ae08745Sheppo 		DERR(vswp, "%s: unable to dispatch marker task",
32921ae08745Sheppo 			__func__);
32931ae08745Sheppo 		mutex_exit(&port->state_lock);
32941ae08745Sheppo 		return (1);
32951ae08745Sheppo 	}
32961ae08745Sheppo 
32971ae08745Sheppo 	/*
32981ae08745Sheppo 	 * Wait for the marker task to finish.
32991ae08745Sheppo 	 */
33001ae08745Sheppo 	while (port->state != VSW_PORT_DETACHABLE)
33011ae08745Sheppo 		cv_wait(&port->state_cv, &port->state_lock);
33021ae08745Sheppo 
33031ae08745Sheppo 	mutex_exit(&port->state_lock);
33041ae08745Sheppo 
33051ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
33061ae08745Sheppo 
33071ae08745Sheppo 	return (0);
33081ae08745Sheppo }
33091ae08745Sheppo 
33101ae08745Sheppo static void
33111ae08745Sheppo vsw_marker_task(void *arg)
33121ae08745Sheppo {
33131ae08745Sheppo 	vsw_port_t	*port = arg;
33141ae08745Sheppo 	vsw_t		*vswp = port->p_vswp;
33151ae08745Sheppo 
33161ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
33171ae08745Sheppo 
33181ae08745Sheppo 	mutex_enter(&port->state_lock);
33191ae08745Sheppo 
33201ae08745Sheppo 	/*
33211ae08745Sheppo 	 * No further tasks should be dispatched which reference
33221ae08745Sheppo 	 * this port so ok to mark it as safe to detach.
33231ae08745Sheppo 	 */
33241ae08745Sheppo 	port->state = VSW_PORT_DETACHABLE;
33251ae08745Sheppo 
33261ae08745Sheppo 	cv_signal(&port->state_cv);
33271ae08745Sheppo 
33281ae08745Sheppo 	mutex_exit(&port->state_lock);
33291ae08745Sheppo 
33301ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
33311ae08745Sheppo }
33321ae08745Sheppo 
33331ae08745Sheppo static vsw_port_t *
33341ae08745Sheppo vsw_lookup_port(vsw_t *vswp, int p_instance)
33351ae08745Sheppo {
33361ae08745Sheppo 	vsw_port_list_t *plist = &vswp->plist;
33371ae08745Sheppo 	vsw_port_t	*port;
33381ae08745Sheppo 
33391ae08745Sheppo 	for (port = plist->head; port != NULL; port = port->p_next) {
33401ae08745Sheppo 		if (port->p_instance == p_instance) {
33411ae08745Sheppo 			D2(vswp, "vsw_lookup_port: found p_instance\n");
33421ae08745Sheppo 			return (port);
33431ae08745Sheppo 		}
33441ae08745Sheppo 	}
33451ae08745Sheppo 
33461ae08745Sheppo 	return (NULL);
33471ae08745Sheppo }
33481ae08745Sheppo 
33491ae08745Sheppo /*
33501ae08745Sheppo  * Search for and remove the specified port from the port
33511ae08745Sheppo  * list. Returns 0 if able to locate and remove port, otherwise
33521ae08745Sheppo  * returns 1.
33531ae08745Sheppo  */
33541ae08745Sheppo static int
33551ae08745Sheppo vsw_plist_del_node(vsw_t *vswp, vsw_port_t *port)
33561ae08745Sheppo {
33571ae08745Sheppo 	vsw_port_list_t *plist = &vswp->plist;
33581ae08745Sheppo 	vsw_port_t	*curr_p, *prev_p;
33591ae08745Sheppo 
33601ae08745Sheppo 	if (plist->head == NULL)
33611ae08745Sheppo 		return (1);
33621ae08745Sheppo 
33631ae08745Sheppo 	curr_p = prev_p = plist->head;
33641ae08745Sheppo 
33651ae08745Sheppo 	while (curr_p != NULL) {
33661ae08745Sheppo 		if (curr_p == port) {
33671ae08745Sheppo 			if (prev_p == curr_p) {
33681ae08745Sheppo 				plist->head = curr_p->p_next;
33691ae08745Sheppo 			} else {
33701ae08745Sheppo 				prev_p->p_next = curr_p->p_next;
33711ae08745Sheppo 			}
33721ae08745Sheppo 			plist->num_ports--;
33731ae08745Sheppo 			break;
33741ae08745Sheppo 		} else {
33751ae08745Sheppo 			prev_p = curr_p;
33761ae08745Sheppo 			curr_p = curr_p->p_next;
33771ae08745Sheppo 		}
33781ae08745Sheppo 	}
33791ae08745Sheppo 	return (0);
33801ae08745Sheppo }
33811ae08745Sheppo 
33821ae08745Sheppo /*
33831ae08745Sheppo  * Interrupt handler for ldc messages.
33841ae08745Sheppo  */
33851ae08745Sheppo static uint_t
33861ae08745Sheppo vsw_ldc_cb(uint64_t event, caddr_t arg)
33871ae08745Sheppo {
33881ae08745Sheppo 	vsw_ldc_t	*ldcp = (vsw_ldc_t  *)arg;
33891ae08745Sheppo 	vsw_t 		*vswp = ldcp->ldc_vswp;
33901ae08745Sheppo 	ldc_status_t	lstatus;
33911ae08745Sheppo 	int		rv;
33921ae08745Sheppo 
33931ae08745Sheppo 	D1(vswp, "%s: enter: ldcid (%lld)\n", __func__, ldcp->ldc_id);
33941ae08745Sheppo 
33951ae08745Sheppo 	mutex_enter(&ldcp->ldc_cblock);
33961ae08745Sheppo 
33971ae08745Sheppo 	if ((ldcp->ldc_status == LDC_INIT) || (ldcp->ldc_handle == NULL)) {
33981ae08745Sheppo 		mutex_exit(&ldcp->ldc_cblock);
33991ae08745Sheppo 		return (LDC_SUCCESS);
34001ae08745Sheppo 	}
34011ae08745Sheppo 
3402*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
3403*3af08d82Slm66018 	lstatus = ldcp->ldc_status;
3404*3af08d82Slm66018 	rv = ldc_status(ldcp->ldc_handle, &ldcp->ldc_status);
3405*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
3406*3af08d82Slm66018 	if (rv != 0) {
3407*3af08d82Slm66018 		cmn_err(CE_WARN, "Unable to read channel state");
3408*3af08d82Slm66018 		goto vsw_cb_exit;
3409*3af08d82Slm66018 	}
3410*3af08d82Slm66018 
34111ae08745Sheppo 	if (event & LDC_EVT_UP) {
34121ae08745Sheppo 		/*
34131ae08745Sheppo 		 * Channel has come up, get the state and then start
34141ae08745Sheppo 		 * the handshake.
34151ae08745Sheppo 		 */
34161ae08745Sheppo 		D2(vswp, "%s: id(%ld) event(%llx) UP: status(%ld)",
3417*3af08d82Slm66018 			__func__, ldcp->ldc_id, event, lstatus);
3418*3af08d82Slm66018 		D2(vswp, "%s: UP: old status %ld : cur status %ld",
3419*3af08d82Slm66018 			__func__, lstatus, ldcp->ldc_status);
3420*3af08d82Slm66018 		if ((ldcp->ldc_status != lstatus) &&
3421*3af08d82Slm66018 					(ldcp->ldc_status == LDC_UP)) {
34221ae08745Sheppo 				vsw_restart_handshake(ldcp);
3423*3af08d82Slm66018 		}
34241ae08745Sheppo 
34251ae08745Sheppo 		ASSERT((event & (LDC_EVT_RESET | LDC_EVT_DOWN)) == 0);
34261ae08745Sheppo 	}
34271ae08745Sheppo 
34281ae08745Sheppo 	if (event & LDC_EVT_READ) {
34291ae08745Sheppo 		/*
34301ae08745Sheppo 		 * Data available for reading.
34311ae08745Sheppo 		 */
34321ae08745Sheppo 		D2(vswp, "%s: id(%ld) event(%llx) data READ",
34331ae08745Sheppo 				__func__, ldcp->ldc_id, event);
34341ae08745Sheppo 
34351ae08745Sheppo 		vsw_process_pkt(ldcp);
34361ae08745Sheppo 
34371ae08745Sheppo 		ASSERT((event & (LDC_EVT_RESET | LDC_EVT_DOWN)) == 0);
34381ae08745Sheppo 
34391ae08745Sheppo 		goto vsw_cb_exit;
34401ae08745Sheppo 	}
34411ae08745Sheppo 
3442*3af08d82Slm66018 	if (event & (LDC_EVT_DOWN | LDC_EVT_RESET)) {
3443*3af08d82Slm66018 		D2(vswp, "%s: id(%ld) event(%llx) DOWN/RESET",
3444*3af08d82Slm66018 					__func__, ldcp->ldc_id, event);
3445*3af08d82Slm66018 
3446*3af08d82Slm66018 		/* attempt to restart the connection */
3447*3af08d82Slm66018 		vsw_restart_ldc(ldcp);
3448*3af08d82Slm66018 
3449*3af08d82Slm66018 		/*
3450*3af08d82Slm66018 		 * vsw_restart_ldc() will attempt to bring the channel
3451*3af08d82Slm66018 		 * back up. Check here to see if that succeeded.
3452*3af08d82Slm66018 		 */
3453*3af08d82Slm66018 		mutex_enter(&ldcp->status_lock);
3454*3af08d82Slm66018 		lstatus = ldcp->ldc_status;
3455*3af08d82Slm66018 		rv = ldc_status(ldcp->ldc_handle, &ldcp->ldc_status);
3456*3af08d82Slm66018 		mutex_exit(&ldcp->status_lock);
34571ae08745Sheppo 		if (rv != 0) {
3458*3af08d82Slm66018 			DERR(vswp, "%s: unable to read status for channel %ld",
3459*3af08d82Slm66018 				__func__, ldcp->ldc_id);
3460*3af08d82Slm66018 			goto vsw_cb_exit;
34611ae08745Sheppo 		}
34621ae08745Sheppo 
3463*3af08d82Slm66018 		D2(vswp, "%s: id(%ld) event(%llx) DOWN/RESET event:"
3464*3af08d82Slm66018 			" old status %ld : cur status %ld", __func__,
3465*3af08d82Slm66018 			ldcp->ldc_id, event, lstatus, ldcp->ldc_status);
3466*3af08d82Slm66018 
3467*3af08d82Slm66018 		/*
3468*3af08d82Slm66018 		 * If channel was not previously UP then (re)start the
3469*3af08d82Slm66018 		 * handshake.
3470*3af08d82Slm66018 		 */
3471*3af08d82Slm66018 		if ((ldcp->ldc_status == LDC_UP) && (lstatus != LDC_UP)) {
3472*3af08d82Slm66018 			D2(vswp, "%s: channel %ld now UP, restarting "
3473*3af08d82Slm66018 				"handshake", __func__, ldcp->ldc_id);
3474*3af08d82Slm66018 			vsw_restart_handshake(ldcp);
34751ae08745Sheppo 		}
34761ae08745Sheppo 	}
34771ae08745Sheppo 
34781ae08745Sheppo 	/*
34791ae08745Sheppo 	 * Catch either LDC_EVT_WRITE which we don't support or any
34801ae08745Sheppo 	 * unknown event.
34811ae08745Sheppo 	 */
34821ae08745Sheppo 	if (event & ~(LDC_EVT_UP | LDC_EVT_RESET
34831ae08745Sheppo 					| LDC_EVT_DOWN | LDC_EVT_READ)) {
34841ae08745Sheppo 
34851ae08745Sheppo 		DERR(vswp, "%s: id(%ld) Unexpected event=(%llx) status(%ld)",
34861ae08745Sheppo 			__func__, ldcp->ldc_id, event, ldcp->ldc_status);
34871ae08745Sheppo 	}
34881ae08745Sheppo 
34891ae08745Sheppo vsw_cb_exit:
34901ae08745Sheppo 	mutex_exit(&ldcp->ldc_cblock);
34911ae08745Sheppo 
34921ae08745Sheppo 	/*
34931ae08745Sheppo 	 * Let the drain function know we are finishing if it
34941ae08745Sheppo 	 * is waiting.
34951ae08745Sheppo 	 */
34961ae08745Sheppo 	mutex_enter(&ldcp->drain_cv_lock);
34971ae08745Sheppo 	if (ldcp->drain_state == VSW_LDC_DRAINING)
34981ae08745Sheppo 		cv_signal(&ldcp->drain_cv);
34991ae08745Sheppo 	mutex_exit(&ldcp->drain_cv_lock);
35001ae08745Sheppo 
35011ae08745Sheppo 	return (LDC_SUCCESS);
35021ae08745Sheppo }
35031ae08745Sheppo 
35041ae08745Sheppo /*
3505*3af08d82Slm66018  * Restart the connection with our peer. Free any existing
3506*3af08d82Slm66018  * data structures and then attempt to bring channel back
3507*3af08d82Slm66018  * up.
35081ae08745Sheppo  */
35091ae08745Sheppo static void
3510*3af08d82Slm66018 vsw_restart_ldc(vsw_ldc_t *ldcp)
35111ae08745Sheppo {
3512*3af08d82Slm66018 	int		rv;
35131ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
35141ae08745Sheppo 	vsw_port_t	*port;
35151ae08745Sheppo 	vsw_ldc_list_t	*ldcl;
35161ae08745Sheppo 
3517*3af08d82Slm66018 	D1(vswp, "%s: enter", __func__);
35181ae08745Sheppo 
35191ae08745Sheppo 	port = ldcp->ldc_port;
35201ae08745Sheppo 	ldcl = &port->p_ldclist;
35211ae08745Sheppo 
3522*3af08d82Slm66018 	READ_ENTER(&ldcl->lockrw);
35231ae08745Sheppo 
35241ae08745Sheppo 	D2(vswp, "%s: in 0x%llx : out 0x%llx", __func__,
35251ae08745Sheppo 		ldcp->lane_in.lstate, ldcp->lane_out.lstate);
35261ae08745Sheppo 
35271ae08745Sheppo 	vsw_free_lane_resources(ldcp, INBOUND);
35281ae08745Sheppo 	vsw_free_lane_resources(ldcp, OUTBOUND);
35291ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
35301ae08745Sheppo 
35311ae08745Sheppo 	ldcp->lane_in.lstate = 0;
35321ae08745Sheppo 	ldcp->lane_out.lstate = 0;
35331ae08745Sheppo 
35341ae08745Sheppo 	/*
35351ae08745Sheppo 	 * Remove parent port from any multicast groups
35361ae08745Sheppo 	 * it may have registered with. Client must resend
35371ae08745Sheppo 	 * multicast add command after handshake completes.
35381ae08745Sheppo 	 */
35391ae08745Sheppo 	(void) vsw_del_fdb(vswp, port);
35401ae08745Sheppo 
35411ae08745Sheppo 	vsw_del_mcst_port(port);
35421ae08745Sheppo 
35431ae08745Sheppo 	ldcp->peer_session = 0;
35441ae08745Sheppo 	ldcp->session_status = 0;
3545*3af08d82Slm66018 	ldcp->hcnt = 0;
3546*3af08d82Slm66018 	ldcp->hphase = VSW_MILESTONE0;
3547*3af08d82Slm66018 
3548*3af08d82Slm66018 	rv = ldc_up(ldcp->ldc_handle);
3549*3af08d82Slm66018 	if (rv != 0) {
3550*3af08d82Slm66018 		/*
3551*3af08d82Slm66018 		 * Not a fatal error for ldc_up() to fail, as peer
3552*3af08d82Slm66018 		 * end point may simply not be ready yet.
3553*3af08d82Slm66018 		 */
3554*3af08d82Slm66018 		D2(vswp, "%s: ldc_up err id(%lld) rv(%d)", __func__,
3555*3af08d82Slm66018 			ldcp->ldc_id, rv);
3556*3af08d82Slm66018 	}
3557*3af08d82Slm66018 
3558*3af08d82Slm66018 	D1(vswp, "%s: exit", __func__);
3559*3af08d82Slm66018 }
3560*3af08d82Slm66018 
3561*3af08d82Slm66018 /*
3562*3af08d82Slm66018  * (Re)start a handshake with our peer by sending them
3563*3af08d82Slm66018  * our version info.
3564*3af08d82Slm66018  */
3565*3af08d82Slm66018 static void
3566*3af08d82Slm66018 vsw_restart_handshake(vsw_ldc_t *ldcp)
3567*3af08d82Slm66018 {
3568*3af08d82Slm66018 	vsw_t		*vswp = ldcp->ldc_vswp;
3569*3af08d82Slm66018 
3570*3af08d82Slm66018 	D1(vswp, "vsw_restart_handshake: enter");
3571*3af08d82Slm66018 
3572*3af08d82Slm66018 	if (ldcp->hphase != VSW_MILESTONE0) {
3573*3af08d82Slm66018 		vsw_restart_ldc(ldcp);
3574*3af08d82Slm66018 	}
35751ae08745Sheppo 
35761ae08745Sheppo 	/*
35771ae08745Sheppo 	 * We now increment the transaction group id. This allows
35781ae08745Sheppo 	 * us to identify and discard any tasks which are still pending
35791ae08745Sheppo 	 * on the taskq and refer to the handshake session we are about
35801ae08745Sheppo 	 * to restart. These stale messages no longer have any real
35811ae08745Sheppo 	 * meaning.
35821ae08745Sheppo 	 */
35831ae08745Sheppo 	mutex_enter(&ldcp->hss_lock);
35841ae08745Sheppo 	ldcp->hss_id++;
35851ae08745Sheppo 	mutex_exit(&ldcp->hss_lock);
35861ae08745Sheppo 
35871ae08745Sheppo 	if (ldcp->hcnt++ > vsw_num_handshakes) {
35881ae08745Sheppo 		cmn_err(CE_WARN, "exceeded number of permitted "
35891ae08745Sheppo 			"handshake attempts (%d) on channel %ld",
35901ae08745Sheppo 			ldcp->hcnt, ldcp->ldc_id);
35911ae08745Sheppo 		return;
35921ae08745Sheppo 	}
35931ae08745Sheppo 
3594*3af08d82Slm66018 	if ((vswp->taskq_p == NULL) ||
3595*3af08d82Slm66018 		(ddi_taskq_dispatch(vswp->taskq_p, vsw_send_ver, ldcp,
3596*3af08d82Slm66018 			DDI_NOSLEEP) != DDI_SUCCESS)) {
3597*3af08d82Slm66018 		cmn_err(CE_WARN, "Can't dispatch version handshake task");
3598*3af08d82Slm66018 	}
35991ae08745Sheppo 
36001ae08745Sheppo 	D1(vswp, "vsw_restart_handshake: exit");
36011ae08745Sheppo }
36021ae08745Sheppo 
36031ae08745Sheppo /*
3604*3af08d82Slm66018  * Deal appropriately with an ECONNRESET event encountered in an ldc_*
3605*3af08d82Slm66018  * call.
3606*3af08d82Slm66018  */
3607*3af08d82Slm66018 static void
3608*3af08d82Slm66018 vsw_handle_reset(vsw_ldc_t *ldcp)
3609*3af08d82Slm66018 {
3610*3af08d82Slm66018 	vsw_t		*vswp = ldcp->ldc_vswp;
3611*3af08d82Slm66018 	ldc_status_t	lstatus;
3612*3af08d82Slm66018 
3613*3af08d82Slm66018 	D1(vswp, "%s: enter", __func__);
3614*3af08d82Slm66018 
3615*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
3616*3af08d82Slm66018 	lstatus = ldcp->ldc_status;
3617*3af08d82Slm66018 	if (ldc_status(ldcp->ldc_handle, &ldcp->ldc_status) != 0) {
3618*3af08d82Slm66018 		DERR(vswp, "%s: unable to read status for channel %ld",
3619*3af08d82Slm66018 			__func__, ldcp->ldc_id);
3620*3af08d82Slm66018 		mutex_exit(&ldcp->status_lock);
3621*3af08d82Slm66018 		return;
3622*3af08d82Slm66018 	}
3623*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
3624*3af08d82Slm66018 
3625*3af08d82Slm66018 	/*
3626*3af08d82Slm66018 	 * Check the channel's previous recorded state to
3627*3af08d82Slm66018 	 * determine if this is the first ECONNRESET event
3628*3af08d82Slm66018 	 * we've gotten for this particular channel (i.e. was
3629*3af08d82Slm66018 	 * previously up but is no longer). If so, restart
3630*3af08d82Slm66018 	 * the connection.
3631*3af08d82Slm66018 	 */
3632*3af08d82Slm66018 	if ((ldcp->ldc_status != LDC_UP) && (lstatus == LDC_UP)) {
3633*3af08d82Slm66018 		vsw_restart_ldc(ldcp);
3634*3af08d82Slm66018 	}
3635*3af08d82Slm66018 
3636*3af08d82Slm66018 	/*
3637*3af08d82Slm66018 	 * vsw_restart_ldc() will also attempt to bring channel
3638*3af08d82Slm66018 	 * back up. Check here if that succeeds.
3639*3af08d82Slm66018 	 */
3640*3af08d82Slm66018 	mutex_enter(&ldcp->status_lock);
3641*3af08d82Slm66018 	lstatus = ldcp->ldc_status;
3642*3af08d82Slm66018 	if (ldc_status(ldcp->ldc_handle, &ldcp->ldc_status) != 0) {
3643*3af08d82Slm66018 		DERR(vswp, "%s: unable to read status for channel %ld",
3644*3af08d82Slm66018 			__func__, ldcp->ldc_id);
3645*3af08d82Slm66018 		mutex_exit(&ldcp->status_lock);
3646*3af08d82Slm66018 		return;
3647*3af08d82Slm66018 	}
3648*3af08d82Slm66018 	mutex_exit(&ldcp->status_lock);
3649*3af08d82Slm66018 
3650*3af08d82Slm66018 	/*
3651*3af08d82Slm66018 	 * If channel is now up and no one else (i.e. the callback routine)
3652*3af08d82Slm66018 	 * has dealt with it then we restart the handshake here.
3653*3af08d82Slm66018 	 */
3654*3af08d82Slm66018 	if ((lstatus != LDC_UP) && (ldcp->ldc_status == LDC_UP)) {
3655*3af08d82Slm66018 		vsw_restart_handshake(ldcp);
3656*3af08d82Slm66018 	}
3657*3af08d82Slm66018 
3658*3af08d82Slm66018 	D1(vswp, "%s: exit", __func__);
3659*3af08d82Slm66018 }
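
/*
 * A minimal illustrative sketch, not the driver's actual transmit path
 * (which lives elsewhere in this file): how a transmit-side caller of
 * ldc_write() might use vsw_handle_reset() above when the write fails
 * with ECONNRESET. The helper name is hypothetical.
 */
static int
vsw_example_ldc_write(vsw_ldc_t *ldcp, caddr_t bufp, size_t size)
{
	int	rv;

	mutex_enter(&ldcp->ldc_txlock);
	rv = ldc_write(ldcp->ldc_handle, bufp, &size);
	mutex_exit(&ldcp->ldc_txlock);

	if (rv == ECONNRESET) {
		/* peer reset the channel; attempt to recover it */
		vsw_handle_reset(ldcp);
	}

	return (rv);
}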
3660*3af08d82Slm66018 
3661*3af08d82Slm66018 /*
36621ae08745Sheppo  * Returns 0 if it is legal for the event signified by flag to have
36631ae08745Sheppo  * occurred at the time it did. Otherwise returns 1.
36641ae08745Sheppo  */
36651ae08745Sheppo int
36661ae08745Sheppo vsw_check_flag(vsw_ldc_t *ldcp, int dir, uint64_t flag)
36671ae08745Sheppo {
36681ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
36691ae08745Sheppo 	uint64_t	state;
36701ae08745Sheppo 	uint64_t	phase;
36711ae08745Sheppo 
36721ae08745Sheppo 	if (dir == INBOUND)
36731ae08745Sheppo 		state = ldcp->lane_in.lstate;
36741ae08745Sheppo 	else
36751ae08745Sheppo 		state = ldcp->lane_out.lstate;
36761ae08745Sheppo 
36771ae08745Sheppo 	phase = ldcp->hphase;
36781ae08745Sheppo 
36791ae08745Sheppo 	switch (flag) {
36801ae08745Sheppo 	case VSW_VER_INFO_RECV:
36811ae08745Sheppo 		if (phase > VSW_MILESTONE0) {
36821ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): VER_INFO_RECV"
36831ae08745Sheppo 				" when in state %d\n", ldcp->ldc_id, phase);
36841ae08745Sheppo 			vsw_restart_handshake(ldcp);
36851ae08745Sheppo 			return (1);
36861ae08745Sheppo 		}
36871ae08745Sheppo 		break;
36881ae08745Sheppo 
36891ae08745Sheppo 	case VSW_VER_ACK_RECV:
36901ae08745Sheppo 	case VSW_VER_NACK_RECV:
36911ae08745Sheppo 		if (!(state & VSW_VER_INFO_SENT)) {
36921ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): spurious VER_ACK"
36931ae08745Sheppo 				" or VER_NACK when in state %d\n",
36941ae08745Sheppo 				ldcp->ldc_id, phase);
36951ae08745Sheppo 			vsw_restart_handshake(ldcp);
36961ae08745Sheppo 			return (1);
36971ae08745Sheppo 		} else
36981ae08745Sheppo 			state &= ~VSW_VER_INFO_SENT;
36991ae08745Sheppo 		break;
37001ae08745Sheppo 
37011ae08745Sheppo 	case VSW_ATTR_INFO_RECV:
37021ae08745Sheppo 		if ((phase < VSW_MILESTONE1) || (phase >= VSW_MILESTONE2)) {
37031ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): ATTR_INFO_RECV"
37041ae08745Sheppo 				" when in state %d\n", ldcp->ldc_id, phase);
37051ae08745Sheppo 			vsw_restart_handshake(ldcp);
37061ae08745Sheppo 			return (1);
37071ae08745Sheppo 		}
37081ae08745Sheppo 		break;
37091ae08745Sheppo 
37101ae08745Sheppo 	case VSW_ATTR_ACK_RECV:
37111ae08745Sheppo 	case VSW_ATTR_NACK_RECV:
37121ae08745Sheppo 		if (!(state & VSW_ATTR_INFO_SENT)) {
37131ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): spurious ATTR_ACK"
37141ae08745Sheppo 				" or ATTR_NACK when in state %d\n",
37151ae08745Sheppo 				ldcp->ldc_id, phase);
37161ae08745Sheppo 			vsw_restart_handshake(ldcp);
37171ae08745Sheppo 			return (1);
37181ae08745Sheppo 		} else
37191ae08745Sheppo 			state &= ~VSW_ATTR_INFO_SENT;
37201ae08745Sheppo 		break;
37211ae08745Sheppo 
37221ae08745Sheppo 	case VSW_DRING_INFO_RECV:
37231ae08745Sheppo 		if (phase < VSW_MILESTONE1) {
37241ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): DRING_INFO_RECV"
37251ae08745Sheppo 				" when in state %d\n", ldcp->ldc_id, phase);
37261ae08745Sheppo 			vsw_restart_handshake(ldcp);
37271ae08745Sheppo 			return (1);
37281ae08745Sheppo 		}
37291ae08745Sheppo 		break;
37301ae08745Sheppo 
37311ae08745Sheppo 	case VSW_DRING_ACK_RECV:
37321ae08745Sheppo 	case VSW_DRING_NACK_RECV:
37331ae08745Sheppo 		if (!(state & VSW_DRING_INFO_SENT)) {
37341ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): spurious DRING_ACK"
37351ae08745Sheppo 				" or DRING_NACK when in state %d\n",
37361ae08745Sheppo 				ldcp->ldc_id, phase);
37371ae08745Sheppo 			vsw_restart_handshake(ldcp);
37381ae08745Sheppo 			return (1);
37391ae08745Sheppo 		} else
37401ae08745Sheppo 			state &= ~VSW_DRING_INFO_SENT;
37411ae08745Sheppo 		break;
37421ae08745Sheppo 
37431ae08745Sheppo 	case VSW_RDX_INFO_RECV:
37441ae08745Sheppo 		if (phase < VSW_MILESTONE3) {
37451ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): RDX_INFO_RECV"
37461ae08745Sheppo 				" when in state %d\n", ldcp->ldc_id, phase);
37471ae08745Sheppo 			vsw_restart_handshake(ldcp);
37481ae08745Sheppo 			return (1);
37491ae08745Sheppo 		}
37501ae08745Sheppo 		break;
37511ae08745Sheppo 
37521ae08745Sheppo 	case VSW_RDX_ACK_RECV:
37531ae08745Sheppo 	case VSW_RDX_NACK_RECV:
37541ae08745Sheppo 		if (!(state & VSW_RDX_INFO_SENT)) {
37551ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): spurious RDX_ACK"
37561ae08745Sheppo 				" or RDX_NACK when in state %d\n",
37571ae08745Sheppo 				ldcp->ldc_id, phase);
37581ae08745Sheppo 			vsw_restart_handshake(ldcp);
37591ae08745Sheppo 			return (1);
37601ae08745Sheppo 		} else
37611ae08745Sheppo 			state &= ~VSW_RDX_INFO_SENT;
37621ae08745Sheppo 		break;
37631ae08745Sheppo 
37641ae08745Sheppo 	case VSW_MCST_INFO_RECV:
37651ae08745Sheppo 		if (phase < VSW_MILESTONE3) {
37661ae08745Sheppo 			DERR(vswp, "vsw_check_flag (%d): VSW_MCST_INFO_RECV"
37671ae08745Sheppo 				" when in state %d\n", ldcp->ldc_id, phase);
37681ae08745Sheppo 			vsw_restart_handshake(ldcp);
37691ae08745Sheppo 			return (1);
37701ae08745Sheppo 		}
37711ae08745Sheppo 		break;
37721ae08745Sheppo 
37731ae08745Sheppo 	default:
37741ae08745Sheppo 		DERR(vswp, "vsw_check_flag (%lld): unknown flag (%llx)",
37751ae08745Sheppo 				ldcp->ldc_id, flag);
37761ae08745Sheppo 		return (1);
37771ae08745Sheppo 	}
37781ae08745Sheppo 
37791ae08745Sheppo 	if (dir == INBOUND)
37801ae08745Sheppo 		ldcp->lane_in.lstate = state;
37811ae08745Sheppo 	else
37821ae08745Sheppo 		ldcp->lane_out.lstate = state;
37831ae08745Sheppo 
37841ae08745Sheppo 	D1(vswp, "vsw_check_flag (chan %lld): exit", ldcp->ldc_id);
37851ae08745Sheppo 
37861ae08745Sheppo 	return (0);
37871ae08745Sheppo }
37881ae08745Sheppo 
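/*
 * Move the handshake on to its next phase. The handshake passes through
 * the following milestones:
 *
 *	MILESTONE0 - version information successfully exchanged
 *	MILESTONE1 - attribute information successfully exchanged
 *	MILESTONE2 - descriptor ring information exchanged (dring mode only)
 *	MILESTONE3 - RDX messages exchanged in both directions
 *	MILESTONE4 - handshake complete, outbound lane marked active
 */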
37891ae08745Sheppo void
37901ae08745Sheppo vsw_next_milestone(vsw_ldc_t *ldcp)
37911ae08745Sheppo {
37921ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
37931ae08745Sheppo 
37941ae08745Sheppo 	D1(vswp, "%s (chan %lld): enter (phase %ld)", __func__,
37951ae08745Sheppo 		ldcp->ldc_id, ldcp->hphase);
37961ae08745Sheppo 
37971ae08745Sheppo 	DUMP_FLAGS(ldcp->lane_in.lstate);
37981ae08745Sheppo 	DUMP_FLAGS(ldcp->lane_out.lstate);
37991ae08745Sheppo 
38001ae08745Sheppo 	switch (ldcp->hphase) {
38011ae08745Sheppo 
38021ae08745Sheppo 	case VSW_MILESTONE0:
38031ae08745Sheppo 		/*
38041ae08745Sheppo 		 * If we haven't started to handshake with our peer,
38051ae08745Sheppo 		 * start to do so now.
38061ae08745Sheppo 		 */
38071ae08745Sheppo 		if (ldcp->lane_out.lstate == 0) {
38081ae08745Sheppo 			D2(vswp, "%s: (chan %lld) starting handshake "
38091ae08745Sheppo 				"with peer", __func__, ldcp->ldc_id);
38101ae08745Sheppo 			vsw_restart_handshake(ldcp);
38111ae08745Sheppo 		}
38121ae08745Sheppo 
38131ae08745Sheppo 		/*
38141ae08745Sheppo 		 * Only way to pass this milestone is to have successfully
38151ae08745Sheppo 		 * negotiated version info.
38161ae08745Sheppo 		 */
38171ae08745Sheppo 		if ((ldcp->lane_in.lstate & VSW_VER_ACK_SENT) &&
38181ae08745Sheppo 			(ldcp->lane_out.lstate & VSW_VER_ACK_RECV)) {
38191ae08745Sheppo 
38201ae08745Sheppo 			D2(vswp, "%s: (chan %lld) leaving milestone 0",
38211ae08745Sheppo 				__func__, ldcp->ldc_id);
38221ae08745Sheppo 
38231ae08745Sheppo 			/*
38241ae08745Sheppo 			 * Next milestone is passed when attribute
38251ae08745Sheppo 			 * information has been successfully exchanged.
38261ae08745Sheppo 			 */
38271ae08745Sheppo 			ldcp->hphase = VSW_MILESTONE1;
38281ae08745Sheppo 			vsw_send_attr(ldcp);
38291ae08745Sheppo 
38301ae08745Sheppo 		}
38311ae08745Sheppo 		break;
38321ae08745Sheppo 
38331ae08745Sheppo 	case VSW_MILESTONE1:
38341ae08745Sheppo 		/*
38351ae08745Sheppo 		 * Only way to pass this milestone is to have successfully
38361ae08745Sheppo 		 * negotiated attribute information.
38371ae08745Sheppo 		 */
38381ae08745Sheppo 		if (ldcp->lane_in.lstate & VSW_ATTR_ACK_SENT) {
38391ae08745Sheppo 
38401ae08745Sheppo 			ldcp->hphase = VSW_MILESTONE2;
38411ae08745Sheppo 
38421ae08745Sheppo 			/*
38431ae08745Sheppo 			 * If the peer device has said it wishes to
38441ae08745Sheppo 			 * use descriptor rings then we send it our ring
38451ae08745Sheppo 			 * info, otherwise we just set up a private ring
38461ae08745Sheppo 			 * which uses an internal buffer.
38471ae08745Sheppo 			 */
38481ae08745Sheppo 			if (ldcp->lane_in.xfer_mode == VIO_DRING_MODE)
38491ae08745Sheppo 				vsw_send_dring_info(ldcp);
38501ae08745Sheppo 		}
38511ae08745Sheppo 		break;
38521ae08745Sheppo 
38531ae08745Sheppo 
38541ae08745Sheppo 	case VSW_MILESTONE2:
38551ae08745Sheppo 		/*
38561ae08745Sheppo 		 * If peer has indicated in its attribute message that
38571ae08745Sheppo 		 * it wishes to use descriptor rings then the only way
38581ae08745Sheppo 		 * to pass this milestone is for us to have received
38591ae08745Sheppo 		 * valid dring info.
38601ae08745Sheppo 		 *
38611ae08745Sheppo 		 * If peer is not using descriptor rings then just fall
38621ae08745Sheppo 		 * through.
38631ae08745Sheppo 		 */
38641ae08745Sheppo 		if ((ldcp->lane_in.xfer_mode == VIO_DRING_MODE) &&
38651ae08745Sheppo 			(!(ldcp->lane_in.lstate & VSW_DRING_ACK_SENT)))
38661ae08745Sheppo 			break;
38671ae08745Sheppo 
38681ae08745Sheppo 		D2(vswp, "%s: (chan %lld) leaving milestone 2",
38691ae08745Sheppo 				__func__, ldcp->ldc_id);
38701ae08745Sheppo 
38711ae08745Sheppo 		ldcp->hphase = VSW_MILESTONE3;
38721ae08745Sheppo 		vsw_send_rdx(ldcp);
38731ae08745Sheppo 		break;
38741ae08745Sheppo 
38751ae08745Sheppo 	case VSW_MILESTONE3:
38761ae08745Sheppo 		/*
38771ae08745Sheppo 		 * Pass this milestone when all parameters have been
38781ae08745Sheppo 		 * successfully exchanged and RDX sent in both directions.
38791ae08745Sheppo 		 *
38801ae08745Sheppo 		 * Mark outbound lane as available to transmit data.
38811ae08745Sheppo 		 */
38821ae08745Sheppo 		if ((ldcp->lane_in.lstate & VSW_RDX_ACK_SENT) &&
38831ae08745Sheppo 			(ldcp->lane_out.lstate & VSW_RDX_ACK_RECV)) {
38841ae08745Sheppo 
38851ae08745Sheppo 			D2(vswp, "%s: (chan %lld) leaving milestone 3",
38861ae08745Sheppo 				__func__, ldcp->ldc_id);
3887*3af08d82Slm66018 			D2(vswp, "%s: ** handshake complete (0x%llx : "
3888*3af08d82Slm66018 				"0x%llx) **", __func__, ldcp->lane_in.lstate,
3889*3af08d82Slm66018 				ldcp->lane_out.lstate);
38901ae08745Sheppo 			ldcp->lane_out.lstate |= VSW_LANE_ACTIVE;
38911ae08745Sheppo 			ldcp->hphase = VSW_MILESTONE4;
38921ae08745Sheppo 			ldcp->hcnt = 0;
38931ae08745Sheppo 			DISPLAY_STATE();
3894*3af08d82Slm66018 		} else {
3895*3af08d82Slm66018 			D2(vswp, "%s: still in milestone 3 (0x%llx :"
3896*3af08d82Slm66018 				" 0x%llx", __func__, ldcp->lane_in.lstate,
3897*3af08d82Slm66018 				ldcp->lane_out.lstate);
38981ae08745Sheppo 		}
38991ae08745Sheppo 		break;
39001ae08745Sheppo 
39011ae08745Sheppo 	case VSW_MILESTONE4:
39021ae08745Sheppo 		D2(vswp, "%s: (chan %lld) in milestone 4", __func__,
39031ae08745Sheppo 							ldcp->ldc_id);
39041ae08745Sheppo 		break;
39051ae08745Sheppo 
39061ae08745Sheppo 	default:
39071ae08745Sheppo 		DERR(vswp, "%s: (chan %lld) Unknown Phase %x", __func__,
39081ae08745Sheppo 			ldcp->ldc_id, ldcp->hphase);
39091ae08745Sheppo 	}
39101ae08745Sheppo 
39111ae08745Sheppo 	D1(vswp, "%s (chan %lld): exit (phase %ld)", __func__, ldcp->ldc_id,
39121ae08745Sheppo 		ldcp->hphase);
39131ae08745Sheppo }
39141ae08745Sheppo 
39151ae08745Sheppo /*
39161ae08745Sheppo  * Check if major version is supported.
39171ae08745Sheppo  *
39181ae08745Sheppo  * Returns 0 if it finds a supported major number, and if necessary
39191ae08745Sheppo  * adjusts the minor field.
39201ae08745Sheppo  *
39211ae08745Sheppo  * Returns 1 if it can't match the major number exactly. Sets major/minor
39221ae08745Sheppo  * to the next lowest supported values, or to zero if no others are possible.
39231ae08745Sheppo  */
39241ae08745Sheppo static int
39251ae08745Sheppo vsw_supported_version(vio_ver_msg_t *vp)
39261ae08745Sheppo {
39271ae08745Sheppo 	int	i;
39281ae08745Sheppo 
39291ae08745Sheppo 	D1(NULL, "vsw_supported_version: enter");
39301ae08745Sheppo 
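	/*
	 * The vsw_versions[] array is assumed to be sorted in descending
	 * order of major number, so the first entry with a major number
	 * less than or equal to the requested one is the best we can offer.
	 */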
39311ae08745Sheppo 	for (i = 0; i < VSW_NUM_VER; i++) {
39321ae08745Sheppo 		if (vsw_versions[i].ver_major == vp->ver_major) {
39331ae08745Sheppo 			/*
39341ae08745Sheppo 			 * Matching or lower major version found. Update
39351ae08745Sheppo 			 * minor number if necessary.
39361ae08745Sheppo 			 */
39371ae08745Sheppo 			if (vp->ver_minor > vsw_versions[i].ver_minor) {
39381ae08745Sheppo 				D2(NULL, "%s: adjusting minor value"
39391ae08745Sheppo 					" from %d to %d", __func__,
39401ae08745Sheppo 					vp->ver_minor,
39411ae08745Sheppo 					vsw_versions[i].ver_minor);
39421ae08745Sheppo 				vp->ver_minor = vsw_versions[i].ver_minor;
39431ae08745Sheppo 			}
39441ae08745Sheppo 
39451ae08745Sheppo 			return (0);
39461ae08745Sheppo 		}
39471ae08745Sheppo 
39481ae08745Sheppo 		if (vsw_versions[i].ver_major < vp->ver_major) {
39491ae08745Sheppo 			if (vp->ver_minor > vsw_versions[i].ver_minor) {
39501ae08745Sheppo 				D2(NULL, "%s: adjusting minor value"
39511ae08745Sheppo 					" from %d to %d", __func__,
39521ae08745Sheppo 					vp->ver_minor,
39531ae08745Sheppo 					vsw_versions[i].ver_minor);
39541ae08745Sheppo 				vp->ver_minor = vsw_versions[i].ver_minor;
39551ae08745Sheppo 			}
			/*
			 * Also drop the major number to the next lowest
			 * one we support, as described above.
			 */
			vp->ver_major = vsw_versions[i].ver_major;
39561ae08745Sheppo 			return (1);
39571ae08745Sheppo 		}
39581ae08745Sheppo 	}
39591ae08745Sheppo 
39601ae08745Sheppo 	/* No match was possible, zero out fields */
39611ae08745Sheppo 	vp->ver_major = 0;
39621ae08745Sheppo 	vp->ver_minor = 0;
39631ae08745Sheppo 
39641ae08745Sheppo 	D1(NULL, "vsw_supported_version: exit");
39651ae08745Sheppo 
39661ae08745Sheppo 	return (1);
39671ae08745Sheppo }
39681ae08745Sheppo 
39691ae08745Sheppo /*
39701ae08745Sheppo  * Main routine for processing messages received over LDC.
39711ae08745Sheppo  */
39721ae08745Sheppo static void
39731ae08745Sheppo vsw_process_pkt(void *arg)
39741ae08745Sheppo {
39751ae08745Sheppo 	vsw_ldc_t	*ldcp = (vsw_ldc_t  *)arg;
39761ae08745Sheppo 	vsw_t 		*vswp = ldcp->ldc_vswp;
39771ae08745Sheppo 	size_t		msglen;
39781ae08745Sheppo 	vio_msg_tag_t	tag;
39791ae08745Sheppo 	def_msg_t	dmsg;
39801ae08745Sheppo 	int 		rv = 0;
39811ae08745Sheppo 
3982*3af08d82Slm66018 
39831ae08745Sheppo 	D1(vswp, "%s enter: ldcid (%lld)\n", __func__, ldcp->ldc_id);
39841ae08745Sheppo 
39851ae08745Sheppo 	/*
39861ae08745Sheppo 	 * If the channel is up, read messages until the channel is empty.
39871ae08745Sheppo 	 */
39881ae08745Sheppo 	do {
39891ae08745Sheppo 		msglen = sizeof (dmsg);
39901ae08745Sheppo 		rv = ldc_read(ldcp->ldc_handle, (caddr_t)&dmsg, &msglen);
39911ae08745Sheppo 
39921ae08745Sheppo 		if (rv != 0) {
39931ae08745Sheppo 			DERR(vswp, "%s :ldc_read err id(%lld) rv(%d) "
39941ae08745Sheppo 				"len(%d)\n", __func__, ldcp->ldc_id,
39951ae08745Sheppo 							rv, msglen);
3996*3af08d82Slm66018 		}
3997*3af08d82Slm66018 
3998*3af08d82Slm66018 		/* channel has been reset */
3999*3af08d82Slm66018 		if (rv == ECONNRESET) {
4000*3af08d82Slm66018 			vsw_handle_reset(ldcp);
40011ae08745Sheppo 			break;
40021ae08745Sheppo 		}
40031ae08745Sheppo 
40041ae08745Sheppo 		if (msglen == 0) {
40051ae08745Sheppo 			D2(vswp, "%s: ldc_read id(%lld) NODATA", __func__,
40061ae08745Sheppo 			ldcp->ldc_id);
40071ae08745Sheppo 			break;
40081ae08745Sheppo 		}
40091ae08745Sheppo 
40101ae08745Sheppo 		D2(vswp, "%s: ldc_read id(%lld): msglen(%d)", __func__,
40111ae08745Sheppo 		    ldcp->ldc_id, msglen);
40121ae08745Sheppo 
40131ae08745Sheppo 		/*
40141ae08745Sheppo 		 * Figure out what sort of packet we have gotten by
40151ae08745Sheppo 		 * examining the msg tag, and then switch it appropriately.
40161ae08745Sheppo 		 */
40171ae08745Sheppo 		bcopy(&dmsg, &tag, sizeof (vio_msg_tag_t));
40181ae08745Sheppo 
40191ae08745Sheppo 		switch (tag.vio_msgtype) {
40201ae08745Sheppo 		case VIO_TYPE_CTRL:
40211ae08745Sheppo 			vsw_dispatch_ctrl_task(ldcp, &dmsg, tag);
40221ae08745Sheppo 			break;
40231ae08745Sheppo 		case VIO_TYPE_DATA:
40241ae08745Sheppo 			vsw_process_data_pkt(ldcp, &dmsg, tag);
40251ae08745Sheppo 			break;
40261ae08745Sheppo 		case VIO_TYPE_ERR:
40271ae08745Sheppo 			vsw_process_err_pkt(ldcp, &dmsg, tag);
40281ae08745Sheppo 			break;
40291ae08745Sheppo 		default:
40301ae08745Sheppo 			DERR(vswp, "%s: Unknown tag(%lx) id(%lx)\n",
40311ae08745Sheppo 				__func__, tag.vio_msgtype, ldcp->ldc_id);
40321ae08745Sheppo 			break;
40331ae08745Sheppo 		}
40341ae08745Sheppo 	} while (msglen);
40351ae08745Sheppo 
40361ae08745Sheppo 	D1(vswp, "%s exit: ldcid (%lld)\n", __func__, ldcp->ldc_id);
40371ae08745Sheppo }
40381ae08745Sheppo 
40391ae08745Sheppo /*
40401ae08745Sheppo  * Dispatch a task to process a VIO control message.
40411ae08745Sheppo  */
40421ae08745Sheppo static void
40431ae08745Sheppo vsw_dispatch_ctrl_task(vsw_ldc_t *ldcp, void *cpkt, vio_msg_tag_t tag)
40441ae08745Sheppo {
40451ae08745Sheppo 	vsw_ctrl_task_t		*ctaskp = NULL;
40461ae08745Sheppo 	vsw_port_t		*port = ldcp->ldc_port;
40471ae08745Sheppo 	vsw_t			*vswp = port->p_vswp;
40481ae08745Sheppo 
40491ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
40501ae08745Sheppo 
40511ae08745Sheppo 	/*
40521ae08745Sheppo 	 * We need to handle RDX ACK messages in-band as once they
40531ae08745Sheppo 	 * are exchanged it is possible that we will get an
40541ae08745Sheppo 	 * immediate (legitimate) data packet.
40551ae08745Sheppo 	 */
40561ae08745Sheppo 	if ((tag.vio_subtype_env == VIO_RDX) &&
40571ae08745Sheppo 		(tag.vio_subtype == VIO_SUBTYPE_ACK)) {
4058*3af08d82Slm66018 
40591ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_RDX_ACK_RECV))
40601ae08745Sheppo 			return;
40611ae08745Sheppo 
40621ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_RDX_ACK_RECV;
4063*3af08d82Slm66018 		D2(vswp, "%s (%ld) handling RDX_ACK in place "
4064*3af08d82Slm66018 			"(ostate 0x%llx : hphase %d)", __func__,
4065*3af08d82Slm66018 			ldcp->ldc_id, ldcp->lane_out.lstate, ldcp->hphase);
40661ae08745Sheppo 		vsw_next_milestone(ldcp);
40671ae08745Sheppo 		return;
40681ae08745Sheppo 	}
40691ae08745Sheppo 
40701ae08745Sheppo 	ctaskp = kmem_alloc(sizeof (vsw_ctrl_task_t), KM_NOSLEEP);
40711ae08745Sheppo 
40721ae08745Sheppo 	if (ctaskp == NULL) {
40731ae08745Sheppo 		DERR(vswp, "%s: unable to alloc space for ctrl"
40741ae08745Sheppo 			" msg", __func__);
40751ae08745Sheppo 		vsw_restart_handshake(ldcp);
40761ae08745Sheppo 		return;
40771ae08745Sheppo 	}
40781ae08745Sheppo 
40791ae08745Sheppo 	ctaskp->ldcp = ldcp;
40801ae08745Sheppo 	bcopy((def_msg_t *)cpkt, &ctaskp->pktp, sizeof (def_msg_t));
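	/*
	 * Record the handshake session this message belongs to, so that
	 * vsw_process_ctrl_pkt() can discard it if a new handshake has
	 * been started by the time the task runs.
	 */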
40811ae08745Sheppo 	mutex_enter(&ldcp->hss_lock);
40821ae08745Sheppo 	ctaskp->hss_id = ldcp->hss_id;
40831ae08745Sheppo 	mutex_exit(&ldcp->hss_lock);
40841ae08745Sheppo 
40851ae08745Sheppo 	/*
40861ae08745Sheppo 	 * Dispatch task to processing taskq if port is not in
40871ae08745Sheppo 	 * the process of being detached.
40881ae08745Sheppo 	 */
40891ae08745Sheppo 	mutex_enter(&port->state_lock);
40901ae08745Sheppo 	if (port->state == VSW_PORT_INIT) {
40911ae08745Sheppo 		if ((vswp->taskq_p == NULL) ||
40921ae08745Sheppo 			(ddi_taskq_dispatch(vswp->taskq_p,
40931ae08745Sheppo 			vsw_process_ctrl_pkt, ctaskp, DDI_NOSLEEP)
40941ae08745Sheppo 							!= DDI_SUCCESS)) {
40951ae08745Sheppo 			DERR(vswp, "%s: unable to dispatch task to taskq",
40961ae08745Sheppo 				__func__);
40971ae08745Sheppo 			kmem_free(ctaskp, sizeof (vsw_ctrl_task_t));
40981ae08745Sheppo 			mutex_exit(&port->state_lock);
40991ae08745Sheppo 			vsw_restart_handshake(ldcp);
41001ae08745Sheppo 			return;
41011ae08745Sheppo 		}
41021ae08745Sheppo 	} else {
41031ae08745Sheppo 		DWARN(vswp, "%s: port %d detaching, not dispatching "
41041ae08745Sheppo 			"task", __func__, port->p_instance);
41051ae08745Sheppo 	}
41061ae08745Sheppo 
41071ae08745Sheppo 	mutex_exit(&port->state_lock);
41081ae08745Sheppo 
41091ae08745Sheppo 	D2(vswp, "%s: dispatched task to taskq for chan %d", __func__,
41101ae08745Sheppo 			ldcp->ldc_id);
41111ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
41121ae08745Sheppo }
41131ae08745Sheppo 
41141ae08745Sheppo /*
41151ae08745Sheppo  * Process a VIO ctrl message. Invoked from taskq.
41161ae08745Sheppo  */
41171ae08745Sheppo static void
41181ae08745Sheppo vsw_process_ctrl_pkt(void *arg)
41191ae08745Sheppo {
41201ae08745Sheppo 	vsw_ctrl_task_t	*ctaskp = (vsw_ctrl_task_t *)arg;
41211ae08745Sheppo 	vsw_ldc_t	*ldcp = ctaskp->ldcp;
41221ae08745Sheppo 	vsw_t 		*vswp = ldcp->ldc_vswp;
41231ae08745Sheppo 	vio_msg_tag_t	tag;
41241ae08745Sheppo 	uint16_t	env;
41251ae08745Sheppo 
41261ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
41271ae08745Sheppo 
41281ae08745Sheppo 	bcopy(&ctaskp->pktp, &tag, sizeof (vio_msg_tag_t));
41291ae08745Sheppo 	env = tag.vio_subtype_env;
41301ae08745Sheppo 
41311ae08745Sheppo 	/* stale pkt check */
41321ae08745Sheppo 	mutex_enter(&ldcp->hss_lock);
41331ae08745Sheppo 	if (ctaskp->hss_id < ldcp->hss_id) {
41341ae08745Sheppo 		DWARN(vswp, "%s: discarding stale packet belonging to"
41351ae08745Sheppo 			" earlier (%ld) handshake session", __func__,
41361ae08745Sheppo 			ctaskp->hss_id);
41371ae08745Sheppo 		mutex_exit(&ldcp->hss_lock);
41381ae08745Sheppo 		return;
41391ae08745Sheppo 	}
41401ae08745Sheppo 	mutex_exit(&ldcp->hss_lock);
41411ae08745Sheppo 
41421ae08745Sheppo 	/* session id check */
41431ae08745Sheppo 	if (ldcp->session_status & VSW_PEER_SESSION) {
41441ae08745Sheppo 		if (ldcp->peer_session != tag.vio_sid) {
41451ae08745Sheppo 			DERR(vswp, "%s (chan %d): invalid session id (%llx)",
41461ae08745Sheppo 				__func__, ldcp->ldc_id, tag.vio_sid);
41471ae08745Sheppo 			kmem_free(ctaskp, sizeof (vsw_ctrl_task_t));
41481ae08745Sheppo 			vsw_restart_handshake(ldcp);
41491ae08745Sheppo 			return;
41501ae08745Sheppo 		}
41511ae08745Sheppo 	}
41521ae08745Sheppo 
41531ae08745Sheppo 	/*
41541ae08745Sheppo 	 * Switch on vio_subtype envelope, then let lower routines
41551ae08745Sheppo 	 * decide if it's an INFO, ACK or NACK packet.
41561ae08745Sheppo 	 */
41571ae08745Sheppo 	switch (env) {
41581ae08745Sheppo 	case VIO_VER_INFO:
41591ae08745Sheppo 		vsw_process_ctrl_ver_pkt(ldcp, &ctaskp->pktp);
41601ae08745Sheppo 		break;
41611ae08745Sheppo 	case VIO_DRING_REG:
41621ae08745Sheppo 		vsw_process_ctrl_dring_reg_pkt(ldcp, &ctaskp->pktp);
41631ae08745Sheppo 		break;
41641ae08745Sheppo 	case VIO_DRING_UNREG:
41651ae08745Sheppo 		vsw_process_ctrl_dring_unreg_pkt(ldcp, &ctaskp->pktp);
41661ae08745Sheppo 		break;
41671ae08745Sheppo 	case VIO_ATTR_INFO:
41681ae08745Sheppo 		vsw_process_ctrl_attr_pkt(ldcp, &ctaskp->pktp);
41691ae08745Sheppo 		break;
41701ae08745Sheppo 	case VNET_MCAST_INFO:
41711ae08745Sheppo 		vsw_process_ctrl_mcst_pkt(ldcp, &ctaskp->pktp);
41721ae08745Sheppo 		break;
41731ae08745Sheppo 	case VIO_RDX:
41741ae08745Sheppo 		vsw_process_ctrl_rdx_pkt(ldcp, &ctaskp->pktp);
41751ae08745Sheppo 		break;
41761ae08745Sheppo 	default:
41771ae08745Sheppo 		DERR(vswp, "%s : unknown vio_subtype_env (%x)\n",
41781ae08745Sheppo 							__func__, env);
41791ae08745Sheppo 	}
41801ae08745Sheppo 
41811ae08745Sheppo 	kmem_free(ctaskp, sizeof (vsw_ctrl_task_t));
41821ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
41831ae08745Sheppo }
41841ae08745Sheppo 
41851ae08745Sheppo /*
41861ae08745Sheppo  * Version negotiation. We can end up here either because our peer
41871ae08745Sheppo  * has responded to a handshake message we have sent it, or our peer
41881ae08745Sheppo  * has initiated a handshake with us. If it's the former then it can only
41891ae08745Sheppo  * be an ACK or NACK; if it's the latter it can only be an INFO.
41901ae08745Sheppo  *
41911ae08745Sheppo  * If it's an ACK we move to the next stage of the handshake, namely
41921ae08745Sheppo  * attribute exchange. If it's a NACK we see if we can specify another
41931ae08745Sheppo  * version; if we can't, we stop.
41941ae08745Sheppo  *
41951ae08745Sheppo  * If it is an INFO we reset all params associated with communication
41961ae08745Sheppo  * in that direction over this channel (remember connection is
41971ae08745Sheppo  * essentially 2 independent simplex channels).
41981ae08745Sheppo  */
41991ae08745Sheppo void
42001ae08745Sheppo vsw_process_ctrl_ver_pkt(vsw_ldc_t *ldcp, void *pkt)
42011ae08745Sheppo {
42021ae08745Sheppo 	vio_ver_msg_t	*ver_pkt;
42031ae08745Sheppo 	vsw_t 		*vswp = ldcp->ldc_vswp;
42041ae08745Sheppo 
42051ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
42061ae08745Sheppo 
42071ae08745Sheppo 	/*
42081ae08745Sheppo 	 * We know this is a ctrl/version packet so
42091ae08745Sheppo 	 * cast it into the correct structure.
42101ae08745Sheppo 	 */
42111ae08745Sheppo 	ver_pkt = (vio_ver_msg_t *)pkt;
42121ae08745Sheppo 
42131ae08745Sheppo 	switch (ver_pkt->tag.vio_subtype) {
42141ae08745Sheppo 	case VIO_SUBTYPE_INFO:
42151ae08745Sheppo 		D2(vswp, "vsw_process_ctrl_ver_pkt: VIO_SUBTYPE_INFO\n");
42161ae08745Sheppo 
42171ae08745Sheppo 		/*
42181ae08745Sheppo 		 * Record the session id, which we will use from now
42191ae08745Sheppo 		 * until we see another VER_INFO msg. Even then the
42201ae08745Sheppo 		 * session id in most cases will be unchanged, except
42211ae08745Sheppo 		 * if the channel was reset.
42221ae08745Sheppo 		 */
42231ae08745Sheppo 		if ((ldcp->session_status & VSW_PEER_SESSION) &&
42241ae08745Sheppo 			(ldcp->peer_session != ver_pkt->tag.vio_sid)) {
42251ae08745Sheppo 			DERR(vswp, "%s: updating session id for chan %lld "
42261ae08745Sheppo 				"from %llx to %llx", __func__, ldcp->ldc_id,
42271ae08745Sheppo 				ldcp->peer_session, ver_pkt->tag.vio_sid);
42281ae08745Sheppo 		}
42291ae08745Sheppo 
42301ae08745Sheppo 		ldcp->peer_session = ver_pkt->tag.vio_sid;
42311ae08745Sheppo 		ldcp->session_status |= VSW_PEER_SESSION;
42321ae08745Sheppo 
42331ae08745Sheppo 		/* Legal message at this time ? */
42341ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_VER_INFO_RECV))
42351ae08745Sheppo 			return;
42361ae08745Sheppo 
42371ae08745Sheppo 		/*
42381ae08745Sheppo 		 * First check the device class. Currently only expect
42391ae08745Sheppo 		 * to be talking to a network device. In the future may
42401ae08745Sheppo 		 * also talk to another switch.
42411ae08745Sheppo 		 */
42421ae08745Sheppo 		if (ver_pkt->dev_class != VDEV_NETWORK) {
42431ae08745Sheppo 			DERR(vswp, "%s: illegal device class %d", __func__,
42441ae08745Sheppo 				ver_pkt->dev_class);
42451ae08745Sheppo 
42461ae08745Sheppo 			ver_pkt->tag.vio_sid = ldcp->local_session;
42471ae08745Sheppo 			ver_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
42481ae08745Sheppo 
42491ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)ver_pkt);
42501ae08745Sheppo 
42511ae08745Sheppo 			vsw_send_msg(ldcp, (void *)ver_pkt,
42521ae08745Sheppo 					sizeof (vio_ver_msg_t));
42531ae08745Sheppo 
42541ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_VER_NACK_SENT;
42551ae08745Sheppo 			vsw_next_milestone(ldcp);
42561ae08745Sheppo 			return;
42571ae08745Sheppo 		} else {
42581ae08745Sheppo 			ldcp->dev_class = ver_pkt->dev_class;
42591ae08745Sheppo 		}
42601ae08745Sheppo 
42611ae08745Sheppo 		/*
42621ae08745Sheppo 		 * Now check the version.
42631ae08745Sheppo 		 */
42641ae08745Sheppo 		if (vsw_supported_version(ver_pkt) == 0) {
42651ae08745Sheppo 			/*
42661ae08745Sheppo 			 * Support this major version and possibly
42671ae08745Sheppo 			 * adjusted minor version.
42681ae08745Sheppo 			 */
42691ae08745Sheppo 
42701ae08745Sheppo 			D2(vswp, "%s: accepted ver %d:%d", __func__,
42711ae08745Sheppo 				ver_pkt->ver_major, ver_pkt->ver_minor);
42721ae08745Sheppo 
42731ae08745Sheppo 			/* Store accepted values */
42741ae08745Sheppo 			ldcp->lane_in.ver_major = ver_pkt->ver_major;
42751ae08745Sheppo 			ldcp->lane_in.ver_minor = ver_pkt->ver_minor;
42761ae08745Sheppo 
42771ae08745Sheppo 			ver_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
42781ae08745Sheppo 
42791ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_VER_ACK_SENT;
42801ae08745Sheppo 		} else {
42811ae08745Sheppo 			/*
42821ae08745Sheppo 			 * NACK back with the next lower major/minor
42831ae08745Sheppo 			 * pairing we support (if we don't support any more
42841ae08745Sheppo 			 * versions then they will be set to zero).
42851ae08745Sheppo 			 */
42861ae08745Sheppo 
42871ae08745Sheppo 			D2(vswp, "%s: replying with ver %d:%d", __func__,
42881ae08745Sheppo 				ver_pkt->ver_major, ver_pkt->ver_minor);
42891ae08745Sheppo 
42901ae08745Sheppo 			/* Store updated values */
42911ae08745Sheppo 			ldcp->lane_in.ver_major = ver_pkt->ver_major;
42921ae08745Sheppo 			ldcp->lane_in.ver_minor = ver_pkt->ver_minor;
42931ae08745Sheppo 
42941ae08745Sheppo 			ver_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
42951ae08745Sheppo 
42961ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_VER_NACK_SENT;
42971ae08745Sheppo 		}
42981ae08745Sheppo 
42991ae08745Sheppo 		DUMP_TAG_PTR((vio_msg_tag_t *)ver_pkt);
43001ae08745Sheppo 		ver_pkt->tag.vio_sid = ldcp->local_session;
43011ae08745Sheppo 		vsw_send_msg(ldcp, (void *)ver_pkt, sizeof (vio_ver_msg_t));
43021ae08745Sheppo 
43031ae08745Sheppo 		vsw_next_milestone(ldcp);
43041ae08745Sheppo 		break;
43051ae08745Sheppo 
43061ae08745Sheppo 	case VIO_SUBTYPE_ACK:
43071ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_ACK\n", __func__);
43081ae08745Sheppo 
43091ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_VER_ACK_RECV))
43101ae08745Sheppo 			return;
43111ae08745Sheppo 
43121ae08745Sheppo 		/* Store updated values */
43131ae08745Sheppo 		ldcp->lane_in.ver_major = ver_pkt->ver_major;
43141ae08745Sheppo 		ldcp->lane_in.ver_minor = ver_pkt->ver_minor;
43151ae08745Sheppo 
43161ae08745Sheppo 
43171ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_VER_ACK_RECV;
43181ae08745Sheppo 		vsw_next_milestone(ldcp);
43191ae08745Sheppo 
43201ae08745Sheppo 		break;
43211ae08745Sheppo 
43221ae08745Sheppo 	case VIO_SUBTYPE_NACK:
43231ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_NACK\n", __func__);
43241ae08745Sheppo 
43251ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_VER_NACK_RECV))
43261ae08745Sheppo 			return;
43271ae08745Sheppo 
43281ae08745Sheppo 		/*
43291ae08745Sheppo 		 * If our peer sent us a NACK with the ver fields set to
43301ae08745Sheppo 		 * zero then there is nothing more we can do. Otherwise see
43311ae08745Sheppo 		 * if we support either the version suggested, or a lesser
43321ae08745Sheppo 		 * one.
43331ae08745Sheppo 		 */
43341ae08745Sheppo 		if ((ver_pkt->ver_major == 0) && (ver_pkt->ver_minor == 0)) {
43351ae08745Sheppo 			DERR(vswp, "%s: peer unable to negotiate any "
43361ae08745Sheppo 				"further.", __func__);
43371ae08745Sheppo 			ldcp->lane_out.lstate |= VSW_VER_NACK_RECV;
43381ae08745Sheppo 			vsw_next_milestone(ldcp);
43391ae08745Sheppo 			return;
43401ae08745Sheppo 		}
43411ae08745Sheppo 
43421ae08745Sheppo 		/*
43431ae08745Sheppo 		 * Check to see if we support this major version or
43441ae08745Sheppo 		 * a lower one. If we don't then maj/min will be set
43451ae08745Sheppo 		 * to zero.
43461ae08745Sheppo 		 */
43471ae08745Sheppo 		(void) vsw_supported_version(ver_pkt);
43481ae08745Sheppo 		if ((ver_pkt->ver_major == 0) && (ver_pkt->ver_minor == 0)) {
43491ae08745Sheppo 			/* Nothing more we can do */
43501ae08745Sheppo 			DERR(vswp, "%s: version negotiation failed.\n",
43511ae08745Sheppo 								__func__);
43521ae08745Sheppo 			ldcp->lane_out.lstate |= VSW_VER_NACK_RECV;
43531ae08745Sheppo 			vsw_next_milestone(ldcp);
43541ae08745Sheppo 		} else {
43551ae08745Sheppo 			/* found a supported major version */
43561ae08745Sheppo 			ldcp->lane_out.ver_major = ver_pkt->ver_major;
43571ae08745Sheppo 			ldcp->lane_out.ver_minor = ver_pkt->ver_minor;
43581ae08745Sheppo 
43591ae08745Sheppo 			D2(vswp, "%s: resending with updated values (%x, %x)",
43601ae08745Sheppo 				__func__, ver_pkt->ver_major,
43611ae08745Sheppo 				ver_pkt->ver_minor);
43621ae08745Sheppo 
43631ae08745Sheppo 			ldcp->lane_out.lstate |= VSW_VER_INFO_SENT;
43641ae08745Sheppo 			ver_pkt->tag.vio_sid = ldcp->local_session;
43651ae08745Sheppo 			ver_pkt->tag.vio_subtype = VIO_SUBTYPE_INFO;
43661ae08745Sheppo 
43671ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)ver_pkt);
43681ae08745Sheppo 
43691ae08745Sheppo 			vsw_send_msg(ldcp, (void *)ver_pkt,
43701ae08745Sheppo 					sizeof (vio_ver_msg_t));
43711ae08745Sheppo 
43721ae08745Sheppo 			vsw_next_milestone(ldcp);
43731ae08745Sheppo 
43741ae08745Sheppo 		}
43751ae08745Sheppo 		break;
43761ae08745Sheppo 
43771ae08745Sheppo 	default:
43781ae08745Sheppo 		DERR(vswp, "%s: unknown vio_subtype %x\n", __func__,
43791ae08745Sheppo 			ver_pkt->tag.vio_subtype);
43801ae08745Sheppo 	}
43811ae08745Sheppo 
43821ae08745Sheppo 	D1(vswp, "%s(%lld): exit\n", __func__, ldcp->ldc_id);
43831ae08745Sheppo }
43841ae08745Sheppo 
43851ae08745Sheppo /*
43861ae08745Sheppo  * Process an attribute packet. We can end up here either because our peer
43871ae08745Sheppo  * has ACK/NACK'ed back to an earlier ATTR msg we had sent it, or our
43881ae08745Sheppo  * peer has sent us an attribute INFO message
43891ae08745Sheppo  *
43901ae08745Sheppo  * If its an ACK we then move to the next stage of the handshake which
43911ae08745Sheppo  * is to send our descriptor ring info to our peer. If its a NACK then
43921ae08745Sheppo  * there is nothing more we can (currently) do.
43931ae08745Sheppo  *
43941ae08745Sheppo  * If we get a valid/acceptable INFO packet (and we have already negotiated
43951ae08745Sheppo  * a version) we ACK back and set channel state to ATTR_RECV, otherwise we
43961ae08745Sheppo  * NACK back and reset channel state to INACTIV.
43971ae08745Sheppo  *
43981ae08745Sheppo  * FUTURE: in time we will probably negotiate over attributes, but for
43991ae08745Sheppo  * the moment unacceptable attributes are regarded as a fatal error.
44001ae08745Sheppo  *
44011ae08745Sheppo  */
44021ae08745Sheppo void
44031ae08745Sheppo vsw_process_ctrl_attr_pkt(vsw_ldc_t *ldcp, void *pkt)
44041ae08745Sheppo {
44051ae08745Sheppo 	vnet_attr_msg_t		*attr_pkt;
44061ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
44071ae08745Sheppo 	vsw_port_t		*port = ldcp->ldc_port;
44081ae08745Sheppo 	uint64_t		macaddr = 0;
44091ae08745Sheppo 	int			i;
44101ae08745Sheppo 
44111ae08745Sheppo 	D1(vswp, "%s(%lld) enter", __func__, ldcp->ldc_id);
44121ae08745Sheppo 
44131ae08745Sheppo 	/*
44141ae08745Sheppo 	 * We know this is a ctrl/attr packet so
44151ae08745Sheppo 	 * cast it into the correct structure.
44161ae08745Sheppo 	 */
44171ae08745Sheppo 	attr_pkt = (vnet_attr_msg_t *)pkt;
44181ae08745Sheppo 
44191ae08745Sheppo 	switch (attr_pkt->tag.vio_subtype) {
44201ae08745Sheppo 	case VIO_SUBTYPE_INFO:
44211ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
44221ae08745Sheppo 
44231ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_ATTR_INFO_RECV))
44241ae08745Sheppo 			return;
44251ae08745Sheppo 
44261ae08745Sheppo 		/*
44271ae08745Sheppo 		 * If the attributes are unacceptable then we NACK back.
44281ae08745Sheppo 		 */
44291ae08745Sheppo 		if (vsw_check_attr(attr_pkt, ldcp->ldc_port)) {
44301ae08745Sheppo 
44311ae08745Sheppo 			DERR(vswp, "%s (chan %d): invalid attributes",
44321ae08745Sheppo 				__func__, ldcp->ldc_id);
44331ae08745Sheppo 
44341ae08745Sheppo 			vsw_free_lane_resources(ldcp, INBOUND);
44351ae08745Sheppo 
44361ae08745Sheppo 			attr_pkt->tag.vio_sid = ldcp->local_session;
44371ae08745Sheppo 			attr_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
44381ae08745Sheppo 
44391ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)attr_pkt);
44401ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_ATTR_NACK_SENT;
44411ae08745Sheppo 			vsw_send_msg(ldcp, (void *)attr_pkt,
44421ae08745Sheppo 					sizeof (vnet_attr_msg_t));
44431ae08745Sheppo 
44441ae08745Sheppo 			vsw_next_milestone(ldcp);
44451ae08745Sheppo 			return;
44461ae08745Sheppo 		}
44471ae08745Sheppo 
44481ae08745Sheppo 		/*
44491ae08745Sheppo 		 * Otherwise store attributes for this lane and update
44501ae08745Sheppo 		 * lane state.
44511ae08745Sheppo 		 */
44521ae08745Sheppo 		ldcp->lane_in.mtu = attr_pkt->mtu;
44531ae08745Sheppo 		ldcp->lane_in.addr = attr_pkt->addr;
44541ae08745Sheppo 		ldcp->lane_in.addr_type = attr_pkt->addr_type;
44551ae08745Sheppo 		ldcp->lane_in.xfer_mode = attr_pkt->xfer_mode;
44561ae08745Sheppo 		ldcp->lane_in.ack_freq = attr_pkt->ack_freq;
44571ae08745Sheppo 
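		/*
		 * Unpack the peer's MAC address from the low-order bytes
		 * of the 64-bit addr field into the port structure
		 * (octet[ETHERADDRL - 1] holds the least significant byte).
		 */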
44581ae08745Sheppo 		macaddr = ldcp->lane_in.addr;
44591ae08745Sheppo 		for (i = ETHERADDRL - 1; i >= 0; i--) {
44601ae08745Sheppo 			port->p_macaddr.ether_addr_octet[i] = macaddr & 0xFF;
44611ae08745Sheppo 			macaddr >>= 8;
44621ae08745Sheppo 		}
44631ae08745Sheppo 
44641ae08745Sheppo 		/* create the fdb entry for this port/mac address */
44651ae08745Sheppo 		(void) vsw_add_fdb(vswp, port);
44661ae08745Sheppo 
44671ae08745Sheppo 		/* set up device-specific xmit routines */
44681ae08745Sheppo 		mutex_enter(&port->tx_lock);
44691ae08745Sheppo 		if (ldcp->lane_in.xfer_mode == VIO_DRING_MODE) {
44701ae08745Sheppo 			D2(vswp, "%s: mode = VIO_DRING_MODE", __func__);
44711ae08745Sheppo 			port->transmit = vsw_dringsend;
44721ae08745Sheppo 		} else if (ldcp->lane_in.xfer_mode == VIO_DESC_MODE) {
44731ae08745Sheppo 			D2(vswp, "%s: mode = VIO_DESC_MODE", __func__);
44741ae08745Sheppo 			vsw_create_privring(ldcp);
44751ae08745Sheppo 			port->transmit = vsw_descrsend;
44761ae08745Sheppo 		}
44771ae08745Sheppo 		mutex_exit(&port->tx_lock);
44781ae08745Sheppo 
44791ae08745Sheppo 		attr_pkt->tag.vio_sid = ldcp->local_session;
44801ae08745Sheppo 		attr_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
44811ae08745Sheppo 
44821ae08745Sheppo 		DUMP_TAG_PTR((vio_msg_tag_t *)attr_pkt);
44831ae08745Sheppo 
44841ae08745Sheppo 		ldcp->lane_in.lstate |= VSW_ATTR_ACK_SENT;
44851ae08745Sheppo 
44861ae08745Sheppo 		vsw_send_msg(ldcp, (void *)attr_pkt,
44871ae08745Sheppo 					sizeof (vnet_attr_msg_t));
44881ae08745Sheppo 
44891ae08745Sheppo 		vsw_next_milestone(ldcp);
44901ae08745Sheppo 		break;
44911ae08745Sheppo 
44921ae08745Sheppo 	case VIO_SUBTYPE_ACK:
44931ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_ACK", __func__);
44941ae08745Sheppo 
44951ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_ATTR_ACK_RECV))
44961ae08745Sheppo 			return;
44971ae08745Sheppo 
44981ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_ATTR_ACK_RECV;
44991ae08745Sheppo 		vsw_next_milestone(ldcp);
45001ae08745Sheppo 		break;
45011ae08745Sheppo 
45021ae08745Sheppo 	case VIO_SUBTYPE_NACK:
45031ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
45041ae08745Sheppo 
45051ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_ATTR_NACK_RECV))
45061ae08745Sheppo 			return;
45071ae08745Sheppo 
45081ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_ATTR_NACK_RECV;
45091ae08745Sheppo 		vsw_next_milestone(ldcp);
45101ae08745Sheppo 		break;
45111ae08745Sheppo 
45121ae08745Sheppo 	default:
45131ae08745Sheppo 		DERR(vswp, "%s: unknown vio_subtype %x\n", __func__,
45141ae08745Sheppo 			attr_pkt->tag.vio_subtype);
45151ae08745Sheppo 	}
45161ae08745Sheppo 
45171ae08745Sheppo 	D1(vswp, "%s(%lld) exit", __func__, ldcp->ldc_id);
45181ae08745Sheppo }
45191ae08745Sheppo 
45201ae08745Sheppo /*
45211ae08745Sheppo  * Process a dring info packet. We can end up here either because our peer
45221ae08745Sheppo  * has ACK/NACK'ed back to an earlier DRING msg we had sent it, or our
45231ae08745Sheppo  * peer has sent us a dring INFO message.
45241ae08745Sheppo  *
45251ae08745Sheppo  * If we get a valid/acceptable INFO packet (and we have already negotiated
45261ae08745Sheppo  * a version) we ACK back and update the lane state, otherwise we NACK back.
45271ae08745Sheppo  *
45281ae08745Sheppo  * FUTURE: nothing to stop a client from sending us info on multiple drings,
45291ae08745Sheppo  * but for the moment we will just use the first one we are given.
45301ae08745Sheppo  *
45311ae08745Sheppo  */
45321ae08745Sheppo void
45331ae08745Sheppo vsw_process_ctrl_dring_reg_pkt(vsw_ldc_t *ldcp, void *pkt)
45341ae08745Sheppo {
45351ae08745Sheppo 	vio_dring_reg_msg_t	*dring_pkt;
45361ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
45371ae08745Sheppo 	ldc_mem_info_t		minfo;
45381ae08745Sheppo 	dring_info_t		*dp, *dbp;
45391ae08745Sheppo 	int			dring_found = 0;
45401ae08745Sheppo 
45411ae08745Sheppo 	/*
45421ae08745Sheppo 	 * We know this is a ctrl/dring packet so
45431ae08745Sheppo 	 * cast it into the correct structure.
45441ae08745Sheppo 	 */
45451ae08745Sheppo 	dring_pkt = (vio_dring_reg_msg_t *)pkt;
45461ae08745Sheppo 
45471ae08745Sheppo 	D1(vswp, "%s(%lld) enter", __func__, ldcp->ldc_id);
45481ae08745Sheppo 
45491ae08745Sheppo 	switch (dring_pkt->tag.vio_subtype) {
45501ae08745Sheppo 	case VIO_SUBTYPE_INFO:
45511ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
45521ae08745Sheppo 
45531ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_DRING_INFO_RECV))
45541ae08745Sheppo 			return;
45551ae08745Sheppo 
45561ae08745Sheppo 		/*
45571ae08745Sheppo 		 * If the dring params are unacceptable then we NACK back.
45581ae08745Sheppo 		 */
45591ae08745Sheppo 		if (vsw_check_dring_info(dring_pkt)) {
45601ae08745Sheppo 
45611ae08745Sheppo 			DERR(vswp, "%s (%lld): invalid dring info",
45621ae08745Sheppo 				__func__, ldcp->ldc_id);
45631ae08745Sheppo 
45641ae08745Sheppo 			vsw_free_lane_resources(ldcp, INBOUND);
45651ae08745Sheppo 
45661ae08745Sheppo 			dring_pkt->tag.vio_sid = ldcp->local_session;
45671ae08745Sheppo 			dring_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
45681ae08745Sheppo 
45691ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)dring_pkt);
45701ae08745Sheppo 
45711ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_DRING_NACK_SENT;
45721ae08745Sheppo 
45731ae08745Sheppo 			vsw_send_msg(ldcp, (void *)dring_pkt,
45741ae08745Sheppo 					sizeof (vio_dring_reg_msg_t));
45751ae08745Sheppo 
45761ae08745Sheppo 			vsw_next_milestone(ldcp);
45771ae08745Sheppo 			return;
45781ae08745Sheppo 		}
45791ae08745Sheppo 
45801ae08745Sheppo 		/*
45811ae08745Sheppo 		 * Otherwise, attempt to map in the dring using the
45821ae08745Sheppo 		 * cookie. If that succeeds we send back a unique dring
45831ae08745Sheppo 		 * identifier that the sending side will use in future
45841ae08745Sheppo 		 * to refer to this descriptor ring.
45851ae08745Sheppo 		 */
45861ae08745Sheppo 		dp = kmem_zalloc(sizeof (dring_info_t), KM_SLEEP);
45871ae08745Sheppo 
45881ae08745Sheppo 		dp->num_descriptors = dring_pkt->num_descriptors;
45891ae08745Sheppo 		dp->descriptor_size = dring_pkt->descriptor_size;
45901ae08745Sheppo 		dp->options = dring_pkt->options;
45911ae08745Sheppo 		dp->ncookies = dring_pkt->ncookies;
45921ae08745Sheppo 
45931ae08745Sheppo 		/*
45941ae08745Sheppo 		 * Note: should only get one cookie. Enforced in
45951ae08745Sheppo 		 * the ldc layer.
45961ae08745Sheppo 		 */
45971ae08745Sheppo 		bcopy(&dring_pkt->cookie[0], &dp->cookie[0],
45981ae08745Sheppo 			sizeof (ldc_mem_cookie_t));
45991ae08745Sheppo 
46001ae08745Sheppo 		D2(vswp, "%s: num_desc %ld : desc_size %ld", __func__,
46011ae08745Sheppo 			dp->num_descriptors, dp->descriptor_size);
46021ae08745Sheppo 		D2(vswp, "%s: options 0x%lx: ncookies %ld", __func__,
46031ae08745Sheppo 			dp->options, dp->ncookies);
46041ae08745Sheppo 
46051ae08745Sheppo 		if ((ldc_mem_dring_map(ldcp->ldc_handle, &dp->cookie[0],
46061ae08745Sheppo 			dp->ncookies, dp->num_descriptors,
46071ae08745Sheppo 			dp->descriptor_size, LDC_SHADOW_MAP,
46081ae08745Sheppo 			&(dp->handle))) != 0) {
46091ae08745Sheppo 
46101ae08745Sheppo 			DERR(vswp, "%s: dring_map failed\n", __func__);
46111ae08745Sheppo 
46121ae08745Sheppo 			kmem_free(dp, sizeof (dring_info_t));
46131ae08745Sheppo 			vsw_free_lane_resources(ldcp, INBOUND);
46141ae08745Sheppo 
46151ae08745Sheppo 			dring_pkt->tag.vio_sid = ldcp->local_session;
46161ae08745Sheppo 			dring_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
46171ae08745Sheppo 
46181ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)dring_pkt);
46191ae08745Sheppo 
46201ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_DRING_NACK_SENT;
46211ae08745Sheppo 			vsw_send_msg(ldcp, (void *)dring_pkt,
46221ae08745Sheppo 				sizeof (vio_dring_reg_msg_t));
46231ae08745Sheppo 
46241ae08745Sheppo 			vsw_next_milestone(ldcp);
46251ae08745Sheppo 			return;
46261ae08745Sheppo 		}
46271ae08745Sheppo 
46281ae08745Sheppo 		if ((ldc_mem_dring_info(dp->handle, &minfo)) != 0) {
46291ae08745Sheppo 
46301ae08745Sheppo 			DERR(vswp, "%s: dring_addr failed\n", __func__);
46311ae08745Sheppo 
46321ae08745Sheppo 			kmem_free(dp, sizeof (dring_info_t));
46331ae08745Sheppo 			vsw_free_lane_resources(ldcp, INBOUND);
46341ae08745Sheppo 
46351ae08745Sheppo 			dring_pkt->tag.vio_sid = ldcp->local_session;
46361ae08745Sheppo 			dring_pkt->tag.vio_subtype = VIO_SUBTYPE_NACK;
46371ae08745Sheppo 
46381ae08745Sheppo 			DUMP_TAG_PTR((vio_msg_tag_t *)dring_pkt);
46391ae08745Sheppo 
46401ae08745Sheppo 			ldcp->lane_in.lstate |= VSW_DRING_NACK_SENT;
46411ae08745Sheppo 			vsw_send_msg(ldcp, (void *)dring_pkt,
46421ae08745Sheppo 				sizeof (vio_dring_reg_msg_t));
46431ae08745Sheppo 
46441ae08745Sheppo 			vsw_next_milestone(ldcp);
46451ae08745Sheppo 			return;
46461ae08745Sheppo 		} else {
46471ae08745Sheppo 			/* store the address of the pub part of ring */
46481ae08745Sheppo 			dp->pub_addr = minfo.vaddr;
46491ae08745Sheppo 		}
46501ae08745Sheppo 
46511ae08745Sheppo 		/* no private section as we are importing */
46521ae08745Sheppo 		dp->priv_addr = NULL;
46531ae08745Sheppo 
46541ae08745Sheppo 		/*
46551ae08745Sheppo 		 * Using a simple monotonically increasing int for the
46561ae08745Sheppo 		 * ident at the moment.
46571ae08745Sheppo 		 */
46581ae08745Sheppo 		dp->ident = ldcp->next_ident;
46591ae08745Sheppo 		ldcp->next_ident++;
46601ae08745Sheppo 
46611ae08745Sheppo 		dp->end_idx = 0;
46621ae08745Sheppo 		dp->next = NULL;
46631ae08745Sheppo 
46641ae08745Sheppo 		/*
46651ae08745Sheppo 		 * Link it onto the end of the list of drings
46661ae08745Sheppo 		 * for this lane.
46671ae08745Sheppo 		 */
46681ae08745Sheppo 		if (ldcp->lane_in.dringp == NULL) {
46691ae08745Sheppo 			D2(vswp, "%s: adding first INBOUND dring", __func__);
46701ae08745Sheppo 			ldcp->lane_in.dringp = dp;
46711ae08745Sheppo 		} else {
46721ae08745Sheppo 			dbp = ldcp->lane_in.dringp;
46731ae08745Sheppo 
46741ae08745Sheppo 			while (dbp->next != NULL)
46751ae08745Sheppo 				dbp = dbp->next;
46761ae08745Sheppo 
46771ae08745Sheppo 			dbp->next = dp;
46781ae08745Sheppo 		}
46791ae08745Sheppo 
46801ae08745Sheppo 		/* acknowledge it */
46811ae08745Sheppo 		dring_pkt->tag.vio_sid = ldcp->local_session;
46821ae08745Sheppo 		dring_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
46831ae08745Sheppo 		dring_pkt->dring_ident = dp->ident;
46841ae08745Sheppo 
46851ae08745Sheppo 		vsw_send_msg(ldcp, (void *)dring_pkt,
46861ae08745Sheppo 				sizeof (vio_dring_reg_msg_t));
46871ae08745Sheppo 
46881ae08745Sheppo 		ldcp->lane_in.lstate |= VSW_DRING_ACK_SENT;
46891ae08745Sheppo 		vsw_next_milestone(ldcp);
46901ae08745Sheppo 		break;
46911ae08745Sheppo 
46921ae08745Sheppo 	case VIO_SUBTYPE_ACK:
46931ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_ACK", __func__);
46941ae08745Sheppo 
46951ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_DRING_ACK_RECV))
46961ae08745Sheppo 			return;
46971ae08745Sheppo 
46981ae08745Sheppo 		/*
46991ae08745Sheppo 		 * Peer is acknowledging our dring info and will have
47001ae08745Sheppo 		 * sent us a dring identifier which we will use to
47011ae08745Sheppo 		 * refer to this ring w.r.t. our peer.
47021ae08745Sheppo 		 */
47031ae08745Sheppo 		dp = ldcp->lane_out.dringp;
47041ae08745Sheppo 		if (dp != NULL) {
47051ae08745Sheppo 			/*
47061ae08745Sheppo 			 * Find the ring this ident should be associated
47071ae08745Sheppo 			 * with.
47081ae08745Sheppo 			 */
47091ae08745Sheppo 			if (vsw_dring_match(dp, dring_pkt)) {
47101ae08745Sheppo 				dring_found = 1;
47111ae08745Sheppo 
47121ae08745Sheppo 			} else while (dp != NULL) {
47131ae08745Sheppo 				if (vsw_dring_match(dp, dring_pkt)) {
47141ae08745Sheppo 					dring_found = 1;
47151ae08745Sheppo 					break;
47161ae08745Sheppo 				}
47171ae08745Sheppo 				dp = dp->next;
47181ae08745Sheppo 			}
47191ae08745Sheppo 
47201ae08745Sheppo 			if (dring_found == 0) {
47211ae08745Sheppo 				DERR(NULL, "%s: unrecognised ring cookie",
47221ae08745Sheppo 					__func__);
47231ae08745Sheppo 				vsw_restart_handshake(ldcp);
47241ae08745Sheppo 				return;
47251ae08745Sheppo 			}
47261ae08745Sheppo 
47271ae08745Sheppo 		} else {
47281ae08745Sheppo 			DERR(vswp, "%s: DRING ACK received but no drings "
47291ae08745Sheppo 				"allocated", __func__);
47301ae08745Sheppo 			vsw_restart_handshake(ldcp);
47311ae08745Sheppo 			return;
47321ae08745Sheppo 		}
47331ae08745Sheppo 
47341ae08745Sheppo 		/* store ident */
47351ae08745Sheppo 		dp->ident = dring_pkt->dring_ident;
47361ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_DRING_ACK_RECV;
47371ae08745Sheppo 		vsw_next_milestone(ldcp);
47381ae08745Sheppo 		break;
47391ae08745Sheppo 
47401ae08745Sheppo 	case VIO_SUBTYPE_NACK:
47411ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
47421ae08745Sheppo 
47431ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_DRING_NACK_RECV))
47441ae08745Sheppo 			return;
47451ae08745Sheppo 
47461ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_DRING_NACK_RECV;
47471ae08745Sheppo 		vsw_next_milestone(ldcp);
47481ae08745Sheppo 		break;
47491ae08745Sheppo 
47501ae08745Sheppo 	default:
47511ae08745Sheppo 		DERR(vswp, "%s: Unknown vio_subtype %x\n", __func__,
47521ae08745Sheppo 			dring_pkt->tag.vio_subtype);
47531ae08745Sheppo 	}
47541ae08745Sheppo 
47551ae08745Sheppo 	D1(vswp, "%s(%lld) exit", __func__, ldcp->ldc_id);
47561ae08745Sheppo }
47571ae08745Sheppo 
47581ae08745Sheppo /*
47591ae08745Sheppo  * Process a request from peer to unregister a dring.
47601ae08745Sheppo  *
47611ae08745Sheppo  * For the moment we just restart the handshake if our
47621ae08745Sheppo  * peer endpoint attempts to unregister a dring.
47631ae08745Sheppo  */
47641ae08745Sheppo void
47651ae08745Sheppo vsw_process_ctrl_dring_unreg_pkt(vsw_ldc_t *ldcp, void *pkt)
47661ae08745Sheppo {
47671ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
47681ae08745Sheppo 	vio_dring_unreg_msg_t	*dring_pkt;
47691ae08745Sheppo 
47701ae08745Sheppo 	/*
47711ae08745Sheppo 	 * We know this is a ctrl/dring packet so
47721ae08745Sheppo 	 * cast it into the correct structure.
47731ae08745Sheppo 	 */
47741ae08745Sheppo 	dring_pkt = (vio_dring_unreg_msg_t *)pkt;
47751ae08745Sheppo 
47761ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
47771ae08745Sheppo 
47781ae08745Sheppo 	switch (dring_pkt->tag.vio_subtype) {
47791ae08745Sheppo 	case VIO_SUBTYPE_INFO:
47801ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
47811ae08745Sheppo 
47821ae08745Sheppo 		DWARN(vswp, "%s: restarting handshake..", __func__);
47831ae08745Sheppo 		vsw_restart_handshake(ldcp);
47841ae08745Sheppo 		break;
47851ae08745Sheppo 
47861ae08745Sheppo 	case VIO_SUBTYPE_ACK:
47871ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_ACK", __func__);
47881ae08745Sheppo 
47891ae08745Sheppo 		DWARN(vswp, "%s: restarting handshake..", __func__);
47901ae08745Sheppo 		vsw_restart_handshake(ldcp);
47911ae08745Sheppo 		break;
47921ae08745Sheppo 
47931ae08745Sheppo 	case VIO_SUBTYPE_NACK:
47941ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
47951ae08745Sheppo 
47961ae08745Sheppo 		DWARN(vswp, "%s: restarting handshake..", __func__);
47971ae08745Sheppo 		vsw_restart_handshake(ldcp);
47981ae08745Sheppo 		break;
47991ae08745Sheppo 
48001ae08745Sheppo 	default:
48011ae08745Sheppo 		DERR(vswp, "%s: Unknown vio_subtype %x\n", __func__,
48021ae08745Sheppo 			dring_pkt->tag.vio_subtype);
48031ae08745Sheppo 		vsw_restart_handshake(ldcp);
48041ae08745Sheppo 	}
48051ae08745Sheppo 
48061ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
48071ae08745Sheppo }
48081ae08745Sheppo 
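/*
 * Convenience macro: turn a multicast request around and NACK it
 * back to the sending peer.
 */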
48091ae08745Sheppo #define	SND_MCST_NACK(ldcp, pkt) \
48101ae08745Sheppo 	pkt->tag.vio_subtype = VIO_SUBTYPE_NACK; \
48111ae08745Sheppo 	pkt->tag.vio_sid = ldcp->local_session; \
48121ae08745Sheppo 	vsw_send_msg(ldcp, (void *)pkt, sizeof (vnet_mcast_msg_t));
48131ae08745Sheppo 
48141ae08745Sheppo /*
48151ae08745Sheppo  * Process a multicast request from a vnet.
48161ae08745Sheppo  *
48171ae08745Sheppo  * Vnets specify a multicast address that they are interested in. This
48181ae08745Sheppo  * address is used as a key into the hash table which forms the multicast
48191ae08745Sheppo  * forwarding database (mFDB).
48201ae08745Sheppo  *
48211ae08745Sheppo  * The table keys are the multicast addresses, while the table entries
48221ae08745Sheppo  * are pointers to lists of ports which wish to receive packets for the
48231ae08745Sheppo  * specified multicast address.
48241ae08745Sheppo  *
48251ae08745Sheppo  * When a multicast packet is being switched we use the address as a key
48261ae08745Sheppo  * into the hash table, and then walk the appropriate port list forwarding
48271ae08745Sheppo  * the pkt to each port in turn.
48281ae08745Sheppo  *
48291ae08745Sheppo  * If a vnet is no longer interested in a particular multicast grouping
48301ae08745Sheppo  * we simply find the correct location in the hash table and then delete
48311ae08745Sheppo  * the relevant port from the port list.
48321ae08745Sheppo  *
48331ae08745Sheppo  * To deal with the case whereby a port is being deleted without first
48341ae08745Sheppo  * removing itself from the lists in the hash table, we maintain a list
48351ae08745Sheppo  * of multicast addresses the port has registered an interest in, within
48361ae08745Sheppo  * the port structure itself. We then simply walk that list of addresses
48371ae08745Sheppo  * using them as keys into the hash table and remove the port from the
48381ae08745Sheppo  * appropriate lists.
48391ae08745Sheppo  */
48401ae08745Sheppo static void
48411ae08745Sheppo vsw_process_ctrl_mcst_pkt(vsw_ldc_t *ldcp, void *pkt)
48421ae08745Sheppo {
48431ae08745Sheppo 	vnet_mcast_msg_t	*mcst_pkt;
48441ae08745Sheppo 	vsw_port_t		*port = ldcp->ldc_port;
48451ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
48461ae08745Sheppo 	int			i;
48471ae08745Sheppo 
48481ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
48491ae08745Sheppo 
48501ae08745Sheppo 	/*
48511ae08745Sheppo 	 * We know this is a ctrl/mcast packet so
48521ae08745Sheppo 	 * cast it into the correct structure.
48531ae08745Sheppo 	 */
48541ae08745Sheppo 	mcst_pkt = (vnet_mcast_msg_t *)pkt;
48551ae08745Sheppo 
48561ae08745Sheppo 	switch (mcst_pkt->tag.vio_subtype) {
48571ae08745Sheppo 	case VIO_SUBTYPE_INFO:
48581ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
48591ae08745Sheppo 
48601ae08745Sheppo 		/*
48611ae08745Sheppo 		 * Check if in correct state to receive a multicast
48621ae08745Sheppo 		 * message (i.e. handshake complete). If not reset
48631ae08745Sheppo 		 * the handshake.
48641ae08745Sheppo 		 */
48651ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_MCST_INFO_RECV))
48661ae08745Sheppo 			return;
48671ae08745Sheppo 
48681ae08745Sheppo 		/*
48691ae08745Sheppo 		 * Before attempting to add or remove address check
48701ae08745Sheppo 		 * that they are valid multicast addresses.
48711ae08745Sheppo 		 * If not, then NACK back.
48721ae08745Sheppo 		 */
48731ae08745Sheppo 		for (i = 0; i < mcst_pkt->count; i++) {
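			/*
			 * A multicast address must have the group bit
			 * (LSB of the first octet) set.
			 */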
48741ae08745Sheppo 			if ((mcst_pkt->mca[i].ether_addr_octet[0] & 01) != 1) {
48751ae08745Sheppo 				DERR(vswp, "%s: invalid multicast address",
48761ae08745Sheppo 								__func__);
48771ae08745Sheppo 				SND_MCST_NACK(ldcp, mcst_pkt);
48781ae08745Sheppo 				return;
48791ae08745Sheppo 			}
48801ae08745Sheppo 		}
48811ae08745Sheppo 
48821ae08745Sheppo 		/*
48831ae08745Sheppo 		 * Now add/remove the addresses. If this fails we
48841ae08745Sheppo 		 * NACK back.
48851ae08745Sheppo 		 */
48861ae08745Sheppo 		if (vsw_add_rem_mcst(mcst_pkt, port) != 0) {
48871ae08745Sheppo 			SND_MCST_NACK(ldcp, mcst_pkt);
48881ae08745Sheppo 			return;
48891ae08745Sheppo 		}
48901ae08745Sheppo 
48911ae08745Sheppo 		mcst_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
48921ae08745Sheppo 		mcst_pkt->tag.vio_sid = ldcp->local_session;
48931ae08745Sheppo 
48941ae08745Sheppo 		DUMP_TAG_PTR((vio_msg_tag_t *)mcst_pkt);
48951ae08745Sheppo 
48961ae08745Sheppo 		vsw_send_msg(ldcp, (void *)mcst_pkt,
48971ae08745Sheppo 					sizeof (vnet_mcast_msg_t));
48981ae08745Sheppo 		break;
48991ae08745Sheppo 
49001ae08745Sheppo 	case VIO_SUBTYPE_ACK:
49011ae08745Sheppo 		DWARN(vswp, "%s: VIO_SUBTYPE_ACK", __func__);
49021ae08745Sheppo 
49031ae08745Sheppo 		/*
49041ae08745Sheppo 		 * We shouldn't ever get a multicast ACK message as
49051ae08745Sheppo 		 * at the moment we never request multicast addresses
49061ae08745Sheppo 		 * to be set on some other device. This may change in
49071ae08745Sheppo 		 * the future if we have cascading switches.
49081ae08745Sheppo 		 */
49091ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_MCST_ACK_RECV))
49101ae08745Sheppo 			return;
49111ae08745Sheppo 
49121ae08745Sheppo 		/* Do nothing */
49131ae08745Sheppo 		break;
49141ae08745Sheppo 
49151ae08745Sheppo 	case VIO_SUBTYPE_NACK:
49161ae08745Sheppo 		DWARN(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
49171ae08745Sheppo 
49181ae08745Sheppo 		/*
49191ae08745Sheppo 		 * We shouldn't get a multicast NACK packet for the
49201ae08745Sheppo 		 * same reasons as we shouldn't get an ACK packet.
49211ae08745Sheppo 		 */
49221ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_MCST_NACK_RECV))
49231ae08745Sheppo 			return;
49241ae08745Sheppo 
49251ae08745Sheppo 		/* Do nothing */
49261ae08745Sheppo 		break;
49271ae08745Sheppo 
49281ae08745Sheppo 	default:
49291ae08745Sheppo 		DERR(vswp, "%s: unknown vio_subtype %x\n", __func__,
49301ae08745Sheppo 			mcst_pkt->tag.vio_subtype);
49311ae08745Sheppo 	}
49321ae08745Sheppo 
49331ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
49341ae08745Sheppo }
49351ae08745Sheppo 
49361ae08745Sheppo static void
49371ae08745Sheppo vsw_process_ctrl_rdx_pkt(vsw_ldc_t *ldcp, void *pkt)
49381ae08745Sheppo {
49391ae08745Sheppo 	vio_rdx_msg_t	*rdx_pkt;
49401ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
49411ae08745Sheppo 
49421ae08745Sheppo 	/*
49431ae08745Sheppo 	 * We know this is a ctrl/rdx packet so
49441ae08745Sheppo 	 * cast it into the correct structure.
49451ae08745Sheppo 	 */
49461ae08745Sheppo 	rdx_pkt = (vio_rdx_msg_t *)pkt;
49471ae08745Sheppo 
49481ae08745Sheppo 	D1(vswp, "%s(%lld) enter", __func__, ldcp->ldc_id);
49491ae08745Sheppo 
49501ae08745Sheppo 	switch (rdx_pkt->tag.vio_subtype) {
49511ae08745Sheppo 	case VIO_SUBTYPE_INFO:
49521ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
49531ae08745Sheppo 
49541ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_RDX_INFO_RECV))
49551ae08745Sheppo 			return;
49561ae08745Sheppo 
49571ae08745Sheppo 		rdx_pkt->tag.vio_sid = ldcp->local_session;
49581ae08745Sheppo 		rdx_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
49591ae08745Sheppo 
49601ae08745Sheppo 		DUMP_TAG_PTR((vio_msg_tag_t *)rdx_pkt);
49611ae08745Sheppo 
49621ae08745Sheppo 		ldcp->lane_in.lstate |= VSW_RDX_ACK_SENT;
49631ae08745Sheppo 
49641ae08745Sheppo 		vsw_send_msg(ldcp, (void *)rdx_pkt,
49651ae08745Sheppo 				sizeof (vio_rdx_msg_t));
49661ae08745Sheppo 
49671ae08745Sheppo 		vsw_next_milestone(ldcp);
49681ae08745Sheppo 		break;
49691ae08745Sheppo 
49701ae08745Sheppo 	case VIO_SUBTYPE_ACK:
49711ae08745Sheppo 		/*
49721ae08745Sheppo 		 * Should be handled in-band by callback handler.
49731ae08745Sheppo 		 */
49741ae08745Sheppo 		DERR(vswp, "%s: Unexpected VIO_SUBTYPE_ACK", __func__);
49751ae08745Sheppo 		vsw_restart_handshake(ldcp);
49761ae08745Sheppo 		break;
49771ae08745Sheppo 
49781ae08745Sheppo 	case VIO_SUBTYPE_NACK:
49791ae08745Sheppo 		D2(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
49801ae08745Sheppo 
49811ae08745Sheppo 		if (vsw_check_flag(ldcp, OUTBOUND, VSW_RDX_NACK_RECV))
49821ae08745Sheppo 			return;
49831ae08745Sheppo 
49841ae08745Sheppo 		ldcp->lane_out.lstate |= VSW_RDX_NACK_RECV;
49851ae08745Sheppo 		vsw_next_milestone(ldcp);
49861ae08745Sheppo 		break;
49871ae08745Sheppo 
49881ae08745Sheppo 	default:
49891ae08745Sheppo 		DERR(vswp, "%s: Unknown vio_subtype %x\n", __func__,
49901ae08745Sheppo 			rdx_pkt->tag.vio_subtype);
49911ae08745Sheppo 	}
49921ae08745Sheppo 
49931ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
49941ae08745Sheppo }
49951ae08745Sheppo 
49961ae08745Sheppo static void
49971ae08745Sheppo vsw_process_data_pkt(vsw_ldc_t *ldcp, void *dpkt, vio_msg_tag_t tag)
49981ae08745Sheppo {
49991ae08745Sheppo 	uint16_t	env = tag.vio_subtype_env;
50001ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
50011ae08745Sheppo 
50021ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
50031ae08745Sheppo 
50041ae08745Sheppo 	/* session id check */
50051ae08745Sheppo 	if (ldcp->session_status & VSW_PEER_SESSION) {
50061ae08745Sheppo 		if (ldcp->peer_session != tag.vio_sid) {
50071ae08745Sheppo 			DERR(vswp, "%s (chan %d): invalid session id (%llx)",
50081ae08745Sheppo 				__func__, ldcp->ldc_id, tag.vio_sid);
50091ae08745Sheppo 			vsw_restart_handshake(ldcp);
50101ae08745Sheppo 			return;
50111ae08745Sheppo 		}
50121ae08745Sheppo 	}
50131ae08745Sheppo 
50141ae08745Sheppo 	/*
50151ae08745Sheppo 	 * It is an error for us to be getting data packets
50161ae08745Sheppo 	 * before the handshake has completed.
50171ae08745Sheppo 	 */
50181ae08745Sheppo 	if (ldcp->hphase != VSW_MILESTONE4) {
50191ae08745Sheppo 		DERR(vswp, "%s: got data packet before handshake complete "
50201ae08745Sheppo 			"hphase %d (%x: %x)", __func__, ldcp->hphase,
50211ae08745Sheppo 			ldcp->lane_in.lstate, ldcp->lane_out.lstate);
50221ae08745Sheppo 		DUMP_FLAGS(ldcp->lane_in.lstate);
50231ae08745Sheppo 		DUMP_FLAGS(ldcp->lane_out.lstate);
50241ae08745Sheppo 		vsw_restart_handshake(ldcp);
50251ae08745Sheppo 		return;
50261ae08745Sheppo 	}
50271ae08745Sheppo 
50281ae08745Sheppo 	/*
50291ae08745Sheppo 	 * Switch on vio_subtype envelope, then let lower routines
50301ae08745Sheppo 	 * decide if it's an INFO, ACK or NACK packet.
50311ae08745Sheppo 	 */
50321ae08745Sheppo 	if (env == VIO_DRING_DATA) {
50331ae08745Sheppo 		vsw_process_data_dring_pkt(ldcp, dpkt);
50341ae08745Sheppo 	} else if (env == VIO_PKT_DATA) {
50351ae08745Sheppo 		vsw_process_data_raw_pkt(ldcp, dpkt);
50361ae08745Sheppo 	} else if (env == VIO_DESC_DATA) {
50371ae08745Sheppo 		vsw_process_data_ibnd_pkt(ldcp, dpkt);
50381ae08745Sheppo 	} else {
50391ae08745Sheppo 		DERR(vswp, "%s : unknown vio_subtype_env (%x)\n",
50401ae08745Sheppo 							__func__, env);
50411ae08745Sheppo 	}
50421ae08745Sheppo 
50431ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
50441ae08745Sheppo }
50451ae08745Sheppo 
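/*
 * Convenience macro: NACK a dring data message back to the sending peer.
 */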
50461ae08745Sheppo #define	SND_DRING_NACK(ldcp, pkt) \
50471ae08745Sheppo 	pkt->tag.vio_subtype = VIO_SUBTYPE_NACK; \
50481ae08745Sheppo 	pkt->tag.vio_sid = ldcp->local_session; \
50491ae08745Sheppo 	vsw_send_msg(ldcp, (void *)pkt, sizeof (vio_dring_msg_t));
50501ae08745Sheppo 
50511ae08745Sheppo static void
50521ae08745Sheppo vsw_process_data_dring_pkt(vsw_ldc_t *ldcp, void *dpkt)
50531ae08745Sheppo {
50541ae08745Sheppo 	vio_dring_msg_t		*dring_pkt;
50551ae08745Sheppo 	vnet_public_desc_t	*pub_addr = NULL;
50561ae08745Sheppo 	vsw_private_desc_t	*priv_addr = NULL;
50571ae08745Sheppo 	dring_info_t		*dp = NULL;
50581ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
50591ae08745Sheppo 	mblk_t			*mp = NULL;
50601ae08745Sheppo 	mblk_t			*bp = NULL;
50611ae08745Sheppo 	mblk_t			*bpt = NULL;
50621ae08745Sheppo 	size_t			nbytes = 0;
50631ae08745Sheppo 	size_t			off = 0;
50641ae08745Sheppo 	uint64_t		ncookies = 0;
50651ae08745Sheppo 	uint64_t		chain = 0;
5066d10e4ef2Snarayan 	uint64_t		j, len;
5067d10e4ef2Snarayan 	uint32_t		pos, start, datalen;
5068d10e4ef2Snarayan 	uint32_t		range_start, range_end;
5069d10e4ef2Snarayan 	int32_t			end, num, cnt = 0;
5070d10e4ef2Snarayan 	int			i, rv;
50711ae08745Sheppo 	boolean_t		ack_needed = B_FALSE;
5072d10e4ef2Snarayan 	boolean_t		prev_desc_ack = B_FALSE;
5073d10e4ef2Snarayan 	int			read_attempts = 0;
50741ae08745Sheppo 
50751ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
50761ae08745Sheppo 
50771ae08745Sheppo 	/*
50781ae08745Sheppo 	 * We know this is a data/dring packet so
50791ae08745Sheppo 	 * cast it into the correct structure.
50801ae08745Sheppo 	 */
50811ae08745Sheppo 	dring_pkt = (vio_dring_msg_t *)dpkt;
50821ae08745Sheppo 
50831ae08745Sheppo 	/*
50841ae08745Sheppo 	 * Switch on the vio_subtype. If it's an INFO then we need to
50851ae08745Sheppo 	 * process the data. If it's an ACK we need to make sure
50861ae08745Sheppo 	 * it makes sense (i.e. did we send an earlier data/info message),
50871ae08745Sheppo 	 * and if it's a NACK then we may attempt a retry.
50881ae08745Sheppo 	 */
50891ae08745Sheppo 	switch (dring_pkt->tag.vio_subtype) {
50901ae08745Sheppo 	case VIO_SUBTYPE_INFO:
50911ae08745Sheppo 		D2(vswp, "%s(%lld): VIO_SUBTYPE_INFO", __func__, ldcp->ldc_id);
50921ae08745Sheppo 
50931ae08745Sheppo 		if ((dp = vsw_ident2dring(&ldcp->lane_in,
50941ae08745Sheppo 				dring_pkt->dring_ident)) == NULL) {
50951ae08745Sheppo 
50961ae08745Sheppo 			DERR(vswp, "%s(%lld): unable to find dring from "
50971ae08745Sheppo 				"ident 0x%llx", __func__, ldcp->ldc_id,
50981ae08745Sheppo 				dring_pkt->dring_ident);
50991ae08745Sheppo 
51001ae08745Sheppo 			SND_DRING_NACK(ldcp, dring_pkt);
51011ae08745Sheppo 			return;
51021ae08745Sheppo 		}
51031ae08745Sheppo 
5104d10e4ef2Snarayan 		start = pos = dring_pkt->start_idx;
51051ae08745Sheppo 		end = dring_pkt->end_idx;
5106d10e4ef2Snarayan 		len = dp->num_descriptors;
51071ae08745Sheppo 
5108d10e4ef2Snarayan 		range_start = range_end = pos;
5109d10e4ef2Snarayan 
5110d10e4ef2Snarayan 		D2(vswp, "%s(%lld): start index %ld : end %ld\n",
51111ae08745Sheppo 			__func__, ldcp->ldc_id, start, end);
51121ae08745Sheppo 
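		/*
		 * Work out how many descriptors we have been asked to
		 * process: end == -1 denotes an unbounded range (process
		 * until a descriptor is found which is not ready), otherwise
		 * allow for the range wrapping around the end of the ring.
		 */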
5113d10e4ef2Snarayan 		if (end == -1) {
5114d10e4ef2Snarayan 			num = -1;
51154bac2208Snarayan 		} else if (end >= 0) {
5116d10e4ef2Snarayan 			num = end >= pos ?
5117d10e4ef2Snarayan 				end - pos + 1: (len - pos + 1) + end;
5118d10e4ef2Snarayan 
51191ae08745Sheppo 			/* basic sanity check */
51201ae08745Sheppo 			if (end > len) {
5121d10e4ef2Snarayan 				DERR(vswp, "%s(%lld): endpoint %lld outside "
5122d10e4ef2Snarayan 					"ring length %lld", __func__,
5123d10e4ef2Snarayan 					ldcp->ldc_id, end, len);
51241ae08745Sheppo 
51251ae08745Sheppo 				SND_DRING_NACK(ldcp, dring_pkt);
51261ae08745Sheppo 				return;
51271ae08745Sheppo 			}
5128d10e4ef2Snarayan 		} else {
5129d10e4ef2Snarayan 			DERR(vswp, "%s(%lld): invalid endpoint %lld",
5130d10e4ef2Snarayan 				__func__, ldcp->ldc_id, end);
5131d10e4ef2Snarayan 			SND_DRING_NACK(ldcp, dring_pkt);
51321ae08745Sheppo 			return;
51331ae08745Sheppo 		}
51341ae08745Sheppo 
5135d10e4ef2Snarayan 		while (cnt != num) {
5136d10e4ef2Snarayan vsw_recheck_desc:
5137d10e4ef2Snarayan 			if ((rv = ldc_mem_dring_acquire(dp->handle,
5138d10e4ef2Snarayan 							pos, pos)) != 0) {
5139d10e4ef2Snarayan 				DERR(vswp, "%s(%lld): unable to acquire "
5140d10e4ef2Snarayan 					"descriptor at pos %d: err %d",
5141d10e4ef2Snarayan 					__func__, ldcp->ldc_id, pos, rv);
5142d10e4ef2Snarayan 				SND_DRING_NACK(ldcp, dring_pkt);
5143d10e4ef2Snarayan 				return;
5144d10e4ef2Snarayan 			}
51451ae08745Sheppo 
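			/* locate the public descriptor at index 'pos' */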
5146d10e4ef2Snarayan 			pub_addr = (vnet_public_desc_t *)dp->pub_addr + pos;
51471ae08745Sheppo 
5148d10e4ef2Snarayan 			/*
5149d10e4ef2Snarayan 			 * When given a bounded range of descriptors
5150d10e4ef2Snarayan 			 * to process, it's an error to hit a descriptor
5151d10e4ef2Snarayan 			 * which is not ready. In the non-bounded case
5152d10e4ef2Snarayan 			 * (end_idx == -1) this simply indicates we have
5153d10e4ef2Snarayan 			 * reached the end of the current active range.
5154d10e4ef2Snarayan 			 */
5155d10e4ef2Snarayan 			if (pub_addr->hdr.dstate != VIO_DESC_READY) {
5156d10e4ef2Snarayan 				/* unbound - no error */
5157d10e4ef2Snarayan 				if (end == -1) {
5158d10e4ef2Snarayan 					if (read_attempts == vsw_read_attempts)
5159d10e4ef2Snarayan 						break;
51601ae08745Sheppo 
5161d10e4ef2Snarayan 					delay(drv_usectohz(vsw_desc_delay));
5162d10e4ef2Snarayan 					read_attempts++;
5163d10e4ef2Snarayan 					goto vsw_recheck_desc;
5164d10e4ef2Snarayan 				}
51651ae08745Sheppo 
5166d10e4ef2Snarayan 				/* bounded - error - so NACK back */
5167d10e4ef2Snarayan 				DERR(vswp, "%s(%lld): descriptor not READY "
5168d10e4ef2Snarayan 					"(%d)", __func__, ldcp->ldc_id,
5169d10e4ef2Snarayan 					pub_addr->hdr.dstate);
5170d10e4ef2Snarayan 				SND_DRING_NACK(ldcp, dring_pkt);
5171d10e4ef2Snarayan 				return;
5172d10e4ef2Snarayan 			}
5173d10e4ef2Snarayan 
5174d10e4ef2Snarayan 			DTRACE_PROBE1(read_attempts, int, read_attempts);
5175d10e4ef2Snarayan 
5176d10e4ef2Snarayan 			range_end = pos;
5177d10e4ef2Snarayan 
5178d10e4ef2Snarayan 			/*
			 * If we ACK'd the previous descriptor then record
			 * the new range start position for later ACKs.
5182d10e4ef2Snarayan 			 */
5183d10e4ef2Snarayan 			if (prev_desc_ack) {
5184d10e4ef2Snarayan 				range_start = pos;
5185d10e4ef2Snarayan 
5186d10e4ef2Snarayan 				D2(vswp, "%s(%lld): updating range start "
5187d10e4ef2Snarayan 					"to be %d", __func__, ldcp->ldc_id,
5188d10e4ef2Snarayan 					range_start);
5189d10e4ef2Snarayan 
5190d10e4ef2Snarayan 				prev_desc_ack = B_FALSE;
5191d10e4ef2Snarayan 			}
51921ae08745Sheppo 
51931ae08745Sheppo 			/*
			 * Data is padded to align on an 8 byte boundary;
			 * datalen is the actual data length, i.e. excluding
			 * that padding.
51971ae08745Sheppo 			 */
51981ae08745Sheppo 			datalen = pub_addr->nbytes;
51991ae08745Sheppo 
52001ae08745Sheppo 			/*
			 * Does the peer wish us to ACK once we have
			 * finished with this descriptor?
52031ae08745Sheppo 			 */
52041ae08745Sheppo 			if (pub_addr->hdr.ack)
52051ae08745Sheppo 				ack_needed = B_TRUE;
52061ae08745Sheppo 
			D2(vswp, "%s(%lld): processing desc %lld at addr"
				" 0x%llx : dstate 0x%lx : datalen 0x%lx",
5209d10e4ef2Snarayan 				__func__, ldcp->ldc_id, pos, pub_addr,
52101ae08745Sheppo 				pub_addr->hdr.dstate, datalen);
52111ae08745Sheppo 
52121ae08745Sheppo 			/*
52131ae08745Sheppo 			 * Mark that we are starting to process descriptor.
52141ae08745Sheppo 			 */
52151ae08745Sheppo 			pub_addr->hdr.dstate = VIO_DESC_ACCEPTED;
52161ae08745Sheppo 
5217d10e4ef2Snarayan 			mp = vio_allocb(ldcp->rxh);
5218d10e4ef2Snarayan 			if (mp == NULL) {
52191ae08745Sheppo 				/*
5220d10e4ef2Snarayan 				 * No free receive buffers available, so
5221d10e4ef2Snarayan 				 * fallback onto allocb(9F). Make sure that
5222d10e4ef2Snarayan 				 * we get a data buffer which is a multiple
5223d10e4ef2Snarayan 				 * of 8 as this is required by ldc_mem_copy.
52241ae08745Sheppo 				 */
5225d10e4ef2Snarayan 				DTRACE_PROBE(allocb);
5226d10e4ef2Snarayan 				mp = allocb(datalen + VNET_IPALIGN + 8,
5227d10e4ef2Snarayan 								BPRI_MED);
			}

			/*
			 * If even the allocb(9F) fallback failed then we
			 * have no buffer to copy into; mark the descriptor
			 * DONE, release it and stop processing for now.
			 */
			if (mp == NULL) {
				DERR(vswp, "%s(%lld): failed to allocate "
					"a receive buffer", __func__,
					ldcp->ldc_id);
				pub_addr->hdr.dstate = VIO_DESC_DONE;
				(void) ldc_mem_dring_release(dp->handle,
								pos, pos);
				break;
			}
5229d10e4ef2Snarayan 
5230d10e4ef2Snarayan 			/*
5231d10e4ef2Snarayan 			 * Ensure that we ask ldc for an aligned
5232d10e4ef2Snarayan 			 * number of bytes.
5233d10e4ef2Snarayan 			 */
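			/*
			 * e.g. if datalen + VNET_IPALIGN came to 62 then
			 * off would be 2 and nbytes would be rounded up
			 * to 64, the next multiple of 8.
			 */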
5234d10e4ef2Snarayan 			nbytes = datalen + VNET_IPALIGN;
52351ae08745Sheppo 			if (nbytes & 0x7) {
52361ae08745Sheppo 				off = 8 - (nbytes & 0x7);
52371ae08745Sheppo 				nbytes += off;
52381ae08745Sheppo 			}
52391ae08745Sheppo 
52401ae08745Sheppo 			ncookies = pub_addr->ncookies;
52411ae08745Sheppo 			rv = ldc_mem_copy(ldcp->ldc_handle,
52421ae08745Sheppo 				(caddr_t)mp->b_rptr, 0, &nbytes,
52431ae08745Sheppo 				pub_addr->memcookie, ncookies,
52441ae08745Sheppo 				LDC_COPY_IN);
52451ae08745Sheppo 
52461ae08745Sheppo 			if (rv != 0) {
52471ae08745Sheppo 				DERR(vswp, "%s(%d): unable to copy in "
5248d10e4ef2Snarayan 					"data from %d cookies in desc %d"
5249d10e4ef2Snarayan 					" (rv %d)", __func__, ldcp->ldc_id,
5250d10e4ef2Snarayan 					ncookies, pos, rv);
52511ae08745Sheppo 				freemsg(mp);
5252d10e4ef2Snarayan 
5253d10e4ef2Snarayan 				pub_addr->hdr.dstate = VIO_DESC_DONE;
52541ae08745Sheppo 				(void) ldc_mem_dring_release(dp->handle,
5255d10e4ef2Snarayan 								pos, pos);
5256d10e4ef2Snarayan 				break;
52571ae08745Sheppo 			} else {
52581ae08745Sheppo 				D2(vswp, "%s(%d): copied in %ld bytes"
52591ae08745Sheppo 					" using %d cookies", __func__,
52601ae08745Sheppo 					ldcp->ldc_id, nbytes, ncookies);
52611ae08745Sheppo 			}
52621ae08745Sheppo 
5263d10e4ef2Snarayan 			/* adjust the read pointer to skip over the padding */
5264d10e4ef2Snarayan 			mp->b_rptr += VNET_IPALIGN;
5265d10e4ef2Snarayan 
52661ae08745Sheppo 			/* point to the actual end of data */
52671ae08745Sheppo 			mp->b_wptr = mp->b_rptr + datalen;
52681ae08745Sheppo 
52691ae08745Sheppo 			/* build a chain of received packets */
52701ae08745Sheppo 			if (bp == NULL) {
52711ae08745Sheppo 				/* first pkt */
52721ae08745Sheppo 				bp = mp;
52731ae08745Sheppo 				bp->b_next = bp->b_prev = NULL;
52741ae08745Sheppo 				bpt = bp;
52751ae08745Sheppo 				chain = 1;
52761ae08745Sheppo 			} else {
52771ae08745Sheppo 				mp->b_next = NULL;
52781ae08745Sheppo 				mp->b_prev = bpt;
52791ae08745Sheppo 				bpt->b_next = mp;
52801ae08745Sheppo 				bpt = mp;
52811ae08745Sheppo 				chain++;
52821ae08745Sheppo 			}
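			/*
			 * Note the frames are linked via b_next, one
			 * complete frame per descriptor; once the chain
			 * grows beyond vsw_chain_len it is handed to
			 * vsw_switch_frame() and we stop processing for
			 * now (see below).
			 */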
52831ae08745Sheppo 
52841ae08745Sheppo 			/* mark we are finished with this descriptor */
52851ae08745Sheppo 			pub_addr->hdr.dstate = VIO_DESC_DONE;
52861ae08745Sheppo 
5287d10e4ef2Snarayan 			(void) ldc_mem_dring_release(dp->handle, pos, pos);
5288d10e4ef2Snarayan 
52891ae08745Sheppo 			/*
5290d10e4ef2Snarayan 			 * Send an ACK back to peer if requested.
52911ae08745Sheppo 			 */
52921ae08745Sheppo 			if (ack_needed) {
52931ae08745Sheppo 				ack_needed = B_FALSE;
52941ae08745Sheppo 
5295d10e4ef2Snarayan 				dring_pkt->start_idx = range_start;
5296d10e4ef2Snarayan 				dring_pkt->end_idx = range_end;
52971ae08745Sheppo 
5298d10e4ef2Snarayan 				DERR(vswp, "%s(%lld): processed %d %d, ACK"
5299d10e4ef2Snarayan 					" requested", __func__, ldcp->ldc_id,
5300d10e4ef2Snarayan 					dring_pkt->start_idx,
5301d10e4ef2Snarayan 					dring_pkt->end_idx);
53021ae08745Sheppo 
5303d10e4ef2Snarayan 				dring_pkt->dring_process_state = VIO_DP_ACTIVE;
53041ae08745Sheppo 				dring_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
53051ae08745Sheppo 				dring_pkt->tag.vio_sid = ldcp->local_session;
53061ae08745Sheppo 				vsw_send_msg(ldcp, (void *)dring_pkt,
53071ae08745Sheppo 					sizeof (vio_dring_msg_t));
5308d10e4ef2Snarayan 
5309d10e4ef2Snarayan 				prev_desc_ack = B_TRUE;
5310d10e4ef2Snarayan 				range_start = pos;
53111ae08745Sheppo 			}
53121ae08745Sheppo 
5313d10e4ef2Snarayan 			/* next descriptor */
5314d10e4ef2Snarayan 			pos = (pos + 1) % len;
5315d10e4ef2Snarayan 			cnt++;
5316d10e4ef2Snarayan 
			/*
			 * If the chain has grown beyond vsw_chain_len
			 * then send what we have accumulated so far to
			 * be switched and break out of the loop, so that
			 * some other network device (or disk) can get
			 * access to the cpu.
			 */
5323d10e4ef2Snarayan 			if (chain > vsw_chain_len) {
5324d10e4ef2Snarayan 				D3(vswp, "%s(%lld): switching chain of %d "
5325d10e4ef2Snarayan 					"msgs", __func__, ldcp->ldc_id, chain);
5326d10e4ef2Snarayan 				vsw_switch_frame(vswp, bp, VSW_VNETPORT,
5327d10e4ef2Snarayan 							ldcp->ldc_port, NULL);
5328d10e4ef2Snarayan 				bp = NULL;
5329d10e4ef2Snarayan 				break;
53301ae08745Sheppo 			}
53311ae08745Sheppo 		}
53321ae08745Sheppo 
53331ae08745Sheppo 		/* send the chain of packets to be switched */
5334d10e4ef2Snarayan 		if (bp != NULL) {
5335d10e4ef2Snarayan 			D3(vswp, "%s(%lld): switching chain of %d msgs",
5336d10e4ef2Snarayan 					__func__, ldcp->ldc_id, chain);
53371ae08745Sheppo 			vsw_switch_frame(vswp, bp, VSW_VNETPORT,
53381ae08745Sheppo 							ldcp->ldc_port, NULL);
5339d10e4ef2Snarayan 		}
53401ae08745Sheppo 
5341d10e4ef2Snarayan 		DTRACE_PROBE1(msg_cnt, int, cnt);
5342d10e4ef2Snarayan 
5343d10e4ef2Snarayan 		/*
		 * We are now finished, so ACK back with the state set
		 * to STOPPED so that our peer knows we are done processing.
5346d10e4ef2Snarayan 		 */
5347d10e4ef2Snarayan 		dring_pkt->tag.vio_subtype = VIO_SUBTYPE_ACK;
5348d10e4ef2Snarayan 		dring_pkt->tag.vio_sid = ldcp->local_session;
5349d10e4ef2Snarayan 
5350d10e4ef2Snarayan 		dring_pkt->dring_process_state = VIO_DP_STOPPED;
5351d10e4ef2Snarayan 
5352d10e4ef2Snarayan 		DTRACE_PROBE(stop_process_sent);
5353d10e4ef2Snarayan 
5354d10e4ef2Snarayan 		/*
5355d10e4ef2Snarayan 		 * We have not processed any more descriptors beyond
5356d10e4ef2Snarayan 		 * the last one we ACK'd.
5357d10e4ef2Snarayan 		 */
5358d10e4ef2Snarayan 		if (prev_desc_ack)
5359d10e4ef2Snarayan 			range_start = range_end;
5360d10e4ef2Snarayan 
5361d10e4ef2Snarayan 		dring_pkt->start_idx = range_start;
5362d10e4ef2Snarayan 		dring_pkt->end_idx = range_end;
5363d10e4ef2Snarayan 
5364d10e4ef2Snarayan 		D2(vswp, "%s(%lld) processed : %d : %d, now stopping",
5365d10e4ef2Snarayan 			__func__, ldcp->ldc_id, dring_pkt->start_idx,
5366d10e4ef2Snarayan 			dring_pkt->end_idx);
5367d10e4ef2Snarayan 
5368d10e4ef2Snarayan 		vsw_send_msg(ldcp, (void *)dring_pkt,
5369d10e4ef2Snarayan 					sizeof (vio_dring_msg_t));
53701ae08745Sheppo 		break;
53711ae08745Sheppo 
53721ae08745Sheppo 	case VIO_SUBTYPE_ACK:
53731ae08745Sheppo 		D2(vswp, "%s(%lld): VIO_SUBTYPE_ACK", __func__, ldcp->ldc_id);
53741ae08745Sheppo 		/*
53751ae08745Sheppo 		 * Verify that the relevant descriptors are all
53761ae08745Sheppo 		 * marked as DONE
53771ae08745Sheppo 		 */
53781ae08745Sheppo 		if ((dp = vsw_ident2dring(&ldcp->lane_out,
53791ae08745Sheppo 			dring_pkt->dring_ident)) == NULL) {
53801ae08745Sheppo 			DERR(vswp, "%s: unknown ident in ACK", __func__);
53811ae08745Sheppo 			return;
53821ae08745Sheppo 		}
53831ae08745Sheppo 
53841ae08745Sheppo 		pub_addr = (vnet_public_desc_t *)dp->pub_addr;
53851ae08745Sheppo 		priv_addr = (vsw_private_desc_t *)dp->priv_addr;
53861ae08745Sheppo 
53871ae08745Sheppo 		start = end = 0;
53881ae08745Sheppo 		start = dring_pkt->start_idx;
53891ae08745Sheppo 		end = dring_pkt->end_idx;
53901ae08745Sheppo 		len = dp->num_descriptors;
53911ae08745Sheppo 
53921ae08745Sheppo 		j = num = 0;
		/* calculate # descriptors, taking wrap around into account */
53941ae08745Sheppo 		num = end >= start ? end - start + 1: (len - start + 1) + end;
53951ae08745Sheppo 
53961ae08745Sheppo 		D2(vswp, "%s(%lld): start index %ld : end %ld : num %ld\n",
53971ae08745Sheppo 			__func__, ldcp->ldc_id, start, end, num);
53981ae08745Sheppo 
5399d10e4ef2Snarayan 		mutex_enter(&dp->dlock);
5400d10e4ef2Snarayan 		dp->last_ack_recv = end;
5401d10e4ef2Snarayan 		mutex_exit(&dp->dlock);
5402d10e4ef2Snarayan 
54031ae08745Sheppo 		for (i = start; j < num; i = (i + 1) % len, j++) {
54041ae08745Sheppo 			pub_addr = (vnet_public_desc_t *)dp->pub_addr + i;
54051ae08745Sheppo 			priv_addr = (vsw_private_desc_t *)dp->priv_addr + i;
54061ae08745Sheppo 
5407d10e4ef2Snarayan 			/*
5408d10e4ef2Snarayan 			 * If the last descriptor in a range has the ACK
5409d10e4ef2Snarayan 			 * bit set then we will get two messages from our
			 * peer relating to it: the normal ACK msg and then
			 * a subsequent STOP msg. The first message will have
			 * resulted in the descriptor being reclaimed and
			 * its state set to FREE, so when we encounter a non
			 * DONE descriptor we need to check whether it is one
			 * we have just reclaimed.
5416d10e4ef2Snarayan 			 */
5417d10e4ef2Snarayan 			mutex_enter(&priv_addr->dstate_lock);
5418d10e4ef2Snarayan 			if (pub_addr->hdr.dstate == VIO_DESC_DONE) {
54191ae08745Sheppo 				/* clear all the fields */
54201ae08745Sheppo 				bzero(priv_addr->datap, priv_addr->datalen);
54211ae08745Sheppo 				priv_addr->datalen = 0;
54221ae08745Sheppo 
54231ae08745Sheppo 				pub_addr->hdr.dstate = VIO_DESC_FREE;
54241ae08745Sheppo 				pub_addr->hdr.ack = 0;
5425d10e4ef2Snarayan 
54261ae08745Sheppo 				priv_addr->dstate = VIO_DESC_FREE;
5427d10e4ef2Snarayan 				mutex_exit(&priv_addr->dstate_lock);
54281ae08745Sheppo 
54291ae08745Sheppo 				D3(vswp, "clearing descp %d : pub state "
54301ae08745Sheppo 					"0x%llx : priv state 0x%llx", i,
54311ae08745Sheppo 					pub_addr->hdr.dstate,
54321ae08745Sheppo 					priv_addr->dstate);
5433d10e4ef2Snarayan 
5434d10e4ef2Snarayan 			} else {
5435d10e4ef2Snarayan 				mutex_exit(&priv_addr->dstate_lock);
5436d10e4ef2Snarayan 
5437d10e4ef2Snarayan 				if (dring_pkt->dring_process_state !=
5438d10e4ef2Snarayan 							VIO_DP_STOPPED) {
					DERR(vswp, "%s: descriptor %lld at "
						"addr 0x%llx not DONE (0x%lx)\n",
5441d10e4ef2Snarayan 						__func__, i, pub_addr,
5442d10e4ef2Snarayan 						pub_addr->hdr.dstate);
5443d10e4ef2Snarayan 					return;
5444d10e4ef2Snarayan 				}
54451ae08745Sheppo 			}
54461ae08745Sheppo 		}
54471ae08745Sheppo 
5448d10e4ef2Snarayan 		/*
5449d10e4ef2Snarayan 		 * If our peer is stopping processing descriptors then
5450d10e4ef2Snarayan 		 * we check to make sure it has processed all the descriptors
5451d10e4ef2Snarayan 		 * we have updated. If not then we send it a new message
5452d10e4ef2Snarayan 		 * to prompt it to restart.
5453d10e4ef2Snarayan 		 */
5454d10e4ef2Snarayan 		if (dring_pkt->dring_process_state == VIO_DP_STOPPED) {
5455d10e4ef2Snarayan 			DTRACE_PROBE(stop_process_recv);
5456d10e4ef2Snarayan 			D2(vswp, "%s(%lld): got stopping msg : %d : %d",
5457d10e4ef2Snarayan 				__func__, ldcp->ldc_id, dring_pkt->start_idx,
5458d10e4ef2Snarayan 				dring_pkt->end_idx);
5459d10e4ef2Snarayan 
5460d10e4ef2Snarayan 			/*
5461d10e4ef2Snarayan 			 * Check next descriptor in public section of ring.
			 * If it's marked as READY then we need to prompt our
5463d10e4ef2Snarayan 			 * peer to start processing the ring again.
5464d10e4ef2Snarayan 			 */
5465d10e4ef2Snarayan 			i = (end + 1) % len;
5466d10e4ef2Snarayan 			pub_addr = (vnet_public_desc_t *)dp->pub_addr + i;
5467d10e4ef2Snarayan 			priv_addr = (vsw_private_desc_t *)dp->priv_addr + i;
5468d10e4ef2Snarayan 
5469d10e4ef2Snarayan 			/*
			 * Hold the restart lock across all of this to
			 * make sure that it is not possible for us to
			 * decide that a prompt msg needs to be sent in
			 * the future while the sending code, having
			 * already checked restart_reqd, is about to exit
			 * without sending one.
5475d10e4ef2Snarayan 			 */
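			/*
			 * vsw_dringsend() marks its descriptor READY
			 * before taking this same lock to test
			 * restart_reqd, so exactly one side ends up
			 * sending the prompt: either we see the READY
			 * descriptor here, or it sees restart_reqd set.
			 */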
5476d10e4ef2Snarayan 			mutex_enter(&dp->restart_lock);
5477d10e4ef2Snarayan 			mutex_enter(&priv_addr->dstate_lock);
5478d10e4ef2Snarayan 			if (pub_addr->hdr.dstate == VIO_DESC_READY) {
5479d10e4ef2Snarayan 
5480d10e4ef2Snarayan 				mutex_exit(&priv_addr->dstate_lock);
5481d10e4ef2Snarayan 
5482d10e4ef2Snarayan 				dring_pkt->tag.vio_subtype = VIO_SUBTYPE_INFO;
5483d10e4ef2Snarayan 				dring_pkt->tag.vio_sid = ldcp->local_session;
5484d10e4ef2Snarayan 
5485d10e4ef2Snarayan 				mutex_enter(&ldcp->lane_out.seq_lock);
5486d10e4ef2Snarayan 				dring_pkt->seq_num = ldcp->lane_out.seq_num++;
5487d10e4ef2Snarayan 				mutex_exit(&ldcp->lane_out.seq_lock);
5488d10e4ef2Snarayan 
5489d10e4ef2Snarayan 				dring_pkt->start_idx = (end + 1) % len;
5490d10e4ef2Snarayan 				dring_pkt->end_idx = -1;
5491d10e4ef2Snarayan 
5492d10e4ef2Snarayan 				D2(vswp, "%s(%lld) : sending restart msg:"
5493d10e4ef2Snarayan 					" %d : %d", __func__, ldcp->ldc_id,
5494d10e4ef2Snarayan 					dring_pkt->start_idx,
5495d10e4ef2Snarayan 					dring_pkt->end_idx);
5496d10e4ef2Snarayan 
5497d10e4ef2Snarayan 				vsw_send_msg(ldcp, (void *)dring_pkt,
5498d10e4ef2Snarayan 						sizeof (vio_dring_msg_t));
5499d10e4ef2Snarayan 			} else {
5500d10e4ef2Snarayan 				mutex_exit(&priv_addr->dstate_lock);
5501d10e4ef2Snarayan 				dp->restart_reqd = B_TRUE;
5502d10e4ef2Snarayan 			}
5503d10e4ef2Snarayan 			mutex_exit(&dp->restart_lock);
5504d10e4ef2Snarayan 		}
55051ae08745Sheppo 		break;
55061ae08745Sheppo 
55071ae08745Sheppo 	case VIO_SUBTYPE_NACK:
55081ae08745Sheppo 		DWARN(vswp, "%s(%lld): VIO_SUBTYPE_NACK",
55091ae08745Sheppo 						__func__, ldcp->ldc_id);
55101ae08745Sheppo 		/*
		 * Something is badly wrong if we are getting NACKs
55121ae08745Sheppo 		 * for our data pkts. So reset the channel.
55131ae08745Sheppo 		 */
55141ae08745Sheppo 		vsw_restart_handshake(ldcp);
55151ae08745Sheppo 
55161ae08745Sheppo 		break;
55171ae08745Sheppo 
55181ae08745Sheppo 	default:
55191ae08745Sheppo 		DERR(vswp, "%s(%lld): Unknown vio_subtype %x\n", __func__,
55201ae08745Sheppo 			ldcp->ldc_id, dring_pkt->tag.vio_subtype);
55211ae08745Sheppo 	}
55221ae08745Sheppo 
55231ae08745Sheppo 	D1(vswp, "%s(%lld) exit", __func__, ldcp->ldc_id);
55241ae08745Sheppo }
55251ae08745Sheppo 
55261ae08745Sheppo /*
 * VIO_PKT_DATA (a.k.a. raw data mode)
55281ae08745Sheppo  *
55291ae08745Sheppo  * Note - currently not supported. Do nothing.
55301ae08745Sheppo  */
55311ae08745Sheppo static void
55321ae08745Sheppo vsw_process_data_raw_pkt(vsw_ldc_t *ldcp, void *dpkt)
55331ae08745Sheppo {
55341ae08745Sheppo 	_NOTE(ARGUNUSED(dpkt))
55351ae08745Sheppo 
55361ae08745Sheppo 	D1(NULL, "%s (%lld): enter\n", __func__, ldcp->ldc_id);
55371ae08745Sheppo 
	DERR(NULL, "%s (%lld): currently not supported",
55391ae08745Sheppo 						__func__, ldcp->ldc_id);
55401ae08745Sheppo 
55411ae08745Sheppo 	D1(NULL, "%s (%lld): exit\n", __func__, ldcp->ldc_id);
55421ae08745Sheppo }
55431ae08745Sheppo 
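/*
 * Send a NACK for an in-band descriptor message by re-using the message
 * we were passed: flip the subtype to NACK, stamp in our session id and
 * send it straight back over the channel.
 */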
55441ae08745Sheppo #define	SND_IBND_DESC_NACK(ldcp, pkt) \
55451ae08745Sheppo 	pkt->tag.vio_subtype = VIO_SUBTYPE_NACK; \
55461ae08745Sheppo 	pkt->tag.vio_sid = ldcp->local_session; \
55471ae08745Sheppo 	vsw_send_msg(ldcp, (void *)pkt, sizeof (vio_ibnd_desc_t));
55481ae08745Sheppo 
55491ae08745Sheppo /*
55501ae08745Sheppo  * Process an in-band descriptor message (most likely from
55511ae08745Sheppo  * OBP).
55521ae08745Sheppo  */
55531ae08745Sheppo static void
55541ae08745Sheppo vsw_process_data_ibnd_pkt(vsw_ldc_t *ldcp, void *pkt)
55551ae08745Sheppo {
55561ae08745Sheppo 	vio_ibnd_desc_t		*ibnd_desc;
55571ae08745Sheppo 	dring_info_t		*dp = NULL;
55581ae08745Sheppo 	vsw_private_desc_t	*priv_addr = NULL;
55591ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
55601ae08745Sheppo 	mblk_t			*mp = NULL;
55611ae08745Sheppo 	size_t			nbytes = 0;
55621ae08745Sheppo 	size_t			off = 0;
55631ae08745Sheppo 	uint64_t		idx = 0;
55644bac2208Snarayan 	uint32_t		num = 1, len, datalen = 0;
55651ae08745Sheppo 	uint64_t		ncookies = 0;
55664bac2208Snarayan 	int			i, rv;
55674bac2208Snarayan 	int			j = 0;
55681ae08745Sheppo 
55691ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
55701ae08745Sheppo 
55711ae08745Sheppo 	ibnd_desc = (vio_ibnd_desc_t *)pkt;
55721ae08745Sheppo 
55731ae08745Sheppo 	switch (ibnd_desc->hdr.tag.vio_subtype) {
55741ae08745Sheppo 	case VIO_SUBTYPE_INFO:
55751ae08745Sheppo 		D1(vswp, "%s: VIO_SUBTYPE_INFO", __func__);
55761ae08745Sheppo 
55771ae08745Sheppo 		if (vsw_check_flag(ldcp, INBOUND, VSW_DRING_INFO_RECV))
55781ae08745Sheppo 			return;
55791ae08745Sheppo 
55801ae08745Sheppo 		/*
		 * Data is padded to align on an 8 byte boundary;
		 * datalen is the actual data length, i.e. excluding
		 * that padding.
55841ae08745Sheppo 		 */
55851ae08745Sheppo 		datalen = ibnd_desc->nbytes;
55861ae08745Sheppo 
		D2(vswp, "%s(%lld): processing inband desc : "
			"datalen 0x%lx", __func__, ldcp->ldc_id, datalen);
55891ae08745Sheppo 
55901ae08745Sheppo 		ncookies = ibnd_desc->ncookies;
55911ae08745Sheppo 
55921ae08745Sheppo 		/*
55931ae08745Sheppo 		 * allocb(9F) returns an aligned data block. We
55941ae08745Sheppo 		 * need to ensure that we ask ldc for an aligned
55951ae08745Sheppo 		 * number of bytes also.
55961ae08745Sheppo 		 */
55971ae08745Sheppo 		nbytes = datalen;
55981ae08745Sheppo 		if (nbytes & 0x7) {
55991ae08745Sheppo 			off = 8 - (nbytes & 0x7);
56001ae08745Sheppo 			nbytes += off;
56011ae08745Sheppo 		}
56021ae08745Sheppo 
		mp = allocb(nbytes, BPRI_MED);
56041ae08745Sheppo 		if (mp == NULL) {
56051ae08745Sheppo 			DERR(vswp, "%s(%lld): allocb failed",
56061ae08745Sheppo 					__func__, ldcp->ldc_id);
56071ae08745Sheppo 			return;
56081ae08745Sheppo 		}
56091ae08745Sheppo 
56101ae08745Sheppo 		rv = ldc_mem_copy(ldcp->ldc_handle, (caddr_t)mp->b_rptr,
56111ae08745Sheppo 			0, &nbytes, ibnd_desc->memcookie, (uint64_t)ncookies,
56121ae08745Sheppo 			LDC_COPY_IN);
56131ae08745Sheppo 
56141ae08745Sheppo 		if (rv != 0) {
56151ae08745Sheppo 			DERR(vswp, "%s(%d): unable to copy in data from "
56161ae08745Sheppo 				"%d cookie(s)", __func__,
56171ae08745Sheppo 				ldcp->ldc_id, ncookies);
56181ae08745Sheppo 			freemsg(mp);
56191ae08745Sheppo 			return;
56201ae08745Sheppo 		} else {
56211ae08745Sheppo 			D2(vswp, "%s(%d): copied in %ld bytes using %d "
56221ae08745Sheppo 				"cookies", __func__, ldcp->ldc_id, nbytes,
56231ae08745Sheppo 				ncookies);
56241ae08745Sheppo 		}
56251ae08745Sheppo 
56261ae08745Sheppo 		/* point to the actual end of data */
56271ae08745Sheppo 		mp->b_wptr = mp->b_rptr + datalen;
56281ae08745Sheppo 
56291ae08745Sheppo 		/*
56301ae08745Sheppo 		 * We ACK back every in-band descriptor message we process
56311ae08745Sheppo 		 */
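		/*
		 * The reply re-uses the descriptor message we received,
		 * so it carries the same desc_handle; the peer uses that
		 * handle to reclaim the descriptor it sent, just as our
		 * own VIO_SUBTYPE_ACK case below does for descriptors we
		 * send via vsw_descrsend().
		 */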
56321ae08745Sheppo 		ibnd_desc->hdr.tag.vio_subtype = VIO_SUBTYPE_ACK;
56331ae08745Sheppo 		ibnd_desc->hdr.tag.vio_sid = ldcp->local_session;
56341ae08745Sheppo 		vsw_send_msg(ldcp, (void *)ibnd_desc,
56351ae08745Sheppo 				sizeof (vio_ibnd_desc_t));
56361ae08745Sheppo 
56371ae08745Sheppo 		/* send the packet to be switched */
56381ae08745Sheppo 		vsw_switch_frame(vswp, mp, VSW_VNETPORT,
56391ae08745Sheppo 					ldcp->ldc_port, NULL);
56401ae08745Sheppo 
56411ae08745Sheppo 		break;
56421ae08745Sheppo 
56431ae08745Sheppo 	case VIO_SUBTYPE_ACK:
56441ae08745Sheppo 		D1(vswp, "%s: VIO_SUBTYPE_ACK", __func__);
56451ae08745Sheppo 
56461ae08745Sheppo 		/* Verify the ACK is valid */
56471ae08745Sheppo 		idx = ibnd_desc->hdr.desc_handle;
56481ae08745Sheppo 
56491ae08745Sheppo 		if (idx >= VSW_RING_NUM_EL) {
56501ae08745Sheppo 			cmn_err(CE_WARN, "%s: corrupted ACK received "
56511ae08745Sheppo 				"(idx %ld)", __func__, idx);
56521ae08745Sheppo 			return;
56531ae08745Sheppo 		}
56541ae08745Sheppo 
56551ae08745Sheppo 		if ((dp = ldcp->lane_out.dringp) == NULL) {
56561ae08745Sheppo 			DERR(vswp, "%s: no dring found", __func__);
56571ae08745Sheppo 			return;
56581ae08745Sheppo 		}
56591ae08745Sheppo 
56604bac2208Snarayan 		len = dp->num_descriptors;
56614bac2208Snarayan 		/*
56624bac2208Snarayan 		 * If the descriptor we are being ACK'ed for is not the
		 * one we expected, then pkts were lost somewhere: either
		 * a msg we tried to send or a previous ACK msg from
		 * our peer. In either case we now reclaim the descriptors
56664bac2208Snarayan 		 * in the range from the last ACK we received up to the
56674bac2208Snarayan 		 * current ACK.
56684bac2208Snarayan 		 */
56694bac2208Snarayan 		if (idx != dp->last_ack_recv) {
56704bac2208Snarayan 			DWARN(vswp, "%s: dropped pkts detected, (%ld, %ld)",
56714bac2208Snarayan 				__func__, dp->last_ack_recv, idx);
56724bac2208Snarayan 			num = idx >= dp->last_ack_recv ?
56734bac2208Snarayan 				idx - dp->last_ack_recv + 1:
56744bac2208Snarayan 				(len - dp->last_ack_recv + 1) + idx;
56754bac2208Snarayan 		}
56761ae08745Sheppo 
56771ae08745Sheppo 		/*
56781ae08745Sheppo 		 * When we sent the in-band message to our peer we
56791ae08745Sheppo 		 * marked the copy in our private ring as READY. We now
56801ae08745Sheppo 		 * check that the descriptor we are being ACK'ed for is in
56811ae08745Sheppo 		 * fact READY, i.e. it is one we have shared with our peer.
56824bac2208Snarayan 		 *
		 * If it's not we flag an error, but still reset the
		 * descriptor back to FREE.
56851ae08745Sheppo 		 */
56864bac2208Snarayan 		for (i = dp->last_ack_recv; j < num; i = (i + 1) % len, j++) {
56874bac2208Snarayan 			priv_addr = (vsw_private_desc_t *)dp->priv_addr + i;
5688d10e4ef2Snarayan 			mutex_enter(&priv_addr->dstate_lock);
56891ae08745Sheppo 			if (priv_addr->dstate != VIO_DESC_READY) {
56904bac2208Snarayan 				DERR(vswp, "%s: (%ld) desc at index %ld not "
56914bac2208Snarayan 					"READY (0x%lx)", __func__,
					ldcp->ldc_id, i, priv_addr->dstate);
56934bac2208Snarayan 				DERR(vswp, "%s: bound %d: ncookies %ld : "
56944bac2208Snarayan 					"datalen %ld", __func__,
56954bac2208Snarayan 					priv_addr->bound, priv_addr->ncookies,
56964bac2208Snarayan 					priv_addr->datalen);
56974bac2208Snarayan 			}
56981ae08745Sheppo 			D2(vswp, "%s: (%lld) freeing descp at %lld", __func__,
				ldcp->ldc_id, i);
57001ae08745Sheppo 			/* release resources associated with sent msg */
57011ae08745Sheppo 			bzero(priv_addr->datap, priv_addr->datalen);
57021ae08745Sheppo 			priv_addr->datalen = 0;
57031ae08745Sheppo 			priv_addr->dstate = VIO_DESC_FREE;
5704d10e4ef2Snarayan 			mutex_exit(&priv_addr->dstate_lock);
57051ae08745Sheppo 		}
57064bac2208Snarayan 		/* update to next expected value */
57074bac2208Snarayan 		dp->last_ack_recv = (idx + 1) % dp->num_descriptors;
57084bac2208Snarayan 
57091ae08745Sheppo 		break;
57101ae08745Sheppo 
57111ae08745Sheppo 	case VIO_SUBTYPE_NACK:
57121ae08745Sheppo 		DERR(vswp, "%s: VIO_SUBTYPE_NACK", __func__);
57131ae08745Sheppo 
57141ae08745Sheppo 		/*
57151ae08745Sheppo 		 * We should only get a NACK if our peer doesn't like
57161ae08745Sheppo 		 * something about a message we have sent it. If this
57171ae08745Sheppo 		 * happens we just release the resources associated with
		 * the message. (We are relying on higher layers to decide
		 * whether or not to resend.)
57201ae08745Sheppo 		 */
57211ae08745Sheppo 
57221ae08745Sheppo 		/* limit check */
57231ae08745Sheppo 		idx = ibnd_desc->hdr.desc_handle;
57241ae08745Sheppo 
57251ae08745Sheppo 		if (idx >= VSW_RING_NUM_EL) {
57261ae08745Sheppo 			DERR(vswp, "%s: corrupted NACK received (idx %lld)",
57271ae08745Sheppo 				__func__, idx);
57281ae08745Sheppo 			return;
57291ae08745Sheppo 		}
57301ae08745Sheppo 
57311ae08745Sheppo 		if ((dp = ldcp->lane_out.dringp) == NULL) {
57321ae08745Sheppo 			DERR(vswp, "%s: no dring found", __func__);
57331ae08745Sheppo 			return;
57341ae08745Sheppo 		}
57351ae08745Sheppo 
57361ae08745Sheppo 		priv_addr = (vsw_private_desc_t *)dp->priv_addr;
57371ae08745Sheppo 
57381ae08745Sheppo 		/* move to correct location in ring */
57391ae08745Sheppo 		priv_addr += idx;
57401ae08745Sheppo 
57411ae08745Sheppo 		/* release resources associated with sent msg */
5742d10e4ef2Snarayan 		mutex_enter(&priv_addr->dstate_lock);
57431ae08745Sheppo 		bzero(priv_addr->datap, priv_addr->datalen);
57441ae08745Sheppo 		priv_addr->datalen = 0;
57451ae08745Sheppo 		priv_addr->dstate = VIO_DESC_FREE;
5746d10e4ef2Snarayan 		mutex_exit(&priv_addr->dstate_lock);
57471ae08745Sheppo 
57481ae08745Sheppo 		break;
57491ae08745Sheppo 
57501ae08745Sheppo 	default:
57511ae08745Sheppo 		DERR(vswp, "%s(%lld): Unknown vio_subtype %x\n", __func__,
57521ae08745Sheppo 			ldcp->ldc_id, ibnd_desc->hdr.tag.vio_subtype);
57531ae08745Sheppo 	}
57541ae08745Sheppo 
57551ae08745Sheppo 	D1(vswp, "%s(%lld) exit", __func__, ldcp->ldc_id);
57561ae08745Sheppo }
57571ae08745Sheppo 
57581ae08745Sheppo static void
57591ae08745Sheppo vsw_process_err_pkt(vsw_ldc_t *ldcp, void *epkt, vio_msg_tag_t tag)
57601ae08745Sheppo {
57611ae08745Sheppo 	_NOTE(ARGUNUSED(epkt))
57621ae08745Sheppo 
57631ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
57641ae08745Sheppo 	uint16_t	env = tag.vio_subtype_env;
57651ae08745Sheppo 
57661ae08745Sheppo 	D1(vswp, "%s (%lld): enter\n", __func__, ldcp->ldc_id);
57671ae08745Sheppo 
57681ae08745Sheppo 	/*
57691ae08745Sheppo 	 * Error vio_subtypes have yet to be defined. So for
57701ae08745Sheppo 	 * the moment we can't do anything.
57711ae08745Sheppo 	 */
57721ae08745Sheppo 	D2(vswp, "%s: (%x) vio_subtype env", __func__, env);
57731ae08745Sheppo 
57741ae08745Sheppo 	D1(vswp, "%s (%lld): exit\n", __func__, ldcp->ldc_id);
57751ae08745Sheppo }
57761ae08745Sheppo 
57771ae08745Sheppo /*
57781ae08745Sheppo  * Switch the given ethernet frame when operating in layer 2 mode.
57791ae08745Sheppo  *
57801ae08745Sheppo  * vswp: pointer to the vsw instance
57811ae08745Sheppo  * mp: pointer to chain of ethernet frame(s) to be switched
57821ae08745Sheppo  * caller: identifies the source of this frame as:
57831ae08745Sheppo  * 		1. VSW_VNETPORT - a vsw port (connected to a vnet).
57841ae08745Sheppo  *		2. VSW_PHYSDEV - the physical ethernet device
57851ae08745Sheppo  *		3. VSW_LOCALDEV - vsw configured as a virtual interface
57861ae08745Sheppo  * arg: argument provided by the caller.
57871ae08745Sheppo  *		1. for VNETPORT - pointer to the corresponding vsw_port_t.
57881ae08745Sheppo  *		2. for PHYSDEV - NULL
 *		3. for LOCALDEV - pointer to this vsw_t (self)
57901ae08745Sheppo  */
57911ae08745Sheppo void
57921ae08745Sheppo vsw_switch_l2_frame(vsw_t *vswp, mblk_t *mp, int caller,
57931ae08745Sheppo 			vsw_port_t *arg, mac_resource_handle_t mrh)
57941ae08745Sheppo {
57951ae08745Sheppo 	struct ether_header	*ehp;
57961ae08745Sheppo 	vsw_port_t		*port = NULL;
57971ae08745Sheppo 	mblk_t			*bp, *ret_m;
57981ae08745Sheppo 	mblk_t			*nmp = NULL;
57991ae08745Sheppo 	vsw_port_list_t		*plist = &vswp->plist;
58001ae08745Sheppo 
58011ae08745Sheppo 	D1(vswp, "%s: enter (caller %d)", __func__, caller);
58021ae08745Sheppo 
58031ae08745Sheppo 	/*
58041ae08745Sheppo 	 * PERF: rather than breaking up the chain here, scan it
58051ae08745Sheppo 	 * to find all mblks heading to same destination and then
58061ae08745Sheppo 	 * pass that sub-chain to the lower transmit functions.
58071ae08745Sheppo 	 */
58081ae08745Sheppo 
58091ae08745Sheppo 	/* process the chain of packets */
58101ae08745Sheppo 	bp = mp;
58111ae08745Sheppo 	while (bp) {
58121ae08745Sheppo 		mp = bp;
58131ae08745Sheppo 		bp = bp->b_next;
58141ae08745Sheppo 		mp->b_next = mp->b_prev = NULL;
58151ae08745Sheppo 		ehp = (struct ether_header *)mp->b_rptr;
58161ae08745Sheppo 
58171ae08745Sheppo 		D2(vswp, "%s: mblk data buffer %lld : actual data size %lld",
58181ae08745Sheppo 			__func__, MBLKSIZE(mp), MBLKL(mp));
58191ae08745Sheppo 
58201ae08745Sheppo 		READ_ENTER(&vswp->if_lockrw);
58211ae08745Sheppo 		if (ether_cmp(&ehp->ether_dhost, &vswp->if_addr) == 0) {
58221ae08745Sheppo 			/*
58231ae08745Sheppo 			 * If destination is VSW_LOCALDEV (vsw as an eth
58241ae08745Sheppo 			 * interface) and if the device is up & running,
58251ae08745Sheppo 			 * send the packet up the stack on this host.
58261ae08745Sheppo 			 * If the virtual interface is down, drop the packet.
58271ae08745Sheppo 			 */
58281ae08745Sheppo 			if (caller != VSW_LOCALDEV) {
58291ae08745Sheppo 				if (vswp->if_state & VSW_IF_UP) {
58301ae08745Sheppo 					RW_EXIT(&vswp->if_lockrw);
5831ba2e4443Sseb 					mac_rx(vswp->if_mh, mrh, mp);
58321ae08745Sheppo 				} else {
58331ae08745Sheppo 					RW_EXIT(&vswp->if_lockrw);
58341ae08745Sheppo 					/* Interface down, drop pkt */
58351ae08745Sheppo 					freemsg(mp);
58361ae08745Sheppo 				}
58371ae08745Sheppo 			} else {
58381ae08745Sheppo 				RW_EXIT(&vswp->if_lockrw);
58391ae08745Sheppo 				freemsg(mp);
58401ae08745Sheppo 			}
58411ae08745Sheppo 			continue;
58421ae08745Sheppo 		}
58431ae08745Sheppo 		RW_EXIT(&vswp->if_lockrw);
58441ae08745Sheppo 
58451ae08745Sheppo 		READ_ENTER(&plist->lockrw);
58461ae08745Sheppo 		port = vsw_lookup_fdb(vswp, ehp);
58471ae08745Sheppo 		if (port) {
58481ae08745Sheppo 			/*
58491ae08745Sheppo 			 * Mark the port as in-use.
58501ae08745Sheppo 			 */
58511ae08745Sheppo 			mutex_enter(&port->ref_lock);
58521ae08745Sheppo 			port->ref_cnt++;
58531ae08745Sheppo 			mutex_exit(&port->ref_lock);
58541ae08745Sheppo 			RW_EXIT(&plist->lockrw);
58551ae08745Sheppo 
58561ae08745Sheppo 			/*
58571ae08745Sheppo 			 * If plumbed and in promisc mode then copy msg
58581ae08745Sheppo 			 * and send up the stack.
58591ae08745Sheppo 			 */
58601ae08745Sheppo 			READ_ENTER(&vswp->if_lockrw);
58611ae08745Sheppo 			if (VSW_U_P(vswp->if_state)) {
58621ae08745Sheppo 				RW_EXIT(&vswp->if_lockrw);
58631ae08745Sheppo 				nmp = copymsg(mp);
58641ae08745Sheppo 				if (nmp)
5865ba2e4443Sseb 					mac_rx(vswp->if_mh, mrh, nmp);
58661ae08745Sheppo 			} else {
58671ae08745Sheppo 				RW_EXIT(&vswp->if_lockrw);
58681ae08745Sheppo 			}
58691ae08745Sheppo 
58701ae08745Sheppo 			/*
58711ae08745Sheppo 			 * If the destination is in FDB, the packet
			 * should be forwarded to the corresponding
58731ae08745Sheppo 			 * vsw_port (connected to a vnet device -
58741ae08745Sheppo 			 * VSW_VNETPORT)
58751ae08745Sheppo 			 */
58761ae08745Sheppo 			(void) vsw_portsend(port, mp);
58771ae08745Sheppo 
58781ae08745Sheppo 			/*
58791ae08745Sheppo 			 * Decrement use count in port and check if
58801ae08745Sheppo 			 * should wake delete thread.
58811ae08745Sheppo 			 */
58821ae08745Sheppo 			mutex_enter(&port->ref_lock);
58831ae08745Sheppo 			port->ref_cnt--;
58841ae08745Sheppo 			if (port->ref_cnt == 0)
58851ae08745Sheppo 				cv_signal(&port->ref_cv);
58861ae08745Sheppo 			mutex_exit(&port->ref_lock);
58871ae08745Sheppo 		} else {
58881ae08745Sheppo 			RW_EXIT(&plist->lockrw);
58891ae08745Sheppo 			/*
58901ae08745Sheppo 			 * Destination not in FDB.
58911ae08745Sheppo 			 *
58921ae08745Sheppo 			 * If the destination is broadcast or
58931ae08745Sheppo 			 * multicast forward the packet to all
58941ae08745Sheppo 			 * (VNETPORTs, PHYSDEV, LOCALDEV),
58951ae08745Sheppo 			 * except the caller.
58961ae08745Sheppo 			 */
58971ae08745Sheppo 			if (IS_BROADCAST(ehp)) {
58981ae08745Sheppo 				D3(vswp, "%s: BROADCAST pkt", __func__);
58991ae08745Sheppo 				(void) vsw_forward_all(vswp, mp,
59001ae08745Sheppo 								caller, arg);
59011ae08745Sheppo 			} else if (IS_MULTICAST(ehp)) {
59021ae08745Sheppo 				D3(vswp, "%s: MULTICAST pkt", __func__);
59031ae08745Sheppo 				(void) vsw_forward_grp(vswp, mp,
59041ae08745Sheppo 							caller, arg);
59051ae08745Sheppo 			} else {
59061ae08745Sheppo 				/*
59071ae08745Sheppo 				 * If the destination is unicast, and came
59081ae08745Sheppo 				 * from either a logical network device or
59091ae08745Sheppo 				 * the switch itself when it is plumbed, then
59101ae08745Sheppo 				 * send it out on the physical device and also
59111ae08745Sheppo 				 * up the stack if the logical interface is
				 * in promiscuous mode.
				 *
				 * NOTE:  The assumption here is that if we
				 * cannot find the destination in our fdb, and
				 * it is a unicast address which came from
				 * either a vnet or down the stack (when
				 * plumbed), then it must be destined for an
				 * ethernet device outside our ldoms.
59201ae08745Sheppo 				 */
59211ae08745Sheppo 				if (caller == VSW_VNETPORT) {
59221ae08745Sheppo 					READ_ENTER(&vswp->if_lockrw);
59231ae08745Sheppo 					if (VSW_U_P(vswp->if_state)) {
59241ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
59251ae08745Sheppo 						nmp = copymsg(mp);
59261ae08745Sheppo 						if (nmp)
5927ba2e4443Sseb 							mac_rx(vswp->if_mh,
59281ae08745Sheppo 								mrh, nmp);
59291ae08745Sheppo 					} else {
59301ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
59311ae08745Sheppo 					}
59321ae08745Sheppo 					if ((ret_m = vsw_tx_msg(vswp, mp))
59331ae08745Sheppo 								!= NULL) {
59341ae08745Sheppo 						DERR(vswp, "%s: drop mblks to "
59351ae08745Sheppo 							"phys dev", __func__);
59361ae08745Sheppo 						freemsg(ret_m);
59371ae08745Sheppo 					}
59381ae08745Sheppo 
59391ae08745Sheppo 				} else if (caller == VSW_PHYSDEV) {
59401ae08745Sheppo 					/*
59411ae08745Sheppo 					 * Pkt seen because card in promisc
59421ae08745Sheppo 					 * mode. Send up stack if plumbed in
59431ae08745Sheppo 					 * promisc mode, else drop it.
59441ae08745Sheppo 					 */
59451ae08745Sheppo 					READ_ENTER(&vswp->if_lockrw);
59461ae08745Sheppo 					if (VSW_U_P(vswp->if_state)) {
59471ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
5948ba2e4443Sseb 						mac_rx(vswp->if_mh, mrh, mp);
59491ae08745Sheppo 					} else {
59501ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
59511ae08745Sheppo 						freemsg(mp);
59521ae08745Sheppo 					}
59531ae08745Sheppo 
59541ae08745Sheppo 				} else if (caller == VSW_LOCALDEV) {
59551ae08745Sheppo 					/*
59561ae08745Sheppo 					 * Pkt came down the stack, send out
59571ae08745Sheppo 					 * over physical device.
59581ae08745Sheppo 					 */
59591ae08745Sheppo 					if ((ret_m = vsw_tx_msg(vswp, mp))
59601ae08745Sheppo 								!= NULL) {
59611ae08745Sheppo 						DERR(vswp, "%s: drop mblks to "
59621ae08745Sheppo 							"phys dev", __func__);
59631ae08745Sheppo 						freemsg(ret_m);
59641ae08745Sheppo 					}
59651ae08745Sheppo 				}
59661ae08745Sheppo 			}
59671ae08745Sheppo 		}
59681ae08745Sheppo 	}
59691ae08745Sheppo 	D1(vswp, "%s: exit\n", __func__);
59701ae08745Sheppo }
59711ae08745Sheppo 
59721ae08745Sheppo /*
59731ae08745Sheppo  * Switch ethernet frame when in layer 3 mode (i.e. using IP
59741ae08745Sheppo  * layer to do the routing).
59751ae08745Sheppo  *
59761ae08745Sheppo  * There is a large amount of overlap between this function and
59771ae08745Sheppo  * vsw_switch_l2_frame. At some stage we need to revisit and refactor
59781ae08745Sheppo  * both these functions.
59791ae08745Sheppo  */
59801ae08745Sheppo void
59811ae08745Sheppo vsw_switch_l3_frame(vsw_t *vswp, mblk_t *mp, int caller,
59821ae08745Sheppo 			vsw_port_t *arg, mac_resource_handle_t mrh)
59831ae08745Sheppo {
59841ae08745Sheppo 	struct ether_header	*ehp;
59851ae08745Sheppo 	vsw_port_t		*port = NULL;
59861ae08745Sheppo 	mblk_t			*bp = NULL;
59871ae08745Sheppo 	vsw_port_list_t		*plist = &vswp->plist;
59881ae08745Sheppo 
59891ae08745Sheppo 	D1(vswp, "%s: enter (caller %d)", __func__, caller);
59901ae08745Sheppo 
59911ae08745Sheppo 	/*
	 * In layer 3 mode we should only ever be switching packets
	 * between the IP layer and vnet devices. So make sure that's
	 * who is invoking us.
59951ae08745Sheppo 	 */
59961ae08745Sheppo 	if ((caller != VSW_LOCALDEV) && (caller != VSW_VNETPORT)) {
59971ae08745Sheppo 		DERR(vswp, "%s: unexpected caller (%d)", __func__, caller);
59981ae08745Sheppo 		freemsgchain(mp);
59991ae08745Sheppo 		return;
60001ae08745Sheppo 	}
60011ae08745Sheppo 
60021ae08745Sheppo 	/* process the chain of packets */
60031ae08745Sheppo 	bp = mp;
60041ae08745Sheppo 	while (bp) {
60051ae08745Sheppo 		mp = bp;
60061ae08745Sheppo 		bp = bp->b_next;
60071ae08745Sheppo 		mp->b_next = mp->b_prev = NULL;
60081ae08745Sheppo 		ehp = (struct ether_header *)mp->b_rptr;
60091ae08745Sheppo 
60101ae08745Sheppo 		D2(vswp, "%s: mblk data buffer %lld : actual data size %lld",
60111ae08745Sheppo 			__func__, MBLKSIZE(mp), MBLKL(mp));
60121ae08745Sheppo 
60131ae08745Sheppo 		READ_ENTER(&plist->lockrw);
60141ae08745Sheppo 		port = vsw_lookup_fdb(vswp, ehp);
60151ae08745Sheppo 		if (port) {
60161ae08745Sheppo 			/*
60171ae08745Sheppo 			 * Mark port as in-use.
60181ae08745Sheppo 			 */
60191ae08745Sheppo 			mutex_enter(&port->ref_lock);
60201ae08745Sheppo 			port->ref_cnt++;
60211ae08745Sheppo 			mutex_exit(&port->ref_lock);
60221ae08745Sheppo 			RW_EXIT(&plist->lockrw);
60231ae08745Sheppo 
60241ae08745Sheppo 			D2(vswp, "%s: sending to target port", __func__);
60251ae08745Sheppo 			(void) vsw_portsend(port, mp);
60261ae08745Sheppo 
60271ae08745Sheppo 			/*
60281ae08745Sheppo 			 * Finished with port so decrement ref count and
60291ae08745Sheppo 			 * check if should wake delete thread.
60301ae08745Sheppo 			 */
60311ae08745Sheppo 			mutex_enter(&port->ref_lock);
60321ae08745Sheppo 			port->ref_cnt--;
60331ae08745Sheppo 			if (port->ref_cnt == 0)
60341ae08745Sheppo 				cv_signal(&port->ref_cv);
60351ae08745Sheppo 			mutex_exit(&port->ref_lock);
60361ae08745Sheppo 		} else {
60371ae08745Sheppo 			RW_EXIT(&plist->lockrw);
60381ae08745Sheppo 			/*
60391ae08745Sheppo 			 * Destination not in FDB
60401ae08745Sheppo 			 *
60411ae08745Sheppo 			 * If the destination is broadcast or
60421ae08745Sheppo 			 * multicast forward the packet to all
60431ae08745Sheppo 			 * (VNETPORTs, PHYSDEV, LOCALDEV),
60441ae08745Sheppo 			 * except the caller.
60451ae08745Sheppo 			 */
60461ae08745Sheppo 			if (IS_BROADCAST(ehp)) {
60471ae08745Sheppo 				D2(vswp, "%s: BROADCAST pkt", __func__);
60481ae08745Sheppo 				(void) vsw_forward_all(vswp, mp,
60491ae08745Sheppo 								caller, arg);
60501ae08745Sheppo 			} else if (IS_MULTICAST(ehp)) {
60511ae08745Sheppo 				D2(vswp, "%s: MULTICAST pkt", __func__);
60521ae08745Sheppo 				(void) vsw_forward_grp(vswp, mp,
60531ae08745Sheppo 							caller, arg);
60541ae08745Sheppo 			} else {
60551ae08745Sheppo 				/*
60561ae08745Sheppo 				 * Unicast pkt from vnet that we don't have
				 * an FDB entry for, so it must be destined for
60581ae08745Sheppo 				 * the outside world. Attempt to send up to the
60591ae08745Sheppo 				 * IP layer to allow it to deal with it.
60601ae08745Sheppo 				 */
60611ae08745Sheppo 				if (caller == VSW_VNETPORT) {
60621ae08745Sheppo 					READ_ENTER(&vswp->if_lockrw);
60631ae08745Sheppo 					if (vswp->if_state & VSW_IF_UP) {
60641ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
60651ae08745Sheppo 						D2(vswp, "%s: sending up",
60661ae08745Sheppo 							__func__);
6067ba2e4443Sseb 						mac_rx(vswp->if_mh, mrh, mp);
60681ae08745Sheppo 					} else {
60691ae08745Sheppo 						RW_EXIT(&vswp->if_lockrw);
60701ae08745Sheppo 						/* Interface down, drop pkt */
60711ae08745Sheppo 						D2(vswp, "%s I/F down",
60721ae08745Sheppo 								__func__);
60731ae08745Sheppo 						freemsg(mp);
60741ae08745Sheppo 					}
60751ae08745Sheppo 				}
60761ae08745Sheppo 			}
60771ae08745Sheppo 		}
60781ae08745Sheppo 	}
60791ae08745Sheppo 
60801ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
60811ae08745Sheppo }
60821ae08745Sheppo 
60831ae08745Sheppo /*
60841ae08745Sheppo  * Forward the ethernet frame to all ports (VNETPORTs, PHYSDEV, LOCALDEV),
60851ae08745Sheppo  * except the caller (port on which frame arrived).
60861ae08745Sheppo  */
60871ae08745Sheppo static int
60881ae08745Sheppo vsw_forward_all(vsw_t *vswp, mblk_t *mp, int caller, vsw_port_t *arg)
60891ae08745Sheppo {
60901ae08745Sheppo 	vsw_port_list_t	*plist = &vswp->plist;
60911ae08745Sheppo 	vsw_port_t	*portp;
60921ae08745Sheppo 	mblk_t		*nmp = NULL;
60931ae08745Sheppo 	mblk_t		*ret_m = NULL;
60941ae08745Sheppo 	int		skip_port = 0;
60951ae08745Sheppo 
60961ae08745Sheppo 	D1(vswp, "vsw_forward_all: enter\n");
60971ae08745Sheppo 
60981ae08745Sheppo 	/*
60991ae08745Sheppo 	 * Broadcast message from inside ldoms so send to outside
61001ae08745Sheppo 	 * world if in either of layer 2 modes.
61011ae08745Sheppo 	 */
61021ae08745Sheppo 	if (((vswp->smode[vswp->smode_idx] == VSW_LAYER2) ||
61031ae08745Sheppo 		(vswp->smode[vswp->smode_idx] == VSW_LAYER2_PROMISC)) &&
61041ae08745Sheppo 		((caller == VSW_LOCALDEV) || (caller == VSW_VNETPORT))) {
61051ae08745Sheppo 
61061ae08745Sheppo 		nmp = dupmsg(mp);
61071ae08745Sheppo 		if (nmp) {
61081ae08745Sheppo 			if ((ret_m = vsw_tx_msg(vswp, nmp)) != NULL) {
				DERR(vswp, "%s: dropping pkt(s) "
					"consisting of %ld bytes of data for"
					" physical device", __func__,
					MBLKL(ret_m));
				freemsg(ret_m);
			}
61141ae08745Sheppo 		}
61151ae08745Sheppo 	}
61161ae08745Sheppo 
61171ae08745Sheppo 	if (caller == VSW_VNETPORT)
61181ae08745Sheppo 		skip_port = 1;
61191ae08745Sheppo 
61201ae08745Sheppo 	/*
61211ae08745Sheppo 	 * Broadcast message from other vnet (layer 2 or 3) or outside
61221ae08745Sheppo 	 * world (layer 2 only), send up stack if plumbed.
61231ae08745Sheppo 	 */
61241ae08745Sheppo 	if ((caller == VSW_PHYSDEV) || (caller == VSW_VNETPORT)) {
61251ae08745Sheppo 		READ_ENTER(&vswp->if_lockrw);
61261ae08745Sheppo 		if (vswp->if_state & VSW_IF_UP) {
61271ae08745Sheppo 			RW_EXIT(&vswp->if_lockrw);
61281ae08745Sheppo 			nmp = copymsg(mp);
61291ae08745Sheppo 			if (nmp)
6130ba2e4443Sseb 				mac_rx(vswp->if_mh, NULL, nmp);
61311ae08745Sheppo 		} else {
61321ae08745Sheppo 			RW_EXIT(&vswp->if_lockrw);
61331ae08745Sheppo 		}
61341ae08745Sheppo 	}
61351ae08745Sheppo 
61361ae08745Sheppo 	/* send it to all VNETPORTs */
61371ae08745Sheppo 	READ_ENTER(&plist->lockrw);
61381ae08745Sheppo 	for (portp = plist->head; portp != NULL; portp = portp->p_next) {
61391ae08745Sheppo 		D2(vswp, "vsw_forward_all: port %d", portp->p_instance);
61401ae08745Sheppo 		/*
61411ae08745Sheppo 		 * Caution ! - don't reorder these two checks as arg
61421ae08745Sheppo 		 * will be NULL if the caller is PHYSDEV. skip_port is
61431ae08745Sheppo 		 * only set if caller is VNETPORT.
61441ae08745Sheppo 		 */
61451ae08745Sheppo 		if ((skip_port) && (portp == arg))
61461ae08745Sheppo 			continue;
61471ae08745Sheppo 		else {
61481ae08745Sheppo 			nmp = dupmsg(mp);
61491ae08745Sheppo 			if (nmp) {
61501ae08745Sheppo 				(void) vsw_portsend(portp, nmp);
61511ae08745Sheppo 			} else {
61521ae08745Sheppo 				DERR(vswp, "vsw_forward_all: nmp NULL");
61531ae08745Sheppo 			}
61541ae08745Sheppo 		}
61551ae08745Sheppo 	}
61561ae08745Sheppo 	RW_EXIT(&plist->lockrw);
61571ae08745Sheppo 
61581ae08745Sheppo 	freemsg(mp);
61591ae08745Sheppo 
61601ae08745Sheppo 	D1(vswp, "vsw_forward_all: exit\n");
61611ae08745Sheppo 	return (0);
61621ae08745Sheppo }
61631ae08745Sheppo 
61641ae08745Sheppo /*
61651ae08745Sheppo  * Forward pkts to any devices or interfaces which have registered
61661ae08745Sheppo  * an interest in them (i.e. multicast groups).
61671ae08745Sheppo  */
61681ae08745Sheppo static int
61691ae08745Sheppo vsw_forward_grp(vsw_t *vswp, mblk_t *mp, int caller, vsw_port_t *arg)
61701ae08745Sheppo {
61711ae08745Sheppo 	struct ether_header	*ehp = (struct ether_header *)mp->b_rptr;
61721ae08745Sheppo 	mfdb_ent_t		*entp = NULL;
61731ae08745Sheppo 	mfdb_ent_t		*tpp = NULL;
61741ae08745Sheppo 	vsw_port_t 		*port;
61751ae08745Sheppo 	uint64_t		key = 0;
61761ae08745Sheppo 	mblk_t			*nmp = NULL;
61771ae08745Sheppo 	mblk_t			*ret_m = NULL;
61781ae08745Sheppo 	boolean_t		check_if = B_TRUE;
61791ae08745Sheppo 
61801ae08745Sheppo 	/*
61811ae08745Sheppo 	 * Convert address to hash table key
61821ae08745Sheppo 	 */
61831ae08745Sheppo 	KEY_HASH(key, ehp->ether_dhost);
61841ae08745Sheppo 
61851ae08745Sheppo 	D1(vswp, "%s: key 0x%llx", __func__, key);
61861ae08745Sheppo 
61871ae08745Sheppo 	/*
61881ae08745Sheppo 	 * If pkt came from either a vnet or down the stack (if we are
61891ae08745Sheppo 	 * plumbed) and we are in layer 2 mode, then we send the pkt out
61901ae08745Sheppo 	 * over the physical adapter, and then check to see if any other
61911ae08745Sheppo 	 * vnets are interested in it.
61921ae08745Sheppo 	 */
61931ae08745Sheppo 	if (((vswp->smode[vswp->smode_idx] == VSW_LAYER2) ||
61941ae08745Sheppo 		(vswp->smode[vswp->smode_idx] == VSW_LAYER2_PROMISC)) &&
61951ae08745Sheppo 		((caller == VSW_VNETPORT) || (caller == VSW_LOCALDEV))) {
61961ae08745Sheppo 		nmp = dupmsg(mp);
61971ae08745Sheppo 		if (nmp) {
61981ae08745Sheppo 			if ((ret_m = vsw_tx_msg(vswp, nmp)) != NULL) {
61991ae08745Sheppo 				DERR(vswp, "%s: dropping pkt(s) "
62001ae08745Sheppo 					"consisting of %ld bytes of "
62011ae08745Sheppo 					"data for physical device",
62021ae08745Sheppo 					__func__, MBLKL(ret_m));
62031ae08745Sheppo 				freemsg(ret_m);
62041ae08745Sheppo 			}
62051ae08745Sheppo 		}
62061ae08745Sheppo 	}
62071ae08745Sheppo 
62081ae08745Sheppo 	READ_ENTER(&vswp->mfdbrw);
62091ae08745Sheppo 	if (mod_hash_find(vswp->mfdb, (mod_hash_key_t)key,
62101ae08745Sheppo 				(mod_hash_val_t *)&entp) != 0) {
62111ae08745Sheppo 		D3(vswp, "%s: no table entry found for addr 0x%llx",
62121ae08745Sheppo 								__func__, key);
62131ae08745Sheppo 	} else {
62141ae08745Sheppo 		/*
62151ae08745Sheppo 		 * Send to list of devices associated with this address...
62161ae08745Sheppo 		 */
62171ae08745Sheppo 		for (tpp = entp; tpp != NULL; tpp = tpp->nextp) {
62181ae08745Sheppo 
			/* don't send to ourselves */
62201ae08745Sheppo 			if ((caller == VSW_VNETPORT) &&
62211ae08745Sheppo 				(tpp->d_addr == (void *)arg)) {
62221ae08745Sheppo 				port = (vsw_port_t *)tpp->d_addr;
62231ae08745Sheppo 				D3(vswp, "%s: not sending to ourselves"
62241ae08745Sheppo 					" : port %d", __func__,
62251ae08745Sheppo 					port->p_instance);
62261ae08745Sheppo 				continue;
62271ae08745Sheppo 
62281ae08745Sheppo 			} else if ((caller == VSW_LOCALDEV) &&
62291ae08745Sheppo 				(tpp->d_type == VSW_LOCALDEV)) {
62301ae08745Sheppo 				D3(vswp, "%s: not sending back up stack",
62311ae08745Sheppo 					__func__);
62321ae08745Sheppo 				continue;
62331ae08745Sheppo 			}
62341ae08745Sheppo 
62351ae08745Sheppo 			if (tpp->d_type == VSW_VNETPORT) {
62361ae08745Sheppo 				port = (vsw_port_t *)tpp->d_addr;
62371ae08745Sheppo 				D3(vswp, "%s: sending to port %ld for "
62381ae08745Sheppo 					" addr 0x%llx", __func__,
62391ae08745Sheppo 					port->p_instance, key);
62401ae08745Sheppo 
62411ae08745Sheppo 				nmp = dupmsg(mp);
62421ae08745Sheppo 				if (nmp)
62431ae08745Sheppo 					(void) vsw_portsend(port, nmp);
62441ae08745Sheppo 			} else {
62451ae08745Sheppo 				if (vswp->if_state & VSW_IF_UP) {
62461ae08745Sheppo 					nmp = copymsg(mp);
62471ae08745Sheppo 					if (nmp)
6248ba2e4443Sseb 						mac_rx(vswp->if_mh, NULL, nmp);
62491ae08745Sheppo 					check_if = B_FALSE;
62501ae08745Sheppo 					D3(vswp, "%s: sending up stack"
62511ae08745Sheppo 						" for addr 0x%llx", __func__,
62521ae08745Sheppo 						key);
62531ae08745Sheppo 				}
62541ae08745Sheppo 			}
62551ae08745Sheppo 		}
62561ae08745Sheppo 	}
62571ae08745Sheppo 
62581ae08745Sheppo 	RW_EXIT(&vswp->mfdbrw);
62591ae08745Sheppo 
62601ae08745Sheppo 	/*
62611ae08745Sheppo 	 * If the pkt came from either a vnet or from physical device,
	 * and if we haven't already sent the pkt up the stack then we
62631ae08745Sheppo 	 * check now if we can/should (i.e. the interface is plumbed
62641ae08745Sheppo 	 * and in promisc mode).
62651ae08745Sheppo 	 */
62661ae08745Sheppo 	if ((check_if) &&
62671ae08745Sheppo 		((caller == VSW_VNETPORT) || (caller == VSW_PHYSDEV))) {
62681ae08745Sheppo 		READ_ENTER(&vswp->if_lockrw);
62691ae08745Sheppo 		if (VSW_U_P(vswp->if_state)) {
62701ae08745Sheppo 			RW_EXIT(&vswp->if_lockrw);
62711ae08745Sheppo 			D3(vswp, "%s: (caller %d) finally sending up stack"
62721ae08745Sheppo 				" for addr 0x%llx", __func__, caller, key);
62731ae08745Sheppo 			nmp = copymsg(mp);
62741ae08745Sheppo 			if (nmp)
6275ba2e4443Sseb 				mac_rx(vswp->if_mh, NULL, nmp);
62761ae08745Sheppo 		} else {
62771ae08745Sheppo 			RW_EXIT(&vswp->if_lockrw);
62781ae08745Sheppo 		}
62791ae08745Sheppo 	}
62801ae08745Sheppo 
62811ae08745Sheppo 	freemsg(mp);
62821ae08745Sheppo 
62831ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
62841ae08745Sheppo 
62851ae08745Sheppo 	return (0);
62861ae08745Sheppo }
62871ae08745Sheppo 
62881ae08745Sheppo /* transmit the packet over the given port */
62891ae08745Sheppo static int
62901ae08745Sheppo vsw_portsend(vsw_port_t *port, mblk_t *mp)
62911ae08745Sheppo {
62921ae08745Sheppo 	vsw_ldc_list_t 	*ldcl = &port->p_ldclist;
62931ae08745Sheppo 	vsw_ldc_t 	*ldcp;
62941ae08745Sheppo 	int		status = 0;
62951ae08745Sheppo 
62961ae08745Sheppo 
62971ae08745Sheppo 	READ_ENTER(&ldcl->lockrw);
62981ae08745Sheppo 	/*
62991ae08745Sheppo 	 * Note for now, we have a single channel.
63001ae08745Sheppo 	 */
63011ae08745Sheppo 	ldcp = ldcl->head;
63021ae08745Sheppo 	if (ldcp == NULL) {
63031ae08745Sheppo 		DERR(port->p_vswp, "vsw_portsend: no ldc: dropping packet\n");
63041ae08745Sheppo 		freemsg(mp);
63051ae08745Sheppo 		RW_EXIT(&ldcl->lockrw);
63061ae08745Sheppo 		return (1);
63071ae08745Sheppo 	}
63081ae08745Sheppo 
63091ae08745Sheppo 	/*
63101ae08745Sheppo 	 * Send the message out using the appropriate
	 * transmit function, which will free the mblk when it
63121ae08745Sheppo 	 * is finished with it.
63131ae08745Sheppo 	 */
63141ae08745Sheppo 	mutex_enter(&port->tx_lock);
63151ae08745Sheppo 	if (port->transmit != NULL)
63161ae08745Sheppo 		status = (*port->transmit)(ldcp, mp);
63171ae08745Sheppo 	else {
63181ae08745Sheppo 		freemsg(mp);
63191ae08745Sheppo 	}
63201ae08745Sheppo 	mutex_exit(&port->tx_lock);
63211ae08745Sheppo 
63221ae08745Sheppo 	RW_EXIT(&ldcl->lockrw);
63231ae08745Sheppo 
63241ae08745Sheppo 	return (status);
63251ae08745Sheppo }
63261ae08745Sheppo 
63271ae08745Sheppo /*
63281ae08745Sheppo  * Send packet out via descriptor ring to a logical device.
63291ae08745Sheppo  */
63301ae08745Sheppo static int
63311ae08745Sheppo vsw_dringsend(vsw_ldc_t *ldcp, mblk_t *mp)
63321ae08745Sheppo {
63331ae08745Sheppo 	vio_dring_msg_t		dring_pkt;
63341ae08745Sheppo 	dring_info_t		*dp = NULL;
63351ae08745Sheppo 	vsw_private_desc_t	*priv_desc = NULL;
6336d10e4ef2Snarayan 	vnet_public_desc_t	*pub = NULL;
63371ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
63381ae08745Sheppo 	mblk_t			*bp;
63391ae08745Sheppo 	size_t			n, size;
63401ae08745Sheppo 	caddr_t			bufp;
63411ae08745Sheppo 	int			idx;
63421ae08745Sheppo 	int			status = LDC_TX_SUCCESS;
63431ae08745Sheppo 
63441ae08745Sheppo 	D1(vswp, "%s(%lld): enter\n", __func__, ldcp->ldc_id);
63451ae08745Sheppo 
63461ae08745Sheppo 	/* TODO: make test a macro */
63471ae08745Sheppo 	if ((!(ldcp->lane_out.lstate & VSW_LANE_ACTIVE)) ||
63481ae08745Sheppo 		(ldcp->ldc_status != LDC_UP) || (ldcp->ldc_handle == NULL)) {
63491ae08745Sheppo 		DWARN(vswp, "%s(%lld) status(%d) lstate(0x%llx), dropping "
63501ae08745Sheppo 			"packet\n", __func__, ldcp->ldc_id, ldcp->ldc_status,
63511ae08745Sheppo 			ldcp->lane_out.lstate);
63521ae08745Sheppo 		freemsg(mp);
63531ae08745Sheppo 		return (LDC_TX_FAILURE);
63541ae08745Sheppo 	}
63551ae08745Sheppo 
63561ae08745Sheppo 	/*
63571ae08745Sheppo 	 * Note - using first ring only, this may change
63581ae08745Sheppo 	 * in the future.
63591ae08745Sheppo 	 */
63601ae08745Sheppo 	if ((dp = ldcp->lane_out.dringp) == NULL) {
63611ae08745Sheppo 		DERR(vswp, "%s(%lld): no dring for outbound lane on"
63621ae08745Sheppo 			" channel %d", __func__, ldcp->ldc_id, ldcp->ldc_id);
63631ae08745Sheppo 		freemsg(mp);
63641ae08745Sheppo 		return (LDC_TX_FAILURE);
63651ae08745Sheppo 	}
63661ae08745Sheppo 
63671ae08745Sheppo 	size = msgsize(mp);
63681ae08745Sheppo 	if (size > (size_t)ETHERMAX) {
63691ae08745Sheppo 		DERR(vswp, "%s(%lld) invalid size (%ld)\n", __func__,
63701ae08745Sheppo 		    ldcp->ldc_id, size);
6371d10e4ef2Snarayan 		freemsg(mp);
6372d10e4ef2Snarayan 		return (LDC_TX_FAILURE);
63731ae08745Sheppo 	}
63741ae08745Sheppo 
63751ae08745Sheppo 	/*
63761ae08745Sheppo 	 * Find a free descriptor
63771ae08745Sheppo 	 *
63781ae08745Sheppo 	 * Note: for the moment we are assuming that we will only
63791ae08745Sheppo 	 * have one dring going from the switch to each of its
63801ae08745Sheppo 	 * peers. This may change in the future.
63811ae08745Sheppo 	 */
63821ae08745Sheppo 	if (vsw_dring_find_free_desc(dp, &priv_desc, &idx) != 0) {
6383d10e4ef2Snarayan 		D2(vswp, "%s(%lld): no descriptor available for ring "
63841ae08745Sheppo 			"at 0x%llx", __func__, ldcp->ldc_id, dp);
63851ae08745Sheppo 
63861ae08745Sheppo 		/* nothing more we can do */
63871ae08745Sheppo 		status = LDC_TX_NORESOURCES;
63881ae08745Sheppo 		goto vsw_dringsend_free_exit;
63891ae08745Sheppo 	} else {
63901ae08745Sheppo 		D2(vswp, "%s(%lld): free private descriptor found at pos "
63911ae08745Sheppo 			"%ld addr 0x%llx\n", __func__, ldcp->ldc_id, idx,
63921ae08745Sheppo 			priv_desc);
63931ae08745Sheppo 	}
63941ae08745Sheppo 
63951ae08745Sheppo 	/* copy data into the descriptor */
63961ae08745Sheppo 	bufp = priv_desc->datap;
6397d10e4ef2Snarayan 	bufp += VNET_IPALIGN;
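	/*
	 * The frame is laid down VNET_IPALIGN bytes into the exported
	 * buffer; the dring receive path above skips the same number of
	 * bytes (mp->b_rptr += VNET_IPALIGN) before passing the frame up.
	 */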
63981ae08745Sheppo 	for (bp = mp, n = 0; bp != NULL; bp = bp->b_cont) {
63991ae08745Sheppo 		n = MBLKL(bp);
64001ae08745Sheppo 		bcopy(bp->b_rptr, bufp, n);
64011ae08745Sheppo 		bufp += n;
64021ae08745Sheppo 	}
64031ae08745Sheppo 
64041ae08745Sheppo 	priv_desc->datalen = (size < (size_t)ETHERMIN) ? ETHERMIN : size;
6405d10e4ef2Snarayan 
6406d10e4ef2Snarayan 	pub = priv_desc->descp;
6407d10e4ef2Snarayan 	pub->nbytes = priv_desc->datalen;
6408d10e4ef2Snarayan 
6409d10e4ef2Snarayan 	mutex_enter(&priv_desc->dstate_lock);
6410d10e4ef2Snarayan 	pub->hdr.dstate = VIO_DESC_READY;
6411d10e4ef2Snarayan 	mutex_exit(&priv_desc->dstate_lock);
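	/*
	 * Once marked READY the peer may process and ACK this descriptor
	 * at any time; the VIO_SUBTYPE_ACK handling above then clears the
	 * data and returns the descriptor to FREE.
	 */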
64121ae08745Sheppo 
64131ae08745Sheppo 	/*
6414d10e4ef2Snarayan 	 * Determine whether or not we need to send a message to our
6415d10e4ef2Snarayan 	 * peer prompting them to read our newly updated descriptor(s).
64161ae08745Sheppo 	 */
6417d10e4ef2Snarayan 	mutex_enter(&dp->restart_lock);
6418d10e4ef2Snarayan 	if (dp->restart_reqd) {
6419d10e4ef2Snarayan 		dp->restart_reqd = B_FALSE;
6420d10e4ef2Snarayan 		mutex_exit(&dp->restart_lock);
64211ae08745Sheppo 
64221ae08745Sheppo 		/*
64231ae08745Sheppo 		 * Send a vio_dring_msg to peer to prompt them to read
64241ae08745Sheppo 		 * the updated descriptor ring.
64251ae08745Sheppo 		 */
64261ae08745Sheppo 		dring_pkt.tag.vio_msgtype = VIO_TYPE_DATA;
64271ae08745Sheppo 		dring_pkt.tag.vio_subtype = VIO_SUBTYPE_INFO;
64281ae08745Sheppo 		dring_pkt.tag.vio_subtype_env = VIO_DRING_DATA;
64291ae08745Sheppo 		dring_pkt.tag.vio_sid = ldcp->local_session;
64301ae08745Sheppo 
64311ae08745Sheppo 		/* Note - for now using first ring */
64321ae08745Sheppo 		dring_pkt.dring_ident = dp->ident;
64331ae08745Sheppo 
6434d10e4ef2Snarayan 		mutex_enter(&ldcp->lane_out.seq_lock);
64351ae08745Sheppo 		dring_pkt.seq_num = ldcp->lane_out.seq_num++;
6436d10e4ef2Snarayan 		mutex_exit(&ldcp->lane_out.seq_lock);
64371ae08745Sheppo 
6438d10e4ef2Snarayan 		/*
6439d10e4ef2Snarayan 		 * If last_ack_recv is -1 then we know we've not
6440d10e4ef2Snarayan 		 * received any ack's yet, so this must be the first
6441d10e4ef2Snarayan 		 * msg sent, so set the start to the beginning of the ring.
6442d10e4ef2Snarayan 		 */
6443d10e4ef2Snarayan 		mutex_enter(&dp->dlock);
6444d10e4ef2Snarayan 		if (dp->last_ack_recv == -1) {
6445d10e4ef2Snarayan 			dring_pkt.start_idx = 0;
6446d10e4ef2Snarayan 		} else {
6447d10e4ef2Snarayan 			dring_pkt.start_idx = (dp->last_ack_recv + 1) %
6448d10e4ef2Snarayan 						dp->num_descriptors;
6449d10e4ef2Snarayan 		}
6450d10e4ef2Snarayan 		dring_pkt.end_idx = -1;
6451d10e4ef2Snarayan 		mutex_exit(&dp->dlock);
64521ae08745Sheppo 
64531ae08745Sheppo 		D3(vswp, "%s(%lld): dring 0x%llx : ident 0x%llx\n", __func__,
64541ae08745Sheppo 			ldcp->ldc_id, dp, dring_pkt.dring_ident);
6455d10e4ef2Snarayan 		D3(vswp, "%s(%lld): start %lld : end %lld : seq %lld\n",
6456d10e4ef2Snarayan 			__func__, ldcp->ldc_id, dring_pkt.start_idx,
6457d10e4ef2Snarayan 			dring_pkt.end_idx, dring_pkt.seq_num);
64581ae08745Sheppo 
6459d10e4ef2Snarayan 		vsw_send_msg(ldcp, (void *)&dring_pkt,
6460d10e4ef2Snarayan 						sizeof (vio_dring_msg_t));
6461d10e4ef2Snarayan 	} else {
6462d10e4ef2Snarayan 		mutex_exit(&dp->restart_lock);
6463d10e4ef2Snarayan 		D2(vswp, "%s(%lld): updating descp %d", __func__,
6464d10e4ef2Snarayan 			ldcp->ldc_id, idx);
6465d10e4ef2Snarayan 	}
64661ae08745Sheppo 
64671ae08745Sheppo vsw_dringsend_free_exit:
64681ae08745Sheppo 
64691ae08745Sheppo 	/* free the message block */
64701ae08745Sheppo 	freemsg(mp);
64711ae08745Sheppo 
64721ae08745Sheppo 	D1(vswp, "%s(%lld): exit\n", __func__, ldcp->ldc_id);
64731ae08745Sheppo 	return (status);
64741ae08745Sheppo }
64751ae08745Sheppo 
64761ae08745Sheppo /*
64771ae08745Sheppo  * Send an in-band descriptor message over ldc.
64781ae08745Sheppo  */
64791ae08745Sheppo static int
64801ae08745Sheppo vsw_descrsend(vsw_ldc_t *ldcp, mblk_t *mp)
64811ae08745Sheppo {
64821ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
64831ae08745Sheppo 	vio_ibnd_desc_t		ibnd_msg;
64841ae08745Sheppo 	vsw_private_desc_t	*priv_desc = NULL;
64851ae08745Sheppo 	dring_info_t		*dp = NULL;
64861ae08745Sheppo 	size_t			n, size = 0;
64871ae08745Sheppo 	caddr_t			bufp;
64881ae08745Sheppo 	mblk_t			*bp;
64891ae08745Sheppo 	int			idx, i;
64901ae08745Sheppo 	int			status = LDC_TX_SUCCESS;
64911ae08745Sheppo 	static int		warn_msg = 1;
64921ae08745Sheppo 
64931ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
64941ae08745Sheppo 
64951ae08745Sheppo 	ASSERT(mp != NULL);
64961ae08745Sheppo 
64971ae08745Sheppo 	if ((!(ldcp->lane_out.lstate & VSW_LANE_ACTIVE)) ||
64981ae08745Sheppo 		(ldcp->ldc_status != LDC_UP) || (ldcp->ldc_handle == NULL)) {
64991ae08745Sheppo 		DERR(vswp, "%s(%lld) status(%d) state (0x%llx), dropping pkt",
65001ae08745Sheppo 			__func__, ldcp->ldc_id, ldcp->ldc_status,
65011ae08745Sheppo 			ldcp->lane_out.lstate);
65021ae08745Sheppo 		freemsg(mp);
65031ae08745Sheppo 		return (LDC_TX_FAILURE);
65041ae08745Sheppo 	}
65051ae08745Sheppo 
65061ae08745Sheppo 	/*
65071ae08745Sheppo 	 * We only expect a single dring to exist, which we use
65081ae08745Sheppo 	 * as an internal buffer rather than as a transfer channel.
65091ae08745Sheppo 	 */
65101ae08745Sheppo 	if ((dp = ldcp->lane_out.dringp) == NULL) {
65111ae08745Sheppo 		DERR(vswp, "%s(%lld): no dring for outbound lane",
65121ae08745Sheppo 			__func__, ldcp->ldc_id);
65131ae08745Sheppo 		DERR(vswp, "%s(%lld) status(%d) state (0x%llx)",
65141ae08745Sheppo 			__func__, ldcp->ldc_id, ldcp->ldc_status,
65151ae08745Sheppo 			ldcp->lane_out.lstate);
65161ae08745Sheppo 		freemsg(mp);
65171ae08745Sheppo 		return (LDC_TX_FAILURE);
65181ae08745Sheppo 	}
65191ae08745Sheppo 
65201ae08745Sheppo 	size = msgsize(mp);
65211ae08745Sheppo 	if (size > (size_t)ETHERMAX) {
65221ae08745Sheppo 		DERR(vswp, "%s(%lld) invalid size (%ld)\n", __func__,
65231ae08745Sheppo 		    ldcp->ldc_id, size);
6524d10e4ef2Snarayan 		freemsg(mp);
6525d10e4ef2Snarayan 		return (LDC_TX_FAILURE);
65261ae08745Sheppo 	}
65271ae08745Sheppo 
65281ae08745Sheppo 	/*
65291ae08745Sheppo 	 * Find a free descriptor in our buffer ring
65301ae08745Sheppo 	 */
65311ae08745Sheppo 	if (vsw_dring_find_free_desc(dp, &priv_desc, &idx) != 0) {
65321ae08745Sheppo 		if (warn_msg) {
65331ae08745Sheppo 			DERR(vswp, "%s(%lld): no descriptor available for ring "
65341ae08745Sheppo 			"at 0x%llx", __func__, ldcp->ldc_id, dp);
65351ae08745Sheppo 			warn_msg = 0;
65361ae08745Sheppo 		}
65371ae08745Sheppo 
65381ae08745Sheppo 		/* nothing more we can do */
65391ae08745Sheppo 		status = LDC_TX_NORESOURCES;
65401ae08745Sheppo 		goto vsw_descrsend_free_exit;
65411ae08745Sheppo 	} else {
65421ae08745Sheppo 		D2(vswp, "%s(%lld): free private descriptor found at pos "
65431ae08745Sheppo 			"%ld addr 0x%llx\n", __func__, ldcp->ldc_id, idx,
65441ae08745Sheppo 			priv_desc);
65451ae08745Sheppo 		warn_msg = 1;
65461ae08745Sheppo 	}
65471ae08745Sheppo 
65481ae08745Sheppo 	/* copy data into the descriptor */
65491ae08745Sheppo 	bufp = priv_desc->datap;
65501ae08745Sheppo 	for (bp = mp, n = 0; bp != NULL; bp = bp->b_cont) {
65511ae08745Sheppo 		n = MBLKL(bp);
65521ae08745Sheppo 		bcopy(bp->b_rptr, bufp, n);
65531ae08745Sheppo 		bufp += n;
65541ae08745Sheppo 	}
65551ae08745Sheppo 
65561ae08745Sheppo 	priv_desc->datalen = (size < (size_t)ETHERMIN) ? ETHERMIN : size;
65571ae08745Sheppo 
65581ae08745Sheppo 	/* create and send the in-band descp msg */
65591ae08745Sheppo 	ibnd_msg.hdr.tag.vio_msgtype = VIO_TYPE_DATA;
65601ae08745Sheppo 	ibnd_msg.hdr.tag.vio_subtype = VIO_SUBTYPE_INFO;
65611ae08745Sheppo 	ibnd_msg.hdr.tag.vio_subtype_env = VIO_DESC_DATA;
65621ae08745Sheppo 	ibnd_msg.hdr.tag.vio_sid = ldcp->local_session;
65631ae08745Sheppo 
6564d10e4ef2Snarayan 	mutex_enter(&ldcp->lane_out.seq_lock);
65651ae08745Sheppo 	ibnd_msg.hdr.seq_num = ldcp->lane_out.seq_num++;
6566d10e4ef2Snarayan 	mutex_exit(&ldcp->lane_out.seq_lock);
65671ae08745Sheppo 
65681ae08745Sheppo 	/*
65691ae08745Sheppo 	 * Copy the mem cookies describing the data from the
65701ae08745Sheppo 	 * private region of the descriptor ring into the inband
65711ae08745Sheppo 	 * descriptor.
65721ae08745Sheppo 	 */
65731ae08745Sheppo 	for (i = 0; i < priv_desc->ncookies; i++) {
65741ae08745Sheppo 		bcopy(&priv_desc->memcookie[i], &ibnd_msg.memcookie[i],
65751ae08745Sheppo 			sizeof (ldc_mem_cookie_t));
65761ae08745Sheppo 	}
65771ae08745Sheppo 
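	/*
	 * The descriptor index doubles as the handle, so the buffer can
	 * later be identified and reclaimed when the peer responds.
	 */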
65781ae08745Sheppo 	ibnd_msg.hdr.desc_handle = idx;
65791ae08745Sheppo 	ibnd_msg.ncookies = priv_desc->ncookies;
65801ae08745Sheppo 	ibnd_msg.nbytes = size;
65811ae08745Sheppo 
65821ae08745Sheppo 	vsw_send_msg(ldcp, (void *)&ibnd_msg, sizeof (vio_ibnd_desc_t));
65831ae08745Sheppo 
65841ae08745Sheppo vsw_descrsend_free_exit:
65851ae08745Sheppo 
65861ae08745Sheppo 	/* free the allocated message blocks */
65871ae08745Sheppo 	freemsg(mp);
65881ae08745Sheppo 
65891ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
65901ae08745Sheppo 	return (status);
65911ae08745Sheppo }
65921ae08745Sheppo 
65931ae08745Sheppo static void
6594*3af08d82Slm66018 vsw_send_ver(void *arg)
65951ae08745Sheppo {
6596*3af08d82Slm66018 	vsw_ldc_t	*ldcp = (vsw_ldc_t *)arg;
65971ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
65981ae08745Sheppo 	lane_t		*lp = &ldcp->lane_out;
65991ae08745Sheppo 	vio_ver_msg_t	ver_msg;
66001ae08745Sheppo 
66011ae08745Sheppo 	D1(vswp, "%s enter", __func__);
66021ae08745Sheppo 
66031ae08745Sheppo 	ver_msg.tag.vio_msgtype = VIO_TYPE_CTRL;
66041ae08745Sheppo 	ver_msg.tag.vio_subtype = VIO_SUBTYPE_INFO;
66051ae08745Sheppo 	ver_msg.tag.vio_subtype_env = VIO_VER_INFO;
66061ae08745Sheppo 	ver_msg.tag.vio_sid = ldcp->local_session;
66071ae08745Sheppo 
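	/*
	 * Propose the version held in the first entry of the
	 * vsw_versions[] table.
	 */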
66081ae08745Sheppo 	ver_msg.ver_major = vsw_versions[0].ver_major;
66091ae08745Sheppo 	ver_msg.ver_minor = vsw_versions[0].ver_minor;
66101ae08745Sheppo 	ver_msg.dev_class = VDEV_NETWORK_SWITCH;
66111ae08745Sheppo 
66121ae08745Sheppo 	lp->lstate |= VSW_VER_INFO_SENT;
66131ae08745Sheppo 	lp->ver_major = ver_msg.ver_major;
66141ae08745Sheppo 	lp->ver_minor = ver_msg.ver_minor;
66151ae08745Sheppo 
66161ae08745Sheppo 	DUMP_TAG(ver_msg.tag);
66171ae08745Sheppo 
66181ae08745Sheppo 	vsw_send_msg(ldcp, &ver_msg, sizeof (vio_ver_msg_t));
66191ae08745Sheppo 
66201ae08745Sheppo 	D1(vswp, "%s (%d): exit", __func__, ldcp->ldc_id);
66211ae08745Sheppo }
66221ae08745Sheppo 
66231ae08745Sheppo static void
66241ae08745Sheppo vsw_send_attr(vsw_ldc_t *ldcp)
66251ae08745Sheppo {
66261ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
66271ae08745Sheppo 	lane_t			*lp = &ldcp->lane_out;
66281ae08745Sheppo 	vnet_attr_msg_t		attr_msg;
66291ae08745Sheppo 
66301ae08745Sheppo 	D1(vswp, "%s (%ld) enter", __func__, ldcp->ldc_id);
66311ae08745Sheppo 
66321ae08745Sheppo 	/*
66331ae08745Sheppo 	 * Subtype is set to INFO by default
66341ae08745Sheppo 	 */
66351ae08745Sheppo 	attr_msg.tag.vio_msgtype = VIO_TYPE_CTRL;
66361ae08745Sheppo 	attr_msg.tag.vio_subtype = VIO_SUBTYPE_INFO;
66371ae08745Sheppo 	attr_msg.tag.vio_subtype_env = VIO_ATTR_INFO;
66381ae08745Sheppo 	attr_msg.tag.vio_sid = ldcp->local_session;
66391ae08745Sheppo 
66401ae08745Sheppo 	/* payload copied from default settings for lane */
66411ae08745Sheppo 	attr_msg.mtu = lp->mtu;
66421ae08745Sheppo 	attr_msg.addr_type = lp->addr_type;
66431ae08745Sheppo 	attr_msg.xfer_mode = lp->xfer_mode;
66441ae08745Sheppo 	attr_msg.ack_freq = lp->ack_freq;
66451ae08745Sheppo 
66461ae08745Sheppo 	READ_ENTER(&vswp->if_lockrw);
66471ae08745Sheppo 	bcopy(&(vswp->if_addr), &(attr_msg.addr), ETHERADDRL);
66481ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
66491ae08745Sheppo 
66501ae08745Sheppo 	ldcp->lane_out.lstate |= VSW_ATTR_INFO_SENT;
66511ae08745Sheppo 
66521ae08745Sheppo 	DUMP_TAG(attr_msg.tag);
66531ae08745Sheppo 
66541ae08745Sheppo 	vsw_send_msg(ldcp, &attr_msg, sizeof (vnet_attr_msg_t));
66551ae08745Sheppo 
66561ae08745Sheppo 	D1(vswp, "%s (%ld) exit", __func__, ldcp->ldc_id);
66571ae08745Sheppo }
66581ae08745Sheppo 
66591ae08745Sheppo /*
66601ae08745Sheppo  * Create dring info msg (which also results in the creation of
66611ae08745Sheppo  * a dring).
66621ae08745Sheppo  */
66631ae08745Sheppo static vio_dring_reg_msg_t *
66641ae08745Sheppo vsw_create_dring_info_pkt(vsw_ldc_t *ldcp)
66651ae08745Sheppo {
66661ae08745Sheppo 	vio_dring_reg_msg_t	*mp;
66671ae08745Sheppo 	dring_info_t		*dp;
66681ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
66691ae08745Sheppo 
66701ae08745Sheppo 	D1(vswp, "vsw_create_dring_info_pkt enter\n");
66711ae08745Sheppo 
66721ae08745Sheppo 	/*
66731ae08745Sheppo 	 * If we can't create a dring, there is obviously no point in sending
66741ae08745Sheppo 	 * a message.
66751ae08745Sheppo 	 */
66761ae08745Sheppo 	if ((dp = vsw_create_dring(ldcp)) == NULL)
66771ae08745Sheppo 		return (NULL);
66781ae08745Sheppo 
66791ae08745Sheppo 	mp = kmem_zalloc(sizeof (vio_dring_reg_msg_t), KM_SLEEP);
66801ae08745Sheppo 
66811ae08745Sheppo 	mp->tag.vio_msgtype = VIO_TYPE_CTRL;
66821ae08745Sheppo 	mp->tag.vio_subtype = VIO_SUBTYPE_INFO;
66831ae08745Sheppo 	mp->tag.vio_subtype_env = VIO_DRING_REG;
66841ae08745Sheppo 	mp->tag.vio_sid = ldcp->local_session;
66851ae08745Sheppo 
66861ae08745Sheppo 	/* payload */
66871ae08745Sheppo 	mp->num_descriptors = dp->num_descriptors;
66881ae08745Sheppo 	mp->descriptor_size = dp->descriptor_size;
66891ae08745Sheppo 	mp->options = dp->options;
66901ae08745Sheppo 	mp->ncookies = dp->ncookies;
66911ae08745Sheppo 	bcopy(&dp->cookie[0], &mp->cookie[0], sizeof (ldc_mem_cookie_t));
66921ae08745Sheppo 
66931ae08745Sheppo 	mp->dring_ident = 0;
66941ae08745Sheppo 
66951ae08745Sheppo 	D1(vswp, "vsw_create_dring_info_pkt exit\n");
66961ae08745Sheppo 
66971ae08745Sheppo 	return (mp);
66981ae08745Sheppo }
66991ae08745Sheppo 
67001ae08745Sheppo static void
67011ae08745Sheppo vsw_send_dring_info(vsw_ldc_t *ldcp)
67021ae08745Sheppo {
67031ae08745Sheppo 	vio_dring_reg_msg_t	*dring_msg;
67041ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
67051ae08745Sheppo 
67061ae08745Sheppo 	D1(vswp, "%s: (%ld) enter", __func__, ldcp->ldc_id);
67071ae08745Sheppo 
67081ae08745Sheppo 	dring_msg = vsw_create_dring_info_pkt(ldcp);
67091ae08745Sheppo 	if (dring_msg == NULL) {
67101ae08745Sheppo 		cmn_err(CE_WARN, "vsw_send_dring_info: error creating msg");
67111ae08745Sheppo 		return;
67121ae08745Sheppo 	}
67131ae08745Sheppo 
67141ae08745Sheppo 	ldcp->lane_out.lstate |= VSW_DRING_INFO_SENT;
67151ae08745Sheppo 
67161ae08745Sheppo 	DUMP_TAG_PTR((vio_msg_tag_t *)dring_msg);
67171ae08745Sheppo 
67181ae08745Sheppo 	vsw_send_msg(ldcp, dring_msg,
67191ae08745Sheppo 		sizeof (vio_dring_reg_msg_t));
67201ae08745Sheppo 
67211ae08745Sheppo 	kmem_free(dring_msg, sizeof (vio_dring_reg_msg_t));
67221ae08745Sheppo 
67231ae08745Sheppo 	D1(vswp, "%s: (%ld) exit", __func__, ldcp->ldc_id);
67241ae08745Sheppo }
67251ae08745Sheppo 
67261ae08745Sheppo static void
67271ae08745Sheppo vsw_send_rdx(vsw_ldc_t *ldcp)
67281ae08745Sheppo {
67291ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
67301ae08745Sheppo 	vio_rdx_msg_t	rdx_msg;
67311ae08745Sheppo 
67321ae08745Sheppo 	D1(vswp, "%s (%ld) enter", __func__, ldcp->ldc_id);
67331ae08745Sheppo 
67341ae08745Sheppo 	rdx_msg.tag.vio_msgtype = VIO_TYPE_CTRL;
67351ae08745Sheppo 	rdx_msg.tag.vio_subtype = VIO_SUBTYPE_INFO;
67361ae08745Sheppo 	rdx_msg.tag.vio_subtype_env = VIO_RDX;
67371ae08745Sheppo 	rdx_msg.tag.vio_sid = ldcp->local_session;
67381ae08745Sheppo 
67391ae08745Sheppo 	ldcp->lane_out.lstate |= VSW_RDX_INFO_SENT;
67401ae08745Sheppo 
67411ae08745Sheppo 	DUMP_TAG(rdx_msg.tag);
67421ae08745Sheppo 
67431ae08745Sheppo 	vsw_send_msg(ldcp, &rdx_msg, sizeof (vio_rdx_msg_t));
67441ae08745Sheppo 
67451ae08745Sheppo 	D1(vswp, "%s (%ld) exit", __func__, ldcp->ldc_id);
67461ae08745Sheppo }
67471ae08745Sheppo 
67481ae08745Sheppo /*
67491ae08745Sheppo  * Generic routine to send message out over ldc channel.
67501ae08745Sheppo  */
67511ae08745Sheppo static void
67521ae08745Sheppo vsw_send_msg(vsw_ldc_t *ldcp, void *msgp, int size)
67531ae08745Sheppo {
67541ae08745Sheppo 	int		rv, retries = vsw_wretries;
67551ae08745Sheppo 	size_t		msglen = size;
67561ae08745Sheppo 	vio_msg_tag_t	*tag = (vio_msg_tag_t *)msgp;
67571ae08745Sheppo 	vsw_t		*vswp = ldcp->ldc_vswp;
67581ae08745Sheppo 
67591ae08745Sheppo 	D1(vswp, "vsw_send_msg (%lld) enter : sending %d bytes",
67601ae08745Sheppo 			ldcp->ldc_id, size);
67611ae08745Sheppo 
67621ae08745Sheppo 	D2(vswp, "send_msg: type 0x%llx", tag->vio_msgtype);
67631ae08745Sheppo 	D2(vswp, "send_msg: stype 0x%llx", tag->vio_subtype);
67641ae08745Sheppo 	D2(vswp, "send_msg: senv 0x%llx", tag->vio_subtype_env);
67651ae08745Sheppo 
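	/*
	 * Serialise writes on this channel and retry while the channel is
	 * flow-controlled (EWOULDBLOCK), up to vsw_wretries attempts.
	 */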
67661ae08745Sheppo 	mutex_enter(&ldcp->ldc_txlock);
67671ae08745Sheppo 	do {
67681ae08745Sheppo 		msglen = size;
67691ae08745Sheppo 		rv = ldc_write(ldcp->ldc_handle, (caddr_t)msgp, &msglen);
67701ae08745Sheppo 	} while (rv == EWOULDBLOCK && --retries > 0);
67711ae08745Sheppo 
67721ae08745Sheppo 	if ((rv != 0) || (msglen != size)) {
67731ae08745Sheppo 		DERR(vswp, "vsw_send_msg:ldc_write failed: chan(%lld) "
67741ae08745Sheppo 			"rv(%d) size (%d) msglen(%d)\n", ldcp->ldc_id,
67751ae08745Sheppo 			rv, size, msglen);
67761ae08745Sheppo 	}
6777*3af08d82Slm66018 	mutex_exit(&ldcp->ldc_txlock);
6778*3af08d82Slm66018 
6779*3af08d82Slm66018 	/* channel has been reset */
6780*3af08d82Slm66018 	if (rv == ECONNRESET) {
6781*3af08d82Slm66018 		vsw_handle_reset(ldcp);
6782*3af08d82Slm66018 	}
67831ae08745Sheppo 
67841ae08745Sheppo 	D1(vswp, "vsw_send_msg (%lld) exit : sent %d bytes",
67851ae08745Sheppo 			ldcp->ldc_id, msglen);
67861ae08745Sheppo }
67871ae08745Sheppo 
67881ae08745Sheppo /*
67891ae08745Sheppo  * Add an entry into FDB, for the given mac address and port_id.
67901ae08745Sheppo  * Returns 0 on success, 1 on failure.
67911ae08745Sheppo  *
67921ae08745Sheppo  * Lock protecting FDB must be held by calling process.
67931ae08745Sheppo  */
67941ae08745Sheppo static int
67951ae08745Sheppo vsw_add_fdb(vsw_t *vswp, vsw_port_t *port)
67961ae08745Sheppo {
67971ae08745Sheppo 	uint64_t	addr = 0;
67981ae08745Sheppo 
67991ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
68001ae08745Sheppo 
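	/*
	 * Convert the MAC address into a form that can be used
	 * as a hash table key.
	 */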
68011ae08745Sheppo 	KEY_HASH(addr, port->p_macaddr);
68021ae08745Sheppo 
68031ae08745Sheppo 	D2(vswp, "%s: key = 0x%llx", __func__, addr);
68041ae08745Sheppo 
68051ae08745Sheppo 	/*
68061ae08745Sheppo 	 * Note: duplicate keys will be rejected by mod_hash.
68071ae08745Sheppo 	 */
68081ae08745Sheppo 	if (mod_hash_insert(vswp->fdb, (mod_hash_key_t)addr,
68091ae08745Sheppo 				(mod_hash_val_t)port) != 0) {
68101ae08745Sheppo 		DERR(vswp, "%s: unable to add entry into fdb.", __func__);
68111ae08745Sheppo 		return (1);
68121ae08745Sheppo 	}
68131ae08745Sheppo 
68141ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
68151ae08745Sheppo 	return (0);
68161ae08745Sheppo }
68171ae08745Sheppo 
68181ae08745Sheppo /*
68191ae08745Sheppo  * Remove an entry from FDB.
68201ae08745Sheppo  * Returns 0 on success, 1 on failure.
68211ae08745Sheppo  */
68221ae08745Sheppo static int
68231ae08745Sheppo vsw_del_fdb(vsw_t *vswp, vsw_port_t *port)
68241ae08745Sheppo {
68251ae08745Sheppo 	uint64_t	addr = 0;
68261ae08745Sheppo 
68271ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
68281ae08745Sheppo 
68291ae08745Sheppo 	KEY_HASH(addr, port->p_macaddr);
68301ae08745Sheppo 
68311ae08745Sheppo 	D2(vswp, "%s: key = 0x%llx", __func__, addr);
68321ae08745Sheppo 
68331ae08745Sheppo 	(void) mod_hash_destroy(vswp->fdb, (mod_hash_val_t)addr);
68341ae08745Sheppo 
68351ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
68361ae08745Sheppo 
68371ae08745Sheppo 	return (0);
68381ae08745Sheppo }
68391ae08745Sheppo 
68401ae08745Sheppo /*
68411ae08745Sheppo  * Search fdb for a given mac address.
68421ae08745Sheppo  * Returns pointer to the entry if found, else returns NULL.
68431ae08745Sheppo  */
68441ae08745Sheppo static vsw_port_t *
68451ae08745Sheppo vsw_lookup_fdb(vsw_t *vswp, struct ether_header *ehp)
68461ae08745Sheppo {
68471ae08745Sheppo 	uint64_t	key = 0;
68481ae08745Sheppo 	vsw_port_t	*port = NULL;
68491ae08745Sheppo 
68501ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
68511ae08745Sheppo 
68521ae08745Sheppo 	KEY_HASH(key, ehp->ether_dhost);
68531ae08745Sheppo 
68541ae08745Sheppo 	D2(vswp, "%s: key = 0x%llx", __func__, key);
68551ae08745Sheppo 
68561ae08745Sheppo 	if (mod_hash_find(vswp->fdb, (mod_hash_key_t)key,
68571ae08745Sheppo 				(mod_hash_val_t *)&port) != 0) {
68581ae08745Sheppo 		return (NULL);
68591ae08745Sheppo 	}
68601ae08745Sheppo 
68611ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
68621ae08745Sheppo 
68631ae08745Sheppo 	return (port);
68641ae08745Sheppo }
68651ae08745Sheppo 
68661ae08745Sheppo /*
68671ae08745Sheppo  * Add or remove multicast address(es).
68681ae08745Sheppo  *
68691ae08745Sheppo  * Returns 0 on success, 1 on failure.
68701ae08745Sheppo  */
68711ae08745Sheppo static int
68721ae08745Sheppo vsw_add_rem_mcst(vnet_mcast_msg_t *mcst_pkt, vsw_port_t *port)
68731ae08745Sheppo {
68741ae08745Sheppo 	mcst_addr_t		*mcst_p = NULL;
68751ae08745Sheppo 	vsw_t			*vswp = port->p_vswp;
68761ae08745Sheppo 	uint64_t		addr = 0x0;
6877e1ebb9ecSlm66018 	int			i, ret;
68781ae08745Sheppo 
68791ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
68801ae08745Sheppo 
68811ae08745Sheppo 	D2(vswp, "%s: %d addresses", __func__, mcst_pkt->count);
68821ae08745Sheppo 
6883e1ebb9ecSlm66018 	if (vswp->mh == NULL)
6884e1ebb9ecSlm66018 		return (1);
6885e1ebb9ecSlm66018 
68861ae08745Sheppo 	for (i = 0; i < mcst_pkt->count; i++) {
68871ae08745Sheppo 		/*
68881ae08745Sheppo 		 * Convert address into form that can be used
68891ae08745Sheppo 		 * as hash table key.
68901ae08745Sheppo 		 */
68911ae08745Sheppo 		KEY_HASH(addr, mcst_pkt->mca[i]);
68921ae08745Sheppo 
68931ae08745Sheppo 		/*
68941ae08745Sheppo 		 * Add or delete the specified address/port combination.
68951ae08745Sheppo 		 */
68961ae08745Sheppo 		if (mcst_pkt->set == 0x1) {
68971ae08745Sheppo 			D3(vswp, "%s: adding multicast address 0x%llx for "
68981ae08745Sheppo 				"port %ld", __func__, addr, port->p_instance);
68991ae08745Sheppo 			if (vsw_add_mcst(vswp, VSW_VNETPORT, addr, port) == 0) {
69001ae08745Sheppo 				/*
69011ae08745Sheppo 				 * Update the list of multicast
69021ae08745Sheppo 				 * addresses contained within the
69031ae08745Sheppo 				 * port structure to include this new
69041ae08745Sheppo 				 * one.
69051ae08745Sheppo 				 */
69061ae08745Sheppo 				mcst_p = kmem_alloc(sizeof (mcst_addr_t),
69071ae08745Sheppo 								KM_NOSLEEP);
69081ae08745Sheppo 				if (mcst_p == NULL) {
69091ae08745Sheppo 					DERR(vswp, "%s: unable to alloc mem",
69101ae08745Sheppo 						__func__);
69111ae08745Sheppo 					return (1);
69121ae08745Sheppo 				}
69131ae08745Sheppo 
69141ae08745Sheppo 				mcst_p->nextp = NULL;
69151ae08745Sheppo 				mcst_p->addr = addr;
69161ae08745Sheppo 
69171ae08745Sheppo 				mutex_enter(&port->mca_lock);
69181ae08745Sheppo 				mcst_p->nextp = port->mcap;
69191ae08745Sheppo 				port->mcap = mcst_p;
69201ae08745Sheppo 				mutex_exit(&port->mca_lock);
69211ae08745Sheppo 
69221ae08745Sheppo 				/*
69231ae08745Sheppo 				 * Program the address into HW. If the addr
69241ae08745Sheppo 				 * has already been programmed then the MAC
69251ae08745Sheppo 				 * just increments a ref counter (which is
69261ae08745Sheppo 				 * used when the address is being deleted)
69271ae08745Sheppo 				 */
6928e1ebb9ecSlm66018 				ret = mac_multicst_add(vswp->mh,
69291ae08745Sheppo 						(uchar_t *)&mcst_pkt->mca[i]);
6930e1ebb9ecSlm66018 				if (ret) {
6931e1ebb9ecSlm66018 					cmn_err(CE_WARN, "!unable to add "
6932e1ebb9ecSlm66018 						"multicast address");
6933e1ebb9ecSlm66018 					(void) vsw_del_mcst(vswp, VSW_VNETPORT,
6934e1ebb9ecSlm66018 						addr, port);
6935e1ebb9ecSlm66018 					vsw_del_addr(VSW_VNETPORT, port, addr);
6936e1ebb9ecSlm66018 					return (ret);
6937e1ebb9ecSlm66018 				}
69381ae08745Sheppo 
69391ae08745Sheppo 			} else {
69401ae08745Sheppo 				DERR(vswp, "%s: error adding multicast "
69411ae08745Sheppo 					"address 0x%llx for port %ld",
69421ae08745Sheppo 					__func__, addr, port->p_instance);
69431ae08745Sheppo 				return (1);
69441ae08745Sheppo 			}
69451ae08745Sheppo 		} else {
69461ae08745Sheppo 			/*
69471ae08745Sheppo 			 * Delete an entry from the multicast hash
69481ae08745Sheppo 			 * table and update the address list
69491ae08745Sheppo 			 * appropriately.
69501ae08745Sheppo 			 */
69511ae08745Sheppo 			if (vsw_del_mcst(vswp, VSW_VNETPORT, addr, port) == 0) {
69521ae08745Sheppo 				D3(vswp, "%s: deleting multicast address "
69531ae08745Sheppo 					"0x%llx for port %ld", __func__, addr,
69541ae08745Sheppo 					port->p_instance);
69551ae08745Sheppo 
69561ae08745Sheppo 				vsw_del_addr(VSW_VNETPORT, port, addr);
69571ae08745Sheppo 
69581ae08745Sheppo 				/*
69591ae08745Sheppo 				 * Remove the address from HW. The address
69601ae08745Sheppo 				 * will actually only be removed once the ref
69611ae08745Sheppo 				 * count within the MAC layer has dropped to
69621ae08745Sheppo 				 * zero. I.e. we can safely call this fn even
69631ae08745Sheppo 				 * if other ports are interested in this
69641ae08745Sheppo 				 * address.
69651ae08745Sheppo 				 */
69661ae08745Sheppo 				(void) mac_multicst_remove(vswp->mh,
69671ae08745Sheppo 						(uchar_t *)&mcst_pkt->mca[i]);
69681ae08745Sheppo 
69691ae08745Sheppo 			} else {
69701ae08745Sheppo 				DERR(vswp, "%s: error deleting multicast "
69711ae08745Sheppo 					"addr 0x%llx for port %ld",
69721ae08745Sheppo 					__func__, addr, port->p_instance);
69731ae08745Sheppo 				return (1);
69741ae08745Sheppo 			}
69751ae08745Sheppo 		}
69761ae08745Sheppo 	}
69771ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
69781ae08745Sheppo 	return (0);
69791ae08745Sheppo }
69801ae08745Sheppo 
69811ae08745Sheppo /*
69821ae08745Sheppo  * Add a new multicast entry.
69831ae08745Sheppo  *
69841ae08745Sheppo  * Search hash table based on address. If match found then
69851ae08745Sheppo  * update associated val (which is chain of ports), otherwise
69861ae08745Sheppo  * create new key/val (addr/port) pair and insert into table.
69871ae08745Sheppo  */
69881ae08745Sheppo static int
69891ae08745Sheppo vsw_add_mcst(vsw_t *vswp, uint8_t devtype, uint64_t addr, void *arg)
69901ae08745Sheppo {
69911ae08745Sheppo 	int		dup = 0;
69921ae08745Sheppo 	int		rv = 0;
69931ae08745Sheppo 	mfdb_ent_t	*ment = NULL;
69941ae08745Sheppo 	mfdb_ent_t	*tmp_ent = NULL;
69951ae08745Sheppo 	mfdb_ent_t	*new_ent = NULL;
69961ae08745Sheppo 	void		*tgt = NULL;
69971ae08745Sheppo 
69981ae08745Sheppo 	if (devtype == VSW_VNETPORT) {
69991ae08745Sheppo 		/*
70001ae08745Sheppo 		 * Being invoked from a vnet.
70011ae08745Sheppo 		 */
70021ae08745Sheppo 		ASSERT(arg != NULL);
70031ae08745Sheppo 		tgt = arg;
70041ae08745Sheppo 		D2(NULL, "%s: port %d : address 0x%llx", __func__,
70051ae08745Sheppo 			((vsw_port_t *)arg)->p_instance, addr);
70061ae08745Sheppo 	} else {
70071ae08745Sheppo 		/*
70081ae08745Sheppo 		 * We are being invoked via the m_multicst mac entry
70091ae08745Sheppo 		 * point.
70101ae08745Sheppo 		 */
70111ae08745Sheppo 		D2(NULL, "%s: address 0x%llx", __func__, addr);
70121ae08745Sheppo 		tgt = (void *)vswp;
70131ae08745Sheppo 	}
70141ae08745Sheppo 
70151ae08745Sheppo 	WRITE_ENTER(&vswp->mfdbrw);
70161ae08745Sheppo 	if (mod_hash_find(vswp->mfdb, (mod_hash_key_t)addr,
70171ae08745Sheppo 				(mod_hash_val_t *)&ment) != 0) {
70181ae08745Sheppo 
70191ae08745Sheppo 		/* address not currently in table */
70201ae08745Sheppo 		ment = kmem_alloc(sizeof (mfdb_ent_t), KM_SLEEP);
70211ae08745Sheppo 		ment->d_addr = (void *)tgt;
70221ae08745Sheppo 		ment->d_type = devtype;
70231ae08745Sheppo 		ment->nextp = NULL;
70241ae08745Sheppo 
70251ae08745Sheppo 		if (mod_hash_insert(vswp->mfdb, (mod_hash_key_t)addr,
70261ae08745Sheppo 			(mod_hash_val_t)ment) != 0) {
70271ae08745Sheppo 			DERR(vswp, "%s: hash table insertion failed", __func__);
70281ae08745Sheppo 			kmem_free(ment, sizeof (mfdb_ent_t));
70291ae08745Sheppo 			rv = 1;
70301ae08745Sheppo 		} else {
70311ae08745Sheppo 			D2(vswp, "%s: added initial entry for 0x%llx to "
70321ae08745Sheppo 				"table", __func__, addr);
70331ae08745Sheppo 		}
70341ae08745Sheppo 	} else {
70351ae08745Sheppo 		/*
70361ae08745Sheppo 		 * Address in table. Check to see if specified port
70371ae08745Sheppo 		 * is already associated with the address. If not add
70381ae08745Sheppo 		 * it now.
70391ae08745Sheppo 		 */
70401ae08745Sheppo 		tmp_ent = ment;
70411ae08745Sheppo 		while (tmp_ent != NULL) {
70421ae08745Sheppo 			if (tmp_ent->d_addr == (void *)tgt) {
70431ae08745Sheppo 				if (devtype == VSW_VNETPORT) {
70441ae08745Sheppo 					DERR(vswp, "%s: duplicate port entry "
70451ae08745Sheppo 						"found for portid %ld and key "
70461ae08745Sheppo 						"0x%llx", __func__,
70471ae08745Sheppo 						((vsw_port_t *)arg)->p_instance,
70481ae08745Sheppo 						addr);
70491ae08745Sheppo 				} else {
70501ae08745Sheppo 					DERR(vswp, "%s: duplicate entry found "
70511ae08745Sheppo 						"for key 0x%llx",
70521ae08745Sheppo 						__func__, addr);
70531ae08745Sheppo 				}
70541ae08745Sheppo 				rv = 1;
70551ae08745Sheppo 				dup = 1;
70561ae08745Sheppo 				break;
70571ae08745Sheppo 			}
70581ae08745Sheppo 			tmp_ent = tmp_ent->nextp;
70591ae08745Sheppo 		}
70601ae08745Sheppo 
70611ae08745Sheppo 		/*
70621ae08745Sheppo 		 * Port not on list so add it to end now.
70631ae08745Sheppo 		 */
70641ae08745Sheppo 		if (0 == dup) {
70651ae08745Sheppo 			D2(vswp, "%s: added entry for 0x%llx to table",
70661ae08745Sheppo 				__func__, addr);
70671ae08745Sheppo 			new_ent = kmem_alloc(sizeof (mfdb_ent_t), KM_SLEEP);
70681ae08745Sheppo 			new_ent->d_addr = (void *)tgt;
70691ae08745Sheppo 			new_ent->d_type = devtype;
70701ae08745Sheppo 			new_ent->nextp = NULL;
70711ae08745Sheppo 
70721ae08745Sheppo 			tmp_ent = ment;
70731ae08745Sheppo 			while (tmp_ent->nextp != NULL)
70741ae08745Sheppo 				tmp_ent = tmp_ent->nextp;
70751ae08745Sheppo 
70761ae08745Sheppo 			tmp_ent->nextp = new_ent;
70771ae08745Sheppo 		}
70781ae08745Sheppo 	}
70791ae08745Sheppo 
70801ae08745Sheppo 	RW_EXIT(&vswp->mfdbrw);
70811ae08745Sheppo 	return (rv);
70821ae08745Sheppo }
70831ae08745Sheppo 
70841ae08745Sheppo /*
70851ae08745Sheppo  * Remove a multicast entry from the hashtable.
70861ae08745Sheppo  *
70871ae08745Sheppo  * Search hash table based on address. If match found, scan
70881ae08745Sheppo  * list of ports associated with address. If specified port
70891ae08745Sheppo  * found remove it from list.
70901ae08745Sheppo  */
70911ae08745Sheppo static int
70921ae08745Sheppo vsw_del_mcst(vsw_t *vswp, uint8_t devtype, uint64_t addr, void *arg)
70931ae08745Sheppo {
70941ae08745Sheppo 	mfdb_ent_t	*ment = NULL;
70951ae08745Sheppo 	mfdb_ent_t	*curr_p, *prev_p;
70961ae08745Sheppo 	void		*tgt = NULL;
70971ae08745Sheppo 
70981ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
70991ae08745Sheppo 
71001ae08745Sheppo 	if (devtype == VSW_VNETPORT) {
71011ae08745Sheppo 		tgt = (vsw_port_t *)arg;
71021ae08745Sheppo 		D2(vswp, "%s: removing port %d from mFDB for address"
71031ae08745Sheppo 			" 0x%llx", __func__, ((vsw_port_t *)tgt)->p_instance,
71041ae08745Sheppo 			addr);
71051ae08745Sheppo 	} else {
71061ae08745Sheppo 		D2(vswp, "%s: removing entry", __func__);
71071ae08745Sheppo 		tgt = (void *)vswp;
71081ae08745Sheppo 	}
71091ae08745Sheppo 
71101ae08745Sheppo 	WRITE_ENTER(&vswp->mfdbrw);
71111ae08745Sheppo 	if (mod_hash_find(vswp->mfdb, (mod_hash_key_t)addr,
71121ae08745Sheppo 				(mod_hash_val_t *)&ment) != 0) {
71131ae08745Sheppo 		D2(vswp, "%s: address 0x%llx not in table", __func__, addr);
71141ae08745Sheppo 		RW_EXIT(&vswp->mfdbrw);
71151ae08745Sheppo 		return (1);
71161ae08745Sheppo 	}
71171ae08745Sheppo 
71181ae08745Sheppo 	prev_p = curr_p = ment;
71191ae08745Sheppo 
71201ae08745Sheppo 	while (curr_p != NULL) {
71211ae08745Sheppo 		if (curr_p->d_addr == (void *)tgt) {
71221ae08745Sheppo 			if (devtype == VSW_VNETPORT) {
71231ae08745Sheppo 				D2(vswp, "%s: port %d found", __func__,
71241ae08745Sheppo 					((vsw_port_t *)tgt)->p_instance);
71251ae08745Sheppo 			} else {
71261ae08745Sheppo 				D2(vswp, "%s: instance found", __func__);
71271ae08745Sheppo 			}
71281ae08745Sheppo 
71291ae08745Sheppo 			if (prev_p == curr_p) {
71301ae08745Sheppo 				/*
71311ae08745Sheppo 				 * Head of list. If no other element is in
71321ae08745Sheppo 				 * the list then destroy this entry, otherwise
71331ae08745Sheppo 				 * just replace it with the updated value.
71341ae08745Sheppo 				 */
71351ae08745Sheppo 				ment = curr_p->nextp;
71361ae08745Sheppo 				kmem_free(curr_p, sizeof (mfdb_ent_t));
71371ae08745Sheppo 				if (ment == NULL) {
71381ae08745Sheppo 					(void) mod_hash_destroy(vswp->mfdb,
71391ae08745Sheppo 							(mod_hash_val_t)addr);
71401ae08745Sheppo 				} else {
71411ae08745Sheppo 					(void) mod_hash_replace(vswp->mfdb,
71421ae08745Sheppo 							(mod_hash_key_t)addr,
71431ae08745Sheppo 							(mod_hash_val_t)ment);
71441ae08745Sheppo 				}
71451ae08745Sheppo 			} else {
71461ae08745Sheppo 				/*
71471ae08745Sheppo 				 * Not head of list, no need to do
71481ae08745Sheppo 				 * replacement, just adjust list pointers.
71491ae08745Sheppo 				 */
71501ae08745Sheppo 				prev_p->nextp = curr_p->nextp;
71511ae08745Sheppo 				kmem_free(curr_p, sizeof (mfdb_ent_t));
71521ae08745Sheppo 			}
71531ae08745Sheppo 			break;
71541ae08745Sheppo 		}
71551ae08745Sheppo 
71561ae08745Sheppo 		prev_p = curr_p;
71571ae08745Sheppo 		curr_p = curr_p->nextp;
71581ae08745Sheppo 	}
71591ae08745Sheppo 
71601ae08745Sheppo 	RW_EXIT(&vswp->mfdbrw);
71611ae08745Sheppo 
71621ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
71631ae08745Sheppo 
71641ae08745Sheppo 	return (0);
71651ae08745Sheppo }
71661ae08745Sheppo 
71671ae08745Sheppo /*
71681ae08745Sheppo  * Port is being deleted, but has registered an interest in one
71691ae08745Sheppo  * or more multicast groups. Using the list of addresses maintained
71701ae08745Sheppo  * within the port structure find the appropriate entry in the hash
71711ae08745Sheppo  * table and remove this port from the list of interested ports.
71721ae08745Sheppo  */
71731ae08745Sheppo static void
71741ae08745Sheppo vsw_del_mcst_port(vsw_port_t *port)
71751ae08745Sheppo {
71761ae08745Sheppo 	mcst_addr_t	*mcst_p = NULL;
71771ae08745Sheppo 	vsw_t		*vswp = port->p_vswp;
71781ae08745Sheppo 
71791ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
71801ae08745Sheppo 
71811ae08745Sheppo 	mutex_enter(&port->mca_lock);
71821ae08745Sheppo 	while (port->mcap != NULL) {
71831ae08745Sheppo 		(void) vsw_del_mcst(vswp, VSW_VNETPORT,
71841ae08745Sheppo 					port->mcap->addr, port);
71851ae08745Sheppo 
71861ae08745Sheppo 		mcst_p = port->mcap->nextp;
71871ae08745Sheppo 		kmem_free(port->mcap, sizeof (mcst_addr_t));
71881ae08745Sheppo 		port->mcap = mcst_p;
71891ae08745Sheppo 	}
71901ae08745Sheppo 	mutex_exit(&port->mca_lock);
71911ae08745Sheppo 
71921ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
71931ae08745Sheppo }
71941ae08745Sheppo 
71951ae08745Sheppo /*
71961ae08745Sheppo  * This vsw instance is detaching, but has registered an interest in one
71971ae08745Sheppo  * or more multicast groups. Using the list of addresses maintained
71981ae08745Sheppo  * within the vsw structure find the appropriate entry in the hash
71991ae08745Sheppo  * table and remove this instance from the list of interested ports.
72001ae08745Sheppo  */
72011ae08745Sheppo static void
72021ae08745Sheppo vsw_del_mcst_vsw(vsw_t *vswp)
72031ae08745Sheppo {
72041ae08745Sheppo 	mcst_addr_t	*next_p = NULL;
72051ae08745Sheppo 
72061ae08745Sheppo 	D1(vswp, "%s: enter", __func__);
72071ae08745Sheppo 
72081ae08745Sheppo 	mutex_enter(&vswp->mca_lock);
72091ae08745Sheppo 
72101ae08745Sheppo 	while (vswp->mcap != NULL) {
72111ae08745Sheppo 		DERR(vswp, "%s: deleting addr 0x%llx",
72121ae08745Sheppo 			__func__, vswp->mcap->addr);
72131ae08745Sheppo 		(void) vsw_del_mcst(vswp, VSW_LOCALDEV,
72141ae08745Sheppo 				vswp->mcap->addr, NULL);
72151ae08745Sheppo 
72161ae08745Sheppo 		next_p = vswp->mcap->nextp;
72171ae08745Sheppo 		kmem_free(vswp->mcap, sizeof (mcst_addr_t));
72181ae08745Sheppo 		vswp->mcap = next_p;
72191ae08745Sheppo 	}
72201ae08745Sheppo 
72211ae08745Sheppo 	vswp->mcap = NULL;
72221ae08745Sheppo 	mutex_exit(&vswp->mca_lock);
72231ae08745Sheppo 
72241ae08745Sheppo 	D1(vswp, "%s: exit", __func__);
72251ae08745Sheppo }
72261ae08745Sheppo 
72271ae08745Sheppo 
72281ae08745Sheppo /*
72291ae08745Sheppo  * Remove the specified address from the list of address maintained
72301ae08745Sheppo  * Remove the specified address from the list of addresses maintained
72311ae08745Sheppo  * in the given port or vsw instance node.
72321ae08745Sheppo static void
72331ae08745Sheppo vsw_del_addr(uint8_t devtype, void *arg, uint64_t addr)
72341ae08745Sheppo {
72351ae08745Sheppo 	vsw_t		*vswp = NULL;
72361ae08745Sheppo 	vsw_port_t	*port = NULL;
72371ae08745Sheppo 	mcst_addr_t	*prev_p = NULL;
72381ae08745Sheppo 	mcst_addr_t	*curr_p = NULL;
72391ae08745Sheppo 
72401ae08745Sheppo 	D1(NULL, "%s: enter : devtype %d : addr 0x%llx",
72411ae08745Sheppo 		__func__, devtype, addr);
72421ae08745Sheppo 
72431ae08745Sheppo 	if (devtype == VSW_VNETPORT) {
72441ae08745Sheppo 		port = (vsw_port_t *)arg;
72451ae08745Sheppo 		mutex_enter(&port->mca_lock);
72461ae08745Sheppo 		prev_p = curr_p = port->mcap;
72471ae08745Sheppo 	} else {
72481ae08745Sheppo 		vswp = (vsw_t *)arg;
72491ae08745Sheppo 		mutex_enter(&vswp->mca_lock);
72501ae08745Sheppo 		prev_p = curr_p = vswp->mcap;
72511ae08745Sheppo 	}
72521ae08745Sheppo 
72531ae08745Sheppo 	while (curr_p != NULL) {
72541ae08745Sheppo 		if (curr_p->addr == addr) {
72551ae08745Sheppo 			D2(NULL, "%s: address found", __func__);
72561ae08745Sheppo 			/* match found */
72571ae08745Sheppo 			if (prev_p == curr_p) {
72581ae08745Sheppo 				/* list head */
72591ae08745Sheppo 				if (devtype == VSW_VNETPORT)
72601ae08745Sheppo 					port->mcap = curr_p->nextp;
72611ae08745Sheppo 				else
72621ae08745Sheppo 					vswp->mcap = curr_p->nextp;
72631ae08745Sheppo 			} else {
72641ae08745Sheppo 				prev_p->nextp = curr_p->nextp;
72651ae08745Sheppo 			}
72661ae08745Sheppo 			kmem_free(curr_p, sizeof (mcst_addr_t));
72671ae08745Sheppo 			break;
72681ae08745Sheppo 		} else {
72691ae08745Sheppo 			prev_p = curr_p;
72701ae08745Sheppo 			curr_p = curr_p->nextp;
72711ae08745Sheppo 		}
72721ae08745Sheppo 	}
72731ae08745Sheppo 
72741ae08745Sheppo 	if (devtype == VSW_VNETPORT)
72751ae08745Sheppo 		mutex_exit(&port->mca_lock);
72761ae08745Sheppo 	else
72771ae08745Sheppo 		mutex_exit(&vswp->mca_lock);
72781ae08745Sheppo 
72791ae08745Sheppo 	D1(NULL, "%s: exit", __func__);
72801ae08745Sheppo }
72811ae08745Sheppo 
72821ae08745Sheppo /*
72831ae08745Sheppo  * Creates a descriptor ring (dring) and links it into the
72841ae08745Sheppo  * list of outbound drings for this channel.
72851ae08745Sheppo  *
72861ae08745Sheppo  * Returns NULL if creation failed.
72871ae08745Sheppo  */
72881ae08745Sheppo static dring_info_t *
72891ae08745Sheppo vsw_create_dring(vsw_ldc_t *ldcp)
72901ae08745Sheppo {
72911ae08745Sheppo 	vsw_private_desc_t	*priv_addr = NULL;
72921ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
72931ae08745Sheppo 	ldc_mem_info_t		minfo;
72941ae08745Sheppo 	dring_info_t		*dp, *tp;
72951ae08745Sheppo 	int			i;
72961ae08745Sheppo 
72971ae08745Sheppo 	dp = (dring_info_t *)kmem_zalloc(sizeof (dring_info_t), KM_SLEEP);
72981ae08745Sheppo 
72991ae08745Sheppo 	mutex_init(&dp->dlock, NULL, MUTEX_DRIVER, NULL);
73001ae08745Sheppo 
73011ae08745Sheppo 	/* create public section of ring */
73021ae08745Sheppo 	if ((ldc_mem_dring_create(VSW_RING_NUM_EL,
73031ae08745Sheppo 			VSW_PUB_SIZE, &dp->handle)) != 0) {
73041ae08745Sheppo 
73051ae08745Sheppo 		DERR(vswp, "vsw_create_dring(%lld): ldc dring create "
73061ae08745Sheppo 			"failed", ldcp->ldc_id);
73071ae08745Sheppo 		goto create_fail_exit;
73081ae08745Sheppo 	}
73091ae08745Sheppo 
73101ae08745Sheppo 	ASSERT(dp->handle != NULL);
73111ae08745Sheppo 
73121ae08745Sheppo 	/*
73131ae08745Sheppo 	 * Get the base address of the public section of the ring.
73141ae08745Sheppo 	 */
73151ae08745Sheppo 	if ((ldc_mem_dring_info(dp->handle, &minfo)) != 0) {
73161ae08745Sheppo 		DERR(vswp, "vsw_create_dring(%lld): dring info failed\n",
73171ae08745Sheppo 			ldcp->ldc_id);
73181ae08745Sheppo 		goto dring_fail_exit;
73191ae08745Sheppo 	} else {
73201ae08745Sheppo 		ASSERT(minfo.vaddr != 0);
73211ae08745Sheppo 		dp->pub_addr = minfo.vaddr;
73221ae08745Sheppo 	}
73231ae08745Sheppo 
73241ae08745Sheppo 	dp->num_descriptors = VSW_RING_NUM_EL;
73251ae08745Sheppo 	dp->descriptor_size = VSW_PUB_SIZE;
73261ae08745Sheppo 	dp->options = VIO_TX_DRING;
73271ae08745Sheppo 	dp->ncookies = 1;	/* guaranteed by ldc */
73281ae08745Sheppo 
73291ae08745Sheppo 	/*
73301ae08745Sheppo 	 * create private portion of ring
73311ae08745Sheppo 	 */
73321ae08745Sheppo 	dp->priv_addr = (vsw_private_desc_t *)kmem_zalloc(
73331ae08745Sheppo 		(sizeof (vsw_private_desc_t) * VSW_RING_NUM_EL), KM_SLEEP);
73341ae08745Sheppo 
73351ae08745Sheppo 	if (vsw_setup_ring(ldcp, dp)) {
73361ae08745Sheppo 		DERR(vswp, "%s: unable to setup ring", __func__);
73371ae08745Sheppo 		goto dring_fail_exit;
73381ae08745Sheppo 	}
73391ae08745Sheppo 
73401ae08745Sheppo 	/* haven't used any descriptors yet */
73411ae08745Sheppo 	dp->end_idx = 0;
7342d10e4ef2Snarayan 	dp->last_ack_recv = -1;
73431ae08745Sheppo 
73441ae08745Sheppo 	/* bind dring to the channel */
73451ae08745Sheppo 	if ((ldc_mem_dring_bind(ldcp->ldc_handle, dp->handle,
73461ae08745Sheppo 		LDC_SHADOW_MAP, LDC_MEM_RW,
73471ae08745Sheppo 		&dp->cookie[0], &dp->ncookies)) != 0) {
73481ae08745Sheppo 		DERR(vswp, "vsw_create_dring: unable to bind to channel "
73491ae08745Sheppo 			"%lld", ldcp->ldc_id);
73501ae08745Sheppo 		goto dring_fail_exit;
73511ae08745Sheppo 	}
73521ae08745Sheppo 
7353d10e4ef2Snarayan 	mutex_init(&dp->restart_lock, NULL, MUTEX_DRIVER, NULL);
7354d10e4ef2Snarayan 	dp->restart_reqd = B_TRUE;
7355d10e4ef2Snarayan 
73561ae08745Sheppo 	/*
73571ae08745Sheppo 	 * Only ever create rings for outgoing lane. Link it onto
73581ae08745Sheppo 	 * end of list.
73591ae08745Sheppo 	 */
73601ae08745Sheppo 	if (ldcp->lane_out.dringp == NULL) {
73611ae08745Sheppo 		D2(vswp, "vsw_create_dring: adding first outbound ring");
73621ae08745Sheppo 		ldcp->lane_out.dringp = dp;
73631ae08745Sheppo 	} else {
73641ae08745Sheppo 		tp = ldcp->lane_out.dringp;
73651ae08745Sheppo 		while (tp->next != NULL)
73661ae08745Sheppo 			tp = tp->next;
73671ae08745Sheppo 
73681ae08745Sheppo 		tp->next = dp;
73691ae08745Sheppo 	}
73701ae08745Sheppo 
73711ae08745Sheppo 	return (dp);
73721ae08745Sheppo 
73731ae08745Sheppo dring_fail_exit:
73741ae08745Sheppo 	(void) ldc_mem_dring_destroy(dp->handle);
73751ae08745Sheppo 
73761ae08745Sheppo create_fail_exit:
73771ae08745Sheppo 	if (dp->priv_addr != NULL) {
73781ae08745Sheppo 		priv_addr = dp->priv_addr;
73791ae08745Sheppo 		for (i = 0; i < VSW_RING_NUM_EL; i++) {
73801ae08745Sheppo 			if (priv_addr->memhandle != NULL)
73811ae08745Sheppo 				(void) ldc_mem_free_handle(
73821ae08745Sheppo 						priv_addr->memhandle);
73831ae08745Sheppo 			priv_addr++;
73841ae08745Sheppo 		}
73851ae08745Sheppo 		kmem_free(dp->priv_addr,
73861ae08745Sheppo 			(sizeof (vsw_private_desc_t) * VSW_RING_NUM_EL));
73871ae08745Sheppo 	}
73881ae08745Sheppo 	mutex_destroy(&dp->dlock);
73891ae08745Sheppo 
73901ae08745Sheppo 	kmem_free(dp, sizeof (dring_info_t));
73911ae08745Sheppo 	return (NULL);
73921ae08745Sheppo }
73931ae08745Sheppo 
73941ae08745Sheppo /*
73951ae08745Sheppo  * Create a ring consisting of just a private portion and link
73961ae08745Sheppo  * it into the list of rings for the outbound lane.
73971ae08745Sheppo  *
73981ae08745Sheppo  * Rings of this type are used primarily for temporary data
73991ae08745Sheppo  * storage (i.e. as data buffers).
74001ae08745Sheppo  */
74011ae08745Sheppo void
74021ae08745Sheppo vsw_create_privring(vsw_ldc_t *ldcp)
74031ae08745Sheppo {
74041ae08745Sheppo 	dring_info_t		*dp, *tp;
74051ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
74061ae08745Sheppo 
74071ae08745Sheppo 	D1(vswp, "%s(%lld): enter", __func__, ldcp->ldc_id);
74081ae08745Sheppo 
74091ae08745Sheppo 	dp = kmem_zalloc(sizeof (dring_info_t), KM_SLEEP);
74101ae08745Sheppo 
74111ae08745Sheppo 	mutex_init(&dp->dlock, NULL, MUTEX_DRIVER, NULL);
74121ae08745Sheppo 
74131ae08745Sheppo 	/* no public section */
74141ae08745Sheppo 	dp->pub_addr = NULL;
74151ae08745Sheppo 
74161ae08745Sheppo 	dp->priv_addr = kmem_zalloc((sizeof (vsw_private_desc_t) *
74171ae08745Sheppo 					VSW_RING_NUM_EL), KM_SLEEP);
74181ae08745Sheppo 
74194bac2208Snarayan 	dp->num_descriptors = VSW_RING_NUM_EL;
74204bac2208Snarayan 
74211ae08745Sheppo 	if (vsw_setup_ring(ldcp, dp)) {
74221ae08745Sheppo 		DERR(vswp, "%s: setup of ring failed", __func__);
74231ae08745Sheppo 		kmem_free(dp->priv_addr,
74241ae08745Sheppo 			(sizeof (vsw_private_desc_t) * VSW_RING_NUM_EL));
74251ae08745Sheppo 		mutex_destroy(&dp->dlock);
74261ae08745Sheppo 		kmem_free(dp, sizeof (dring_info_t));
74271ae08745Sheppo 		return;
74281ae08745Sheppo 	}
74291ae08745Sheppo 
74301ae08745Sheppo 	/* haven't used any descriptors yet */
74311ae08745Sheppo 	dp->end_idx = 0;
74321ae08745Sheppo 
7433d10e4ef2Snarayan 	mutex_init(&dp->restart_lock, NULL, MUTEX_DRIVER, NULL);
7434d10e4ef2Snarayan 	dp->restart_reqd = B_TRUE;
7435d10e4ef2Snarayan 
74361ae08745Sheppo 	/*
74371ae08745Sheppo 	 * Only ever create rings for outgoing lane. Link it onto
74381ae08745Sheppo 	 * end of list.
74391ae08745Sheppo 	 */
74401ae08745Sheppo 	if (ldcp->lane_out.dringp == NULL) {
74411ae08745Sheppo 		D2(vswp, "%s: adding first outbound privring", __func__);
74421ae08745Sheppo 		ldcp->lane_out.dringp = dp;
74431ae08745Sheppo 	} else {
74441ae08745Sheppo 		tp = ldcp->lane_out.dringp;
74451ae08745Sheppo 		while (tp->next != NULL)
74461ae08745Sheppo 			tp = tp->next;
74471ae08745Sheppo 
74481ae08745Sheppo 		tp->next = dp;
74491ae08745Sheppo 	}
74501ae08745Sheppo 
74511ae08745Sheppo 	D1(vswp, "%s(%lld): exit", __func__, ldcp->ldc_id);
74521ae08745Sheppo }
74531ae08745Sheppo 
74541ae08745Sheppo /*
74551ae08745Sheppo  * Setup the descriptors in the dring. Returns 0 on success, 1 on
74561ae08745Sheppo  * failure.
74571ae08745Sheppo  */
74581ae08745Sheppo int
74591ae08745Sheppo vsw_setup_ring(vsw_ldc_t *ldcp, dring_info_t *dp)
74601ae08745Sheppo {
74611ae08745Sheppo 	vnet_public_desc_t	*pub_addr = NULL;
74621ae08745Sheppo 	vsw_private_desc_t	*priv_addr = NULL;
74631ae08745Sheppo 	vsw_t			*vswp = ldcp->ldc_vswp;
74641ae08745Sheppo 	uint64_t		*tmpp;
74651ae08745Sheppo 	uint64_t		offset = 0;
74661ae08745Sheppo 	uint32_t		ncookies = 0;
74671ae08745Sheppo 	static char		*name = "vsw_setup_ring";
7468d10e4ef2Snarayan 	int			i, j, nc, rv;
74691ae08745Sheppo 
74701ae08745Sheppo 	priv_addr = dp->priv_addr;
74711ae08745Sheppo 	pub_addr = dp->pub_addr;
74721ae08745Sheppo 
7473d10e4ef2Snarayan 	/* public section may be null but private should never be */
7474d10e4ef2Snarayan 	ASSERT(priv_addr != NULL);
7475d10e4ef2Snarayan 
74761ae08745Sheppo 	/*
74771ae08745Sheppo 	 * Allocate the region of memory which will be used to hold
74781ae08745Sheppo 	 * the data the descriptors will refer to.
74791ae08745Sheppo 	 */
74801ae08745Sheppo 	dp->data_sz = (VSW_RING_NUM_EL * VSW_RING_EL_DATA_SZ);
74811ae08745Sheppo 	dp->data_addr = kmem_alloc(dp->data_sz, KM_SLEEP);
74821ae08745Sheppo 
74831ae08745Sheppo 	D2(vswp, "%s: allocated %lld bytes at 0x%llx\n", name,
74841ae08745Sheppo 		dp->data_sz, dp->data_addr);
74851ae08745Sheppo 
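	/*
	 * tmpp walks the data area in uint64_t sized steps; offset is the
	 * per-descriptor stride expressed in those units.
	 */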
74861ae08745Sheppo 	tmpp = (uint64_t *)dp->data_addr;
74871ae08745Sheppo 	offset = VSW_RING_EL_DATA_SZ / sizeof (*tmpp);
74881ae08745Sheppo 
74891ae08745Sheppo 	/*
74901ae08745Sheppo 	 * Initialise some of the private and public (if they exist)
74911ae08745Sheppo 	 * descriptor fields.
74921ae08745Sheppo 	 */
74931ae08745Sheppo 	for (i = 0; i < VSW_RING_NUM_EL; i++) {
7494d10e4ef2Snarayan 		mutex_init(&priv_addr->dstate_lock, NULL, MUTEX_DRIVER, NULL);
7495d10e4ef2Snarayan 
74961ae08745Sheppo 		if ((ldc_mem_alloc_handle(ldcp->ldc_handle,
74971ae08745Sheppo 			&priv_addr->memhandle)) != 0) {
74981ae08745Sheppo 			DERR(vswp, "%s: alloc mem handle failed", name);
74991ae08745Sheppo 			goto setup_ring_cleanup;
75001ae08745Sheppo 		}
75011ae08745Sheppo 
75021ae08745Sheppo 		priv_addr->datap = (void *)tmpp;
75031ae08745Sheppo 
75041ae08745Sheppo 		rv = ldc_mem_bind_handle(priv_addr->memhandle,
75051ae08745Sheppo 			(caddr_t)priv_addr->datap, VSW_RING_EL_DATA_SZ,
75061ae08745Sheppo 			LDC_SHADOW_MAP, LDC_MEM_R|LDC_MEM_W,
75071ae08745Sheppo 			&(priv_addr->memcookie[0]), &ncookies);
75081ae08745Sheppo 		if (rv != 0) {
75091ae08745Sheppo 			DERR(vswp, "%s(%lld): ldc_mem_bind_handle failed "
75101ae08745Sheppo 				"(rv %d)", name, ldcp->ldc_id, rv);
75111ae08745Sheppo 			goto setup_ring_cleanup;
75121ae08745Sheppo 		}
75131ae08745Sheppo 		priv_addr->bound = 1;
75141ae08745Sheppo 
75151ae08745Sheppo 		D2(vswp, "%s: %d: memcookie 0 : addr 0x%llx : size 0x%llx",
75161ae08745Sheppo 			name, i, priv_addr->memcookie[0].addr,
75171ae08745Sheppo 			priv_addr->memcookie[0].size);
75181ae08745Sheppo 
75191ae08745Sheppo 		if (ncookies >= (uint32_t)(VSW_MAX_COOKIES + 1)) {
75201ae08745Sheppo 			DERR(vswp, "%s(%lld) ldc_mem_bind_handle returned "
75211ae08745Sheppo 				"invalid num of cookies (%d) for size 0x%llx",
75221ae08745Sheppo 				name, ldcp->ldc_id, ncookies,
75231ae08745Sheppo 				VSW_RING_EL_DATA_SZ);
75241ae08745Sheppo 
75251ae08745Sheppo 			goto setup_ring_cleanup;
75261ae08745Sheppo 		} else {
75271ae08745Sheppo 			for (j = 1; j < ncookies; j++) {
75281ae08745Sheppo 				rv = ldc_mem_nextcookie(priv_addr->memhandle,
75291ae08745Sheppo 					&(priv_addr->memcookie[j]));
75301ae08745Sheppo 				if (rv != 0) {
75311ae08745Sheppo 					DERR(vswp, "%s: ldc_mem_nextcookie "
75321ae08745Sheppo 						"failed rv (%d)", name, rv);
75331ae08745Sheppo 					goto setup_ring_cleanup;
75341ae08745Sheppo 				}
75351ae08745Sheppo 				D3(vswp, "%s: memcookie %d : addr 0x%llx : "
75361ae08745Sheppo 					"size 0x%llx", name, j,
75371ae08745Sheppo 					priv_addr->memcookie[j].addr,
75381ae08745Sheppo 					priv_addr->memcookie[j].size);
75391ae08745Sheppo 			}
75401ae08745Sheppo 
75411ae08745Sheppo 		}
75421ae08745Sheppo 		priv_addr->ncookies = ncookies;
75431ae08745Sheppo 		priv_addr->dstate = VIO_DESC_FREE;
75441ae08745Sheppo 
75451ae08745Sheppo 		if (pub_addr != NULL) {
75461ae08745Sheppo 
75471ae08745Sheppo 			/* link pub and private sides */
75481ae08745Sheppo 			priv_addr->descp = pub_addr;
75491ae08745Sheppo 
7550d10e4ef2Snarayan 			pub_addr->ncookies = priv_addr->ncookies;
7551d10e4ef2Snarayan 
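			/*
			 * Export the memory cookies describing the data
			 * buffer via the public descriptor, so that the
			 * peer can map and copy the data.
			 */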
7552d10e4ef2Snarayan 			for (nc = 0; nc < pub_addr->ncookies; nc++) {
7553d10e4ef2Snarayan 				bcopy(&priv_addr->memcookie[nc],
7554d10e4ef2Snarayan 					&pub_addr->memcookie[nc],
7555d10e4ef2Snarayan 					sizeof (ldc_mem_cookie_t));
7556d10e4ef2Snarayan 			}
7557d10e4ef2Snarayan 
75581ae08745Sheppo 			pub_addr->hdr.dstate = VIO_DESC_FREE;
75591ae08745Sheppo 			pub_addr++;
75601ae08745Sheppo 		}
75611ae08745Sheppo 
75621ae08745Sheppo 		/*
75631ae08745Sheppo 		 * move to next element in the dring and the next
75641ae08745Sheppo 		 * position in the data buffer.
75651ae08745Sheppo 		 */
75661ae08745Sheppo 		priv_addr++;
75671ae08745Sheppo 		tmpp += offset;
75681ae08745Sheppo 	}
75691ae08745Sheppo 
75701ae08745Sheppo 	return (0);
75711ae08745Sheppo 
75721ae08745Sheppo setup_ring_cleanup:
75731ae08745Sheppo 	priv_addr = dp->priv_addr;
75741ae08745Sheppo 
7575d10e4ef2Snarayan 	for (j = 0; j < i; j++) {
75761ae08745Sheppo 		(void) ldc_mem_unbind_handle(priv_addr->memhandle);
75771ae08745Sheppo 		(void) ldc_mem_free_handle(priv_addr->memhandle);
75781ae08745Sheppo 
7579d10e4ef2Snarayan 		mutex_destroy(&priv_addr->dstate_lock);
7580d10e4ef2Snarayan 
75811ae08745Sheppo 		priv_addr++;
75821ae08745Sheppo 	}
75831ae08745Sheppo 	kmem_free(dp->data_addr, dp->data_sz);
75841ae08745Sheppo 
75851ae08745Sheppo 	return (1);
75861ae08745Sheppo }
75871ae08745Sheppo 
75881ae08745Sheppo /*
75891ae08745Sheppo  * Searches the private section of a ring for a free descriptor,
75901ae08745Sheppo  * starting at the location of the last free descriptor found
75911ae08745Sheppo  * previously.
75921ae08745Sheppo  *
7593d10e4ef2Snarayan  * Returns 0 if a free descriptor is available, and updates the state
7594d10e4ef2Snarayan  * of the private descriptor to VIO_DESC_READY; otherwise returns 1.
75951ae08745Sheppo  *
75961ae08745Sheppo  * FUTURE: might need to return contiguous range of descriptors
75971ae08745Sheppo  * as dring info msg assumes all will be contiguous.
75981ae08745Sheppo  */
75991ae08745Sheppo static int
76001ae08745Sheppo vsw_dring_find_free_desc(dring_info_t *dringp,
76011ae08745Sheppo 		vsw_private_desc_t **priv_p, int *idx)
76021ae08745Sheppo {
7603d10e4ef2Snarayan 	vsw_private_desc_t	*addr = NULL;
76041ae08745Sheppo 	int			num = VSW_RING_NUM_EL;
76051ae08745Sheppo 	int			ret = 1;
76061ae08745Sheppo 
76071ae08745Sheppo 	D1(NULL, "%s enter\n", __func__);
76081ae08745Sheppo 
7609d10e4ef2Snarayan 	ASSERT(dringp->priv_addr != NULL);
76101ae08745Sheppo 
76111ae08745Sheppo 	D2(NULL, "%s: searching ring, dringp 0x%llx : start pos %lld",
7612d10e4ef2Snarayan 			__func__, dringp, dringp->end_idx);
76131ae08745Sheppo 
7614d10e4ef2Snarayan 	addr = (vsw_private_desc_t *)dringp->priv_addr + dringp->end_idx;
7615d10e4ef2Snarayan 
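	/*
	 * Atomically claim the descriptor: check that it is FREE and mark
	 * it READY while holding dstate_lock.
	 */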
7616d10e4ef2Snarayan 	mutex_enter(&addr->dstate_lock);
76171ae08745Sheppo 	if (addr->dstate == VIO_DESC_FREE) {
7618d10e4ef2Snarayan 		addr->dstate = VIO_DESC_READY;
76191ae08745Sheppo 		*priv_p = addr;
7620d10e4ef2Snarayan 		*idx = dringp->end_idx;
7621d10e4ef2Snarayan 		dringp->end_idx = (dringp->end_idx + 1) % num;
76221ae08745Sheppo 		ret = 0;
7623d10e4ef2Snarayan 
76241ae08745Sheppo 	}
7625d10e4ef2Snarayan 	mutex_exit(&addr->dstate_lock);
76261ae08745Sheppo 
76271ae08745Sheppo 	/* ring full */
76281ae08745Sheppo 	if (ret == 1) {
7629d10e4ef2Snarayan 		D2(NULL, "%s: no desp free: started at %d", __func__,
7630d10e4ef2Snarayan 			dringp->end_idx);
76311ae08745Sheppo 	}
76321ae08745Sheppo 
76331ae08745Sheppo 	D1(NULL, "%s: exit\n", __func__);
76341ae08745Sheppo 
76351ae08745Sheppo 	return (ret);
76361ae08745Sheppo }
76371ae08745Sheppo 
76381ae08745Sheppo /*
76391ae08745Sheppo  * Map from a dring identifier to the ring itself. Returns
76401ae08745Sheppo  * pointer to ring or NULL if no match found.
76411ae08745Sheppo  */
76421ae08745Sheppo static dring_info_t *
76431ae08745Sheppo vsw_ident2dring(lane_t *lane, uint64_t ident)
76441ae08745Sheppo {
76451ae08745Sheppo 	dring_info_t	*dp = NULL;
76461ae08745Sheppo 
76471ae08745Sheppo 	if ((dp = lane->dringp) == NULL) {
76481ae08745Sheppo 		return (NULL);
76491ae08745Sheppo 	} else {
76501ae08745Sheppo 		if (dp->ident == ident)
76511ae08745Sheppo 			return (dp);
76521ae08745Sheppo 
76531ae08745Sheppo 		while (dp != NULL) {
76541ae08745Sheppo 			if (dp->ident == ident)
76551ae08745Sheppo 				break;
76561ae08745Sheppo 			dp = dp->next;
76571ae08745Sheppo 		}
76581ae08745Sheppo 	}
76591ae08745Sheppo 
76601ae08745Sheppo 	return (dp);
76611ae08745Sheppo }
76621ae08745Sheppo 
76631ae08745Sheppo /*
76641ae08745Sheppo  * Set the default lane attributes. These are copied into
76651ae08745Sheppo  * the attr msg we send to our peer. If they are not acceptable
76661ae08745Sheppo  * then (currently) the handshake ends.
76671ae08745Sheppo  */
76681ae08745Sheppo static void
76691ae08745Sheppo vsw_set_lane_attr(vsw_t *vswp, lane_t *lp)
76701ae08745Sheppo {
76711ae08745Sheppo 	bzero(lp, sizeof (lane_t));
76721ae08745Sheppo 
76731ae08745Sheppo 	READ_ENTER(&vswp->if_lockrw);
76741ae08745Sheppo 	ether_copy(&(vswp->if_addr), &(lp->addr));
76751ae08745Sheppo 	RW_EXIT(&vswp->if_lockrw);
76761ae08745Sheppo 
76771ae08745Sheppo 	lp->mtu = VSW_MTU;
76781ae08745Sheppo 	lp->addr_type = ADDR_TYPE_MAC;
76791ae08745Sheppo 	lp->xfer_mode = VIO_DRING_MODE;
76801ae08745Sheppo 	lp->ack_freq = 0;	/* for shared mode */
7681d10e4ef2Snarayan 
7682d10e4ef2Snarayan 	mutex_enter(&lp->seq_lock);
76831ae08745Sheppo 	lp->seq_num = VNET_ISS;
7684d10e4ef2Snarayan 	mutex_exit(&lp->seq_lock);
76851ae08745Sheppo }
76861ae08745Sheppo 
76871ae08745Sheppo /*
76881ae08745Sheppo  * Verify that the attributes are acceptable.
76891ae08745Sheppo  *
76901ae08745Sheppo  * FUTURE: If some attributes are not acceptable, change them
76911ae08745Sheppo  * to our desired values.
76921ae08745Sheppo  */
76931ae08745Sheppo static int
76941ae08745Sheppo vsw_check_attr(vnet_attr_msg_t *pkt, vsw_port_t *port)
76951ae08745Sheppo {
76961ae08745Sheppo 	int	ret = 0;
76971ae08745Sheppo 
76981ae08745Sheppo 	D1(NULL, "vsw_check_attr enter\n");
76991ae08745Sheppo 
77001ae08745Sheppo 	/*
77011ae08745Sheppo 	 * Note we currently only support in-band descriptors
77021ae08745Sheppo 	 * and descriptor rings, not packet based transfer (VIO_PKT_MODE)
77031ae08745Sheppo 	 */
77041ae08745Sheppo 	if ((pkt->xfer_mode != VIO_DESC_MODE) &&
77051ae08745Sheppo 			(pkt->xfer_mode != VIO_DRING_MODE)) {
77061ae08745Sheppo 		D2(NULL, "vsw_check_attr: unknown mode %x\n",
77071ae08745Sheppo 			pkt->xfer_mode);
77081ae08745Sheppo 		ret = 1;
77091ae08745Sheppo 	}
77101ae08745Sheppo 
77111ae08745Sheppo 	/* Only support MAC addresses at moment. */
77121ae08745Sheppo 	if ((pkt->addr_type != ADDR_TYPE_MAC) || (pkt->addr == 0)) {
77131ae08745Sheppo 		D2(NULL, "vsw_check_attr: invalid addr_type %x, "
77141ae08745Sheppo 			"or address 0x%llx\n", pkt->addr_type,
77151ae08745Sheppo 			pkt->addr);
77161ae08745Sheppo 		ret = 1;
77171ae08745Sheppo 	}
77181ae08745Sheppo 
77191ae08745Sheppo 	/*
77201ae08745Sheppo 	 * MAC address supplied by device should match that stored
77211ae08745Sheppo 	 * in the vsw-port OBP node. Need to decide what to do if they
77221ae08745Sheppo 	 * don't match, for the moment just warn but don't fail.
77231ae08745Sheppo 	 */
77241ae08745Sheppo 	if (bcmp(&pkt->addr, &port->p_macaddr, ETHERADDRL) != 0) {
77251ae08745Sheppo 		DERR(NULL, "vsw_check_attr: device supplied address "
77261ae08745Sheppo 			"0x%llx doesn't match node address 0x%llx\n",
77271ae08745Sheppo 			pkt->addr, port->p_macaddr);
77281ae08745Sheppo 	}
77291ae08745Sheppo 
77301ae08745Sheppo 	/*
77311ae08745Sheppo 	 * Ack freq only makes sense in pkt mode, in shared
77321ae08745Sheppo 	 * mode the ring descriptors say whether or not to
77331ae08745Sheppo 	 * send back an ACK.
77341ae08745Sheppo 	 */
77351ae08745Sheppo 	if ((pkt->xfer_mode == VIO_DRING_MODE) &&
77361ae08745Sheppo 				(pkt->ack_freq > 0)) {
77371ae08745Sheppo 		D2(NULL, "vsw_check_attr: non zero ack freq "
77381ae08745Sheppo 			" in SHM mode\n");
77391ae08745Sheppo 		ret = 1;
77401ae08745Sheppo 	}
77411ae08745Sheppo 
77421ae08745Sheppo 	/*
77431ae08745Sheppo 	 * Note: for the moment we only support ETHER
77441ae08745Sheppo 	 * frames. This may change in the future.
77451ae08745Sheppo 	 */
	if ((pkt->mtu > VSW_MTU) || (pkt->mtu == 0)) {
77471ae08745Sheppo 		D2(NULL, "vsw_check_attr: invalid MTU (0x%llx)\n",
77481ae08745Sheppo 			pkt->mtu);
77491ae08745Sheppo 		ret = 1;
77501ae08745Sheppo 	}
77511ae08745Sheppo 
77521ae08745Sheppo 	D1(NULL, "vsw_check_attr exit\n");
77531ae08745Sheppo 
77541ae08745Sheppo 	return (ret);
77551ae08745Sheppo }
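
/*
 * Sketch of how a caller might act on the result (illustrative only;
 * the actual handshake processing lives elsewhere in this file):
 *
 *	vnet_attr_msg_t	*attr_pkt;	(the received attr message)
 *
 *	if (vsw_check_attr(attr_pkt, port) == 0)
 *		send VIO_SUBTYPE_ACK back to the peer;
 *	else
 *		send VIO_SUBTYPE_NACK and end the handshake;
 */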
77561ae08745Sheppo 
/*
 * Sanity check the ring described by a dring registration message:
 * it must contain at least one descriptor of non-zero size and must
 * be described by exactly one LDC memory cookie.
 *
 * Returns 1 if there is a problem, 0 otherwise.
 */
77601ae08745Sheppo static int
77611ae08745Sheppo vsw_check_dring_info(vio_dring_reg_msg_t *pkt)
77621ae08745Sheppo {
77651ae08745Sheppo 	int	ret = 0;
77661ae08745Sheppo 
77671ae08745Sheppo 	D1(NULL, "vsw_check_dring_info enter\n");
77681ae08745Sheppo 
77691ae08745Sheppo 	if ((pkt->num_descriptors == 0) ||
77701ae08745Sheppo 		(pkt->descriptor_size == 0) ||
77711ae08745Sheppo 		(pkt->ncookies != 1)) {
77721ae08745Sheppo 		DERR(NULL, "vsw_check_dring_info: invalid dring msg");
77731ae08745Sheppo 		ret = 1;
77741ae08745Sheppo 	}
77751ae08745Sheppo 
77761ae08745Sheppo 	D1(NULL, "vsw_check_dring_info exit\n");
77771ae08745Sheppo 
77781ae08745Sheppo 	return (ret);
77791ae08745Sheppo }
77801ae08745Sheppo 
77811ae08745Sheppo /*
77821ae08745Sheppo  * Returns 1 if two memory cookies match. Otherwise returns 0.
77831ae08745Sheppo  */
77841ae08745Sheppo static int
77851ae08745Sheppo vsw_mem_cookie_match(ldc_mem_cookie_t *m1, ldc_mem_cookie_t *m2)
77861ae08745Sheppo {
77871ae08745Sheppo 	if ((m1->addr != m2->addr) ||
		(m1->size != m2->size)) {
77891ae08745Sheppo 		return (0);
77901ae08745Sheppo 	} else {
77911ae08745Sheppo 		return (1);
77921ae08745Sheppo 	}
77931ae08745Sheppo }
77941ae08745Sheppo 
77951ae08745Sheppo /*
77961ae08745Sheppo  * Returns 1 if ring described in reg message matches that
77971ae08745Sheppo  * described by dring_info structure. Otherwise returns 0.
77981ae08745Sheppo  */
77991ae08745Sheppo static int
78001ae08745Sheppo vsw_dring_match(dring_info_t *dp, vio_dring_reg_msg_t *msg)
78011ae08745Sheppo {
78021ae08745Sheppo 	if ((msg->descriptor_size != dp->descriptor_size) ||
78031ae08745Sheppo 		(msg->num_descriptors != dp->num_descriptors) ||
78041ae08745Sheppo 		(msg->ncookies != dp->ncookies) ||
78051ae08745Sheppo 		!(vsw_mem_cookie_match(&msg->cookie[0], &dp->cookie[0]))) {
78061ae08745Sheppo 		return (0);
78071ae08745Sheppo 	} else {
78081ae08745Sheppo 		return (1);
78091ae08745Sheppo 	}
78111ae08745Sheppo }
78121ae08745Sheppo 
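/*
 * Format the 6 byte MAC address 'a' as a colon separated hex string in
 * the caller supplied buffer 'ebuf' and return that buffer. The buffer
 * must be large enough for the formatted string plus the terminating
 * NUL (18 bytes always suffices). Typical use (illustrative only):
 *
 *	char	ebuf[ETHERADDRL * 3];
 *
 *	DERR(vswp, "%s: bad address %s", __func__,
 *	    vsw_print_ethaddr(macaddr, ebuf));
 */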
78131ae08745Sheppo static caddr_t
78141ae08745Sheppo vsw_print_ethaddr(uint8_t *a, char *ebuf)
78151ae08745Sheppo {
78161ae08745Sheppo 	(void) sprintf(ebuf, "%x:%x:%x:%x:%x:%x",
78171ae08745Sheppo 	    a[0], a[1], a[2], a[3], a[4], a[5]);
78181ae08745Sheppo 	return (ebuf);
78191ae08745Sheppo }
78201ae08745Sheppo 
78211ae08745Sheppo /*
78221ae08745Sheppo  * Reset and free all the resources associated with
78231ae08745Sheppo  * the channel.
78241ae08745Sheppo  */
78251ae08745Sheppo static void
78261ae08745Sheppo vsw_free_lane_resources(vsw_ldc_t *ldcp, uint64_t dir)
78271ae08745Sheppo {
78281ae08745Sheppo 	dring_info_t		*dp, *dpp;
78291ae08745Sheppo 	lane_t			*lp = NULL;
78301ae08745Sheppo 	int			rv = 0;
78311ae08745Sheppo 
78321ae08745Sheppo 	ASSERT(ldcp != NULL);
78331ae08745Sheppo 
78341ae08745Sheppo 	D1(ldcp->ldc_vswp, "%s (%lld): enter", __func__, ldcp->ldc_id);
78351ae08745Sheppo 
78361ae08745Sheppo 	if (dir == INBOUND) {
78371ae08745Sheppo 		D2(ldcp->ldc_vswp, "%s: freeing INBOUND lane"
78381ae08745Sheppo 			" of channel %lld", __func__, ldcp->ldc_id);
78391ae08745Sheppo 		lp = &ldcp->lane_in;
78401ae08745Sheppo 	} else {
78411ae08745Sheppo 		D2(ldcp->ldc_vswp, "%s: freeing OUTBOUND lane"
78421ae08745Sheppo 			" of channel %lld", __func__, ldcp->ldc_id);
78431ae08745Sheppo 		lp = &ldcp->lane_out;
78441ae08745Sheppo 	}
78451ae08745Sheppo 
78461ae08745Sheppo 	lp->lstate = VSW_LANE_INACTIV;
7847d10e4ef2Snarayan 	mutex_enter(&lp->seq_lock);
78481ae08745Sheppo 	lp->seq_num = VNET_ISS;
7849d10e4ef2Snarayan 	mutex_exit(&lp->seq_lock);
78501ae08745Sheppo 	if (lp->dringp) {
78511ae08745Sheppo 		if (dir == INBOUND) {
78521ae08745Sheppo 			dp = lp->dringp;
78531ae08745Sheppo 			while (dp != NULL) {
78541ae08745Sheppo 				dpp = dp->next;
78551ae08745Sheppo 				if (dp->handle != NULL)
78561ae08745Sheppo 					(void) ldc_mem_dring_unmap(dp->handle);
78571ae08745Sheppo 				kmem_free(dp, sizeof (dring_info_t));
78581ae08745Sheppo 				dp = dpp;
78591ae08745Sheppo 			}
78601ae08745Sheppo 		} else {
78611ae08745Sheppo 			/*
78621ae08745Sheppo 			 * unbind, destroy exported dring, free dring struct
78631ae08745Sheppo 			 */
78641ae08745Sheppo 			dp = lp->dringp;
78651ae08745Sheppo 			rv = vsw_free_ring(dp);
78661ae08745Sheppo 		}
78671ae08745Sheppo 		if (rv == 0) {
78681ae08745Sheppo 			lp->dringp = NULL;
78691ae08745Sheppo 		}
78701ae08745Sheppo 	}
78711ae08745Sheppo 
78721ae08745Sheppo 	D1(ldcp->ldc_vswp, "%s (%lld): exit", __func__, ldcp->ldc_id);
78731ae08745Sheppo }
78741ae08745Sheppo 
78751ae08745Sheppo /*
78761ae08745Sheppo  * Free ring and all associated resources.
78771ae08745Sheppo  */
78781ae08745Sheppo static int
78791ae08745Sheppo vsw_free_ring(dring_info_t *dp)
78801ae08745Sheppo {
78811ae08745Sheppo 	vsw_private_desc_t	*paddr = NULL;
78821ae08745Sheppo 	dring_info_t		*dpp;
78831ae08745Sheppo 	int			i, rv = 1;
78841ae08745Sheppo 
78851ae08745Sheppo 	while (dp != NULL) {
78861ae08745Sheppo 		mutex_enter(&dp->dlock);
78871ae08745Sheppo 		dpp = dp->next;
78881ae08745Sheppo 		if (dp->priv_addr != NULL) {
78891ae08745Sheppo 			/*
78901ae08745Sheppo 			 * First unbind and free the memory handles
78911ae08745Sheppo 			 * stored in each descriptor within the ring.
78921ae08745Sheppo 			 */
78931ae08745Sheppo 			for (i = 0; i < VSW_RING_NUM_EL; i++) {
78941ae08745Sheppo 				paddr = (vsw_private_desc_t *)
78951ae08745Sheppo 						dp->priv_addr + i;
78961ae08745Sheppo 				if (paddr->memhandle != NULL) {
78971ae08745Sheppo 					if (paddr->bound == 1) {
78981ae08745Sheppo 						rv = ldc_mem_unbind_handle(
78991ae08745Sheppo 							paddr->memhandle);
79001ae08745Sheppo 
79011ae08745Sheppo 						if (rv != 0) {
79021ae08745Sheppo 							DERR(NULL, "error "
79031ae08745Sheppo 							"unbinding handle for "
79041ae08745Sheppo 							"ring 0x%llx at pos %d",
79051ae08745Sheppo 							dp, i);
79061ae08745Sheppo 							mutex_exit(&dp->dlock);
79071ae08745Sheppo 							return (rv);
79081ae08745Sheppo 						}
79091ae08745Sheppo 						paddr->bound = 0;
79101ae08745Sheppo 					}
79111ae08745Sheppo 
79121ae08745Sheppo 					rv = ldc_mem_free_handle(
79131ae08745Sheppo 							paddr->memhandle);
79141ae08745Sheppo 					if (rv != 0) {
79151ae08745Sheppo 						DERR(NULL, "error freeing "
79161ae08745Sheppo 							"handle for ring "
79171ae08745Sheppo 							"0x%llx at pos %d",
79181ae08745Sheppo 							dp, i);
79191ae08745Sheppo 						mutex_exit(&dp->dlock);
79201ae08745Sheppo 						return (rv);
79211ae08745Sheppo 					}
79221ae08745Sheppo 					paddr->memhandle = NULL;
79231ae08745Sheppo 				}
7924d10e4ef2Snarayan 				mutex_destroy(&paddr->dstate_lock);
79251ae08745Sheppo 			}
79261ae08745Sheppo 			kmem_free(dp->priv_addr, (sizeof (vsw_private_desc_t)
79271ae08745Sheppo 					* VSW_RING_NUM_EL));
79281ae08745Sheppo 		}
79291ae08745Sheppo 
79301ae08745Sheppo 		/*
79311ae08745Sheppo 		 * Now unbind and destroy the ring itself.
79321ae08745Sheppo 		 */
79331ae08745Sheppo 		if (dp->handle != NULL) {
79341ae08745Sheppo 			(void) ldc_mem_dring_unbind(dp->handle);
79351ae08745Sheppo 			(void) ldc_mem_dring_destroy(dp->handle);
79361ae08745Sheppo 		}
79371ae08745Sheppo 
79381ae08745Sheppo 		if (dp->data_addr != NULL) {
79391ae08745Sheppo 			kmem_free(dp->data_addr, dp->data_sz);
79401ae08745Sheppo 		}
79411ae08745Sheppo 
79421ae08745Sheppo 		mutex_exit(&dp->dlock);
79431ae08745Sheppo 		mutex_destroy(&dp->dlock);
7944d10e4ef2Snarayan 		mutex_destroy(&dp->restart_lock);
79451ae08745Sheppo 		kmem_free(dp, sizeof (dring_info_t));
79461ae08745Sheppo 
79471ae08745Sheppo 		dp = dpp;
79481ae08745Sheppo 	}
79491ae08745Sheppo 	return (0);
79501ae08745Sheppo }
79511ae08745Sheppo 
79521ae08745Sheppo /*
79531ae08745Sheppo  * Debugging routines
79541ae08745Sheppo  */
79551ae08745Sheppo static void
79561ae08745Sheppo display_state(void)
79571ae08745Sheppo {
79581ae08745Sheppo 	vsw_t		*vswp;
79591ae08745Sheppo 	vsw_port_list_t	*plist;
79601ae08745Sheppo 	vsw_port_t 	*port;
79611ae08745Sheppo 	vsw_ldc_list_t	*ldcl;
79621ae08745Sheppo 	vsw_ldc_t 	*ldcp;
79631ae08745Sheppo 
79641ae08745Sheppo 	cmn_err(CE_NOTE, "***** system state *****");
79651ae08745Sheppo 
79661ae08745Sheppo 	for (vswp = vsw_head; vswp; vswp = vswp->next) {
79671ae08745Sheppo 		plist = &vswp->plist;
79681ae08745Sheppo 		READ_ENTER(&plist->lockrw);
79691ae08745Sheppo 		cmn_err(CE_CONT, "vsw instance %d has %d ports attached\n",
79701ae08745Sheppo 			vswp->instance, plist->num_ports);
79711ae08745Sheppo 
79721ae08745Sheppo 		for (port = plist->head; port != NULL; port = port->p_next) {
79731ae08745Sheppo 			ldcl = &port->p_ldclist;
79741ae08745Sheppo 			cmn_err(CE_CONT, "port %d : %d ldcs attached\n",
79751ae08745Sheppo 				port->p_instance, ldcl->num_ldcs);
79761ae08745Sheppo 			READ_ENTER(&ldcl->lockrw);
79771ae08745Sheppo 			ldcp = ldcl->head;
79781ae08745Sheppo 			for (; ldcp != NULL; ldcp = ldcp->ldc_next) {
79791ae08745Sheppo 				cmn_err(CE_CONT, "chan %lu : dev %d : "
79801ae08745Sheppo 					"status %d : phase %u\n",
79811ae08745Sheppo 					ldcp->ldc_id, ldcp->dev_class,
79821ae08745Sheppo 					ldcp->ldc_status, ldcp->hphase);
79831ae08745Sheppo 				cmn_err(CE_CONT, "chan %lu : lsession %lu : "
79841ae08745Sheppo 					"psession %lu\n",
79851ae08745Sheppo 					ldcp->ldc_id,
79861ae08745Sheppo 					ldcp->local_session,
79871ae08745Sheppo 					ldcp->peer_session);
79881ae08745Sheppo 
79891ae08745Sheppo 				cmn_err(CE_CONT, "Inbound lane:\n");
79901ae08745Sheppo 				display_lane(&ldcp->lane_in);
79911ae08745Sheppo 				cmn_err(CE_CONT, "Outbound lane:\n");
79921ae08745Sheppo 				display_lane(&ldcp->lane_out);
79931ae08745Sheppo 			}
79941ae08745Sheppo 			RW_EXIT(&ldcl->lockrw);
79951ae08745Sheppo 		}
79961ae08745Sheppo 		RW_EXIT(&plist->lockrw);
79971ae08745Sheppo 	}
79981ae08745Sheppo 	cmn_err(CE_NOTE, "***** system state *****");
79991ae08745Sheppo }
80001ae08745Sheppo 
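/*
 * Dump the state of a single lane, followed by each descriptor ring
 * currently attached to it.
 */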
80011ae08745Sheppo static void
80021ae08745Sheppo display_lane(lane_t *lp)
80031ae08745Sheppo {
80041ae08745Sheppo 	dring_info_t	*drp;
80051ae08745Sheppo 
80061ae08745Sheppo 	cmn_err(CE_CONT, "ver 0x%x:0x%x : state %lx : mtu 0x%lx\n",
80071ae08745Sheppo 		lp->ver_major, lp->ver_minor, lp->lstate, lp->mtu);
80081ae08745Sheppo 	cmn_err(CE_CONT, "addr_type %d : addr 0x%lx : xmode %d\n",
80091ae08745Sheppo 		lp->addr_type, lp->addr, lp->xfer_mode);
80101ae08745Sheppo 	cmn_err(CE_CONT, "dringp 0x%lx\n", (uint64_t)lp->dringp);
80111ae08745Sheppo 
80121ae08745Sheppo 	cmn_err(CE_CONT, "Dring info:\n");
80131ae08745Sheppo 	for (drp = lp->dringp; drp != NULL; drp = drp->next) {
80141ae08745Sheppo 		cmn_err(CE_CONT, "\tnum_desc %u : dsize %u\n",
80151ae08745Sheppo 			drp->num_descriptors, drp->descriptor_size);
80161ae08745Sheppo 		cmn_err(CE_CONT, "\thandle 0x%lx\n", drp->handle);
80171ae08745Sheppo 		cmn_err(CE_CONT, "\tpub_addr 0x%lx : priv_addr 0x%lx\n",
80181ae08745Sheppo 			(uint64_t)drp->pub_addr, (uint64_t)drp->priv_addr);
80191ae08745Sheppo 		cmn_err(CE_CONT, "\tident 0x%lx : end_idx %lu\n",
80201ae08745Sheppo 			drp->ident, drp->end_idx);
80211ae08745Sheppo 		display_ring(drp);
80221ae08745Sheppo 	}
80231ae08745Sheppo }
80241ae08745Sheppo 
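/*
 * Walk the public and private descriptors of a ring and report how
 * many of each are currently in the VIO_DESC_FREE state.
 */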
80251ae08745Sheppo static void
80261ae08745Sheppo display_ring(dring_info_t *dringp)
80271ae08745Sheppo {
80281ae08745Sheppo 	uint64_t		i;
80291ae08745Sheppo 	uint64_t		priv_count = 0;
80301ae08745Sheppo 	uint64_t		pub_count = 0;
80311ae08745Sheppo 	vnet_public_desc_t	*pub_addr = NULL;
80321ae08745Sheppo 	vsw_private_desc_t	*priv_addr = NULL;
80331ae08745Sheppo 
80341ae08745Sheppo 	for (i = 0; i < VSW_RING_NUM_EL; i++) {
80351ae08745Sheppo 		if (dringp->pub_addr != NULL) {
80361ae08745Sheppo 			pub_addr = (vnet_public_desc_t *)dringp->pub_addr + i;
80371ae08745Sheppo 
80381ae08745Sheppo 			if (pub_addr->hdr.dstate == VIO_DESC_FREE)
80391ae08745Sheppo 				pub_count++;
80401ae08745Sheppo 		}
80411ae08745Sheppo 
80421ae08745Sheppo 		if (dringp->priv_addr != NULL) {
80431ae08745Sheppo 			priv_addr =
80441ae08745Sheppo 				(vsw_private_desc_t *)dringp->priv_addr + i;
80451ae08745Sheppo 
80461ae08745Sheppo 			if (priv_addr->dstate == VIO_DESC_FREE)
80471ae08745Sheppo 				priv_count++;
80481ae08745Sheppo 		}
80491ae08745Sheppo 	}
80501ae08745Sheppo 	cmn_err(CE_CONT, "\t%lu elements: %lu priv free: %lu pub free\n",
80511ae08745Sheppo 			i, priv_count, pub_count);
80521ae08745Sheppo }
80531ae08745Sheppo 
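/*
 * Decode a lane state bitmask into the symbolic names of the
 * handshake flags that are set.
 */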
80541ae08745Sheppo static void
80551ae08745Sheppo dump_flags(uint64_t state)
80561ae08745Sheppo {
80571ae08745Sheppo 	int	i;
80581ae08745Sheppo 
80591ae08745Sheppo 	typedef struct flag_name {
80601ae08745Sheppo 		int	flag_val;
80611ae08745Sheppo 		char	*flag_name;
80621ae08745Sheppo 	} flag_name_t;
80631ae08745Sheppo 
80641ae08745Sheppo 	flag_name_t	flags[] = {
80651ae08745Sheppo 		VSW_VER_INFO_SENT, "VSW_VER_INFO_SENT",
80661ae08745Sheppo 		VSW_VER_INFO_RECV, "VSW_VER_INFO_RECV",
80671ae08745Sheppo 		VSW_VER_ACK_RECV, "VSW_VER_ACK_RECV",
80681ae08745Sheppo 		VSW_VER_ACK_SENT, "VSW_VER_ACK_SENT",
80691ae08745Sheppo 		VSW_VER_NACK_RECV, "VSW_VER_NACK_RECV",
80701ae08745Sheppo 		VSW_VER_NACK_SENT, "VSW_VER_NACK_SENT",
80711ae08745Sheppo 		VSW_ATTR_INFO_SENT, "VSW_ATTR_INFO_SENT",
80721ae08745Sheppo 		VSW_ATTR_INFO_RECV, "VSW_ATTR_INFO_RECV",
80731ae08745Sheppo 		VSW_ATTR_ACK_SENT, "VSW_ATTR_ACK_SENT",
80741ae08745Sheppo 		VSW_ATTR_ACK_RECV, "VSW_ATTR_ACK_RECV",
80751ae08745Sheppo 		VSW_ATTR_NACK_SENT, "VSW_ATTR_NACK_SENT",
80761ae08745Sheppo 		VSW_ATTR_NACK_RECV, "VSW_ATTR_NACK_RECV",
80771ae08745Sheppo 		VSW_DRING_INFO_SENT, "VSW_DRING_INFO_SENT",
80781ae08745Sheppo 		VSW_DRING_INFO_RECV, "VSW_DRING_INFO_RECV",
80791ae08745Sheppo 		VSW_DRING_ACK_SENT, "VSW_DRING_ACK_SENT",
80801ae08745Sheppo 		VSW_DRING_ACK_RECV, "VSW_DRING_ACK_RECV",
80811ae08745Sheppo 		VSW_DRING_NACK_SENT, "VSW_DRING_NACK_SENT",
80821ae08745Sheppo 		VSW_DRING_NACK_RECV, "VSW_DRING_NACK_RECV",
80831ae08745Sheppo 		VSW_RDX_INFO_SENT, "VSW_RDX_INFO_SENT",
80841ae08745Sheppo 		VSW_RDX_INFO_RECV, "VSW_RDX_INFO_RECV",
80851ae08745Sheppo 		VSW_RDX_ACK_SENT, "VSW_RDX_ACK_SENT",
80861ae08745Sheppo 		VSW_RDX_ACK_RECV, "VSW_RDX_ACK_RECV",
80871ae08745Sheppo 		VSW_RDX_NACK_SENT, "VSW_RDX_NACK_SENT",
80881ae08745Sheppo 		VSW_RDX_NACK_RECV, "VSW_RDX_NACK_RECV",
80891ae08745Sheppo 		VSW_MCST_INFO_SENT, "VSW_MCST_INFO_SENT",
80901ae08745Sheppo 		VSW_MCST_INFO_RECV, "VSW_MCST_INFO_RECV",
80911ae08745Sheppo 		VSW_MCST_ACK_SENT, "VSW_MCST_ACK_SENT",
80921ae08745Sheppo 		VSW_MCST_ACK_RECV, "VSW_MCST_ACK_RECV",
80931ae08745Sheppo 		VSW_MCST_NACK_SENT, "VSW_MCST_NACK_SENT",
80941ae08745Sheppo 		VSW_MCST_NACK_RECV, "VSW_MCST_NACK_RECV",
80951ae08745Sheppo 		VSW_LANE_ACTIVE, "VSW_LANE_ACTIVE"};
80961ae08745Sheppo 
80971ae08745Sheppo 	DERR(NULL, "DUMP_FLAGS: %llx\n", state);
80981ae08745Sheppo 	for (i = 0; i < sizeof (flags)/sizeof (flag_name_t); i++) {
80991ae08745Sheppo 		if (state & flags[i].flag_val)
81001ae08745Sheppo 			DERR(NULL, "DUMP_FLAGS %s", flags[i].flag_name);
81011ae08745Sheppo 	}
81021ae08745Sheppo }
8103