1678453a8Sspeer /* 2678453a8Sspeer * CDDL HEADER START 3678453a8Sspeer * 4678453a8Sspeer * The contents of this file are subject to the terms of the 5678453a8Sspeer * Common Development and Distribution License (the "License"). 6678453a8Sspeer * You may not use this file except in compliance with the License. 7678453a8Sspeer * 8678453a8Sspeer * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9678453a8Sspeer * or http://www.opensolaris.org/os/licensing. 10678453a8Sspeer * See the License for the specific language governing permissions 11678453a8Sspeer * and limitations under the License. 12678453a8Sspeer * 13678453a8Sspeer * When distributing Covered Code, include this CDDL HEADER in each 14678453a8Sspeer * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15678453a8Sspeer * If applicable, add the following below this CDDL HEADER, with the 16678453a8Sspeer * fields enclosed by brackets "[]" replaced with your own identifying 17678453a8Sspeer * information: Portions Copyright [yyyy] [name of copyright owner] 18678453a8Sspeer * 19678453a8Sspeer * CDDL HEADER END 20678453a8Sspeer */ 21678453a8Sspeer 22678453a8Sspeer /* 23*7bd3a2e2SSriharsha Basavapatna * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24678453a8Sspeer * Use is subject to license terms. 
25678453a8Sspeer */ 26678453a8Sspeer 27678453a8Sspeer #include <sys/types.h> 28678453a8Sspeer #include <sys/errno.h> 29678453a8Sspeer #include <sys/debug.h> 30678453a8Sspeer #include <sys/time.h> 31678453a8Sspeer #include <sys/sysmacros.h> 32678453a8Sspeer #include <sys/systm.h> 33678453a8Sspeer #include <sys/user.h> 34678453a8Sspeer #include <sys/stropts.h> 35678453a8Sspeer #include <sys/stream.h> 36678453a8Sspeer #include <sys/strlog.h> 37678453a8Sspeer #include <sys/strsubr.h> 38678453a8Sspeer #include <sys/cmn_err.h> 39678453a8Sspeer #include <sys/cpu.h> 40678453a8Sspeer #include <sys/kmem.h> 41678453a8Sspeer #include <sys/conf.h> 42678453a8Sspeer #include <sys/ddi.h> 43678453a8Sspeer #include <sys/sunddi.h> 44678453a8Sspeer #include <sys/ksynch.h> 45678453a8Sspeer #include <sys/stat.h> 46678453a8Sspeer #include <sys/kstat.h> 47678453a8Sspeer #include <sys/vtrace.h> 48678453a8Sspeer #include <sys/strsun.h> 49678453a8Sspeer #include <sys/dlpi.h> 50678453a8Sspeer #include <sys/ethernet.h> 51678453a8Sspeer #include <net/if.h> 52678453a8Sspeer #include <sys/varargs.h> 53678453a8Sspeer #include <sys/machsystm.h> 54678453a8Sspeer #include <sys/modctl.h> 55678453a8Sspeer #include <sys/modhash.h> 56da14cebeSEric Cheng #include <sys/mac_provider.h> 57678453a8Sspeer #include <sys/mac_ether.h> 58678453a8Sspeer #include <sys/taskq.h> 59678453a8Sspeer #include <sys/note.h> 60678453a8Sspeer #include <sys/mach_descrip.h> 61678453a8Sspeer #include <sys/mac.h> 62678453a8Sspeer #include <sys/mdeg.h> 63678453a8Sspeer #include <sys/ldc.h> 64678453a8Sspeer #include <sys/vsw_fdb.h> 65678453a8Sspeer #include <sys/vsw.h> 66678453a8Sspeer #include <sys/vio_mailbox.h> 67678453a8Sspeer #include <sys/vnet_mailbox.h> 68678453a8Sspeer #include <sys/vnet_common.h> 69678453a8Sspeer #include <sys/vio_util.h> 70678453a8Sspeer #include <sys/sdt.h> 71678453a8Sspeer #include <sys/atomic.h> 72678453a8Sspeer #include <sys/callb.h> 73678453a8Sspeer 74678453a8Sspeer 75678453a8Sspeer #define 
VSW_DDS_NEXT_REQID(vsharep) (++vsharep->vs_req_id) 76678453a8Sspeer 77678453a8Sspeer extern boolean_t vsw_hio_enabled; /* HybridIO enabled? */ 78678453a8Sspeer extern int vsw_hio_max_cleanup_retries; 79678453a8Sspeer extern int vsw_hio_cleanup_delay; 80678453a8Sspeer 81678453a8Sspeer /* Functions imported from other files */ 82678453a8Sspeer extern int vsw_send_msg(vsw_ldc_t *, void *, int, boolean_t); 83cdfc78adSraghuram extern void vsw_hio_port_reset(vsw_port_t *portp, boolean_t immediate); 84da14cebeSEric Cheng extern void vsw_port_mac_reconfig(vsw_port_t *portp, boolean_t update_vlans, 85da14cebeSEric Cheng uint16_t new_pvid, vsw_vlanid_t *new_vids, int new_nvids); 86678453a8Sspeer 87678453a8Sspeer /* Functions exported to other files */ 88678453a8Sspeer void vsw_hio_init(vsw_t *vswp); 89678453a8Sspeer void vsw_hio_cleanup(vsw_t *vswp); 90678453a8Sspeer void vsw_hio_start(vsw_t *vswp, vsw_ldc_t *ldcp); 91678453a8Sspeer void vsw_hio_stop(vsw_t *vswp, vsw_ldc_t *ldcp); 92678453a8Sspeer void vsw_process_dds_msg(vsw_t *vswp, vsw_ldc_t *ldcp, void *msg); 93678453a8Sspeer void vsw_hio_start_ports(vsw_t *vswp); 94678453a8Sspeer void vsw_hio_stop_port(vsw_port_t *portp); 95678453a8Sspeer 96678453a8Sspeer /* Support functions */ 97cdfc78adSraghuram static void vsw_hio_free_all_shares(vsw_t *vswp, boolean_t reboot); 98678453a8Sspeer static vsw_share_t *vsw_hio_alloc_share(vsw_t *vswp, vsw_ldc_t *ldcp); 99678453a8Sspeer static void vsw_hio_free_share(vsw_share_t *vsharep); 100678453a8Sspeer static vsw_share_t *vsw_hio_find_free_share(vsw_t *vswp); 101678453a8Sspeer static vsw_share_t *vsw_hio_find_vshare_ldcid(vsw_t *vswp, uint64_t ldc_id); 102678453a8Sspeer static vsw_share_t *vsw_hio_find_vshare_port(vsw_t *vswp, vsw_port_t *portp); 103678453a8Sspeer static int vsw_send_dds_msg(vsw_ldc_t *ldcp, uint8_t dds_subclass, 104678453a8Sspeer uint64_t cookie, uint64_t macaddr, uint32_t req_id); 105678453a8Sspeer static int vsw_send_dds_resp_msg(vsw_ldc_t *ldcp, vio_dds_msg_t 
*dmsg, int ack); 106678453a8Sspeer static int vsw_hio_send_delshare_msg(vsw_share_t *vsharep); 107cdfc78adSraghuram static boolean_t vsw_hio_reboot_callb(void *arg, int code); 108cdfc78adSraghuram static boolean_t vsw_hio_panic_callb(void *arg, int code); 109678453a8Sspeer 110da14cebeSEric Cheng /* 111da14cebeSEric Cheng * Locking strategy for HybridIO is followed as below: 112da14cebeSEric Cheng * 113da14cebeSEric Cheng * - As the Shares are associated with a network device, the 114da14cebeSEric Cheng * global lock('vswp->mac_lock') is used for all Shares 115da14cebeSEric Cheng * related operations. 116da14cebeSEric Cheng * - The 'port->maccl_rwlock' is used to synchronize only the 117da14cebeSEric Cheng * operations that operate on that port's mac client. That 118da14cebeSEric Cheng * is, the share_bind and unbind operations only. 119da14cebeSEric Cheng * 120da14cebeSEric Cheng * - The locking hierarchy follows that the global mac_lock is 121da14cebeSEric Cheng * acquired first and then the port's mac client lock(maccl_rwlock) 122da14cebeSEric Cheng */ 123da14cebeSEric Cheng 124da14cebeSEric Cheng 1256ab6cb20SWENTAO YANG static kstat_t *vsw_hio_setup_kstats(char *ks_mod, char *ks_name, vsw_t *vswp); 1266ab6cb20SWENTAO YANG static void vsw_hio_destroy_kstats(vsw_t *vswp); 1276ab6cb20SWENTAO YANG static int vsw_hio_kstats_update(kstat_t *ksp, int rw); 128678453a8Sspeer 129678453a8Sspeer /* 130678453a8Sspeer * vsw_hio_init -- Initialize the HybridIO related info. 131678453a8Sspeer * - Query SHARES and RINGS capability. Both capabilities 132678453a8Sspeer * need to be supported by the physical-device.
133678453a8Sspeer */ 134678453a8Sspeer void 135678453a8Sspeer vsw_hio_init(vsw_t *vswp) 136678453a8Sspeer { 137678453a8Sspeer vsw_hio_t *hiop = &vswp->vhio; 138da14cebeSEric Cheng int num_shares; 139678453a8Sspeer int i; 140678453a8Sspeer 141da14cebeSEric Cheng ASSERT(MUTEX_HELD(&vswp->mac_lock)); 142678453a8Sspeer D1(vswp, "%s:enter\n", __func__); 143678453a8Sspeer if (vsw_hio_enabled == B_FALSE) { 144678453a8Sspeer return; 145678453a8Sspeer } 146678453a8Sspeer 147678453a8Sspeer vswp->hio_capable = B_FALSE; 148da14cebeSEric Cheng num_shares = mac_share_capable(vswp->mh); 149da14cebeSEric Cheng if (num_shares == 0) { 150678453a8Sspeer D2(vswp, "%s: %s is not HybridIO capable\n", __func__, 151678453a8Sspeer vswp->physname); 152678453a8Sspeer return; 153678453a8Sspeer } 154da14cebeSEric Cheng hiop->vh_num_shares = num_shares; 155678453a8Sspeer hiop->vh_shares = kmem_zalloc((sizeof (vsw_share_t) * 156678453a8Sspeer hiop->vh_num_shares), KM_SLEEP); 157678453a8Sspeer for (i = 0; i < hiop->vh_num_shares; i++) { 158678453a8Sspeer hiop->vh_shares[i].vs_state = VSW_SHARE_FREE; 159678453a8Sspeer hiop->vh_shares[i].vs_index = i; 160678453a8Sspeer hiop->vh_shares[i].vs_vswp = vswp; 161678453a8Sspeer } 162678453a8Sspeer vswp->hio_capable = B_TRUE; 163cdfc78adSraghuram 164cdfc78adSraghuram /* 165cdfc78adSraghuram * Register to get reboot and panic events so that 166cdfc78adSraghuram * we can cleanup HybridIO resources gracefully. 
167cdfc78adSraghuram */ 168cdfc78adSraghuram vswp->hio_reboot_cb_id = callb_add(vsw_hio_reboot_callb, 169cdfc78adSraghuram (void *)vswp, CB_CL_MDBOOT, "vsw_hio"); 170cdfc78adSraghuram 171cdfc78adSraghuram vswp->hio_panic_cb_id = callb_add(vsw_hio_panic_callb, 172cdfc78adSraghuram (void *)vswp, CB_CL_PANIC, "vsw_hio"); 173cdfc78adSraghuram 1746ab6cb20SWENTAO YANG /* setup kstats for hybrid resources */ 1756ab6cb20SWENTAO YANG hiop->vh_ksp = vsw_hio_setup_kstats(DRV_NAME, "hio", vswp); 1766ab6cb20SWENTAO YANG if (hiop->vh_ksp == NULL) { 1776ab6cb20SWENTAO YANG DERR(vswp, "%s: kstats setup failed", __func__); 1786ab6cb20SWENTAO YANG } 1796ab6cb20SWENTAO YANG 180678453a8Sspeer D2(vswp, "%s: %s is HybridIO capable num_shares=%d\n", __func__, 181678453a8Sspeer vswp->physname, hiop->vh_num_shares); 182678453a8Sspeer D1(vswp, "%s:exit\n", __func__); 183678453a8Sspeer } 184678453a8Sspeer 185678453a8Sspeer /* 186678453a8Sspeer * vsw_hio_alloc_share -- Allocate and setup the share for a guest domain. 187678453a8Sspeer * - Allocate a free share. 188678453a8Sspeer * - Bind the Guest's MAC address. 
189678453a8Sspeer */ 190678453a8Sspeer static vsw_share_t * 191678453a8Sspeer vsw_hio_alloc_share(vsw_t *vswp, vsw_ldc_t *ldcp) 192678453a8Sspeer { 193678453a8Sspeer vsw_share_t *vsharep; 194678453a8Sspeer vsw_port_t *portp = ldcp->ldc_port; 195678453a8Sspeer uint64_t ldc_id = ldcp->ldc_id; 196678453a8Sspeer int rv; 197678453a8Sspeer 198678453a8Sspeer D1(vswp, "%s:enter\n", __func__); 199678453a8Sspeer vsharep = vsw_hio_find_free_share(vswp); 200678453a8Sspeer if (vsharep == NULL) { 201678453a8Sspeer /* No free shares available */ 202678453a8Sspeer return (NULL); 203678453a8Sspeer } 204da14cebeSEric Cheng 205da14cebeSEric Cheng WRITE_ENTER(&portp->maccl_rwlock); 206da14cebeSEric Cheng rv = mac_share_bind(portp->p_mch, ldc_id, &vsharep->vs_cookie); 207da14cebeSEric Cheng RW_EXIT(&portp->maccl_rwlock); 208678453a8Sspeer if (rv != 0) { 209678453a8Sspeer return (NULL); 210678453a8Sspeer } 211678453a8Sspeer 212678453a8Sspeer /* Cache some useful info */ 213678453a8Sspeer vsharep->vs_ldcid = ldcp->ldc_id; 214678453a8Sspeer vsharep->vs_macaddr = vnet_macaddr_strtoul( 215678453a8Sspeer portp->p_macaddr.ether_addr_octet); 216678453a8Sspeer vsharep->vs_portp = ldcp->ldc_port; 217678453a8Sspeer vsharep->vs_state |= VSW_SHARE_ASSIGNED; 218678453a8Sspeer 219678453a8Sspeer D1(vswp, "%s:exit\n", __func__); 220678453a8Sspeer return (vsharep); 221678453a8Sspeer } 222678453a8Sspeer 223678453a8Sspeer /* 224678453a8Sspeer * vsw_hio_find_free_share -- Find a free Share. 
225678453a8Sspeer */ 226678453a8Sspeer static vsw_share_t * 227678453a8Sspeer vsw_hio_find_free_share(vsw_t *vswp) 228678453a8Sspeer { 229678453a8Sspeer vsw_hio_t *hiop = &vswp->vhio; 230678453a8Sspeer vsw_share_t *vsharep; 231678453a8Sspeer int i; 232678453a8Sspeer 233678453a8Sspeer D1(vswp, "%s:enter\n", __func__); 234678453a8Sspeer for (i = 0; i < hiop->vh_num_shares; i++) { 235678453a8Sspeer vsharep = &hiop->vh_shares[i]; 236678453a8Sspeer if (vsharep->vs_state == VSW_SHARE_FREE) { 237678453a8Sspeer D1(vswp, "%s:Returning free share(%d)\n", 238678453a8Sspeer __func__, vsharep->vs_index); 239678453a8Sspeer return (vsharep); 240678453a8Sspeer } 241678453a8Sspeer } 242678453a8Sspeer D1(vswp, "%s:no free share\n", __func__); 243678453a8Sspeer return (NULL); 244678453a8Sspeer } 245678453a8Sspeer 246678453a8Sspeer /* 247678453a8Sspeer * vsw_hio_find_vshare_ldcid -- Given ldc_id, find the corresponding 248678453a8Sspeer * share structure. 249678453a8Sspeer */ 250678453a8Sspeer static vsw_share_t * 251678453a8Sspeer vsw_hio_find_vshare_ldcid(vsw_t *vswp, uint64_t ldc_id) 252678453a8Sspeer { 253678453a8Sspeer vsw_hio_t *hiop = &vswp->vhio; 254678453a8Sspeer vsw_share_t *vsharep; 255678453a8Sspeer int i; 256678453a8Sspeer 257678453a8Sspeer D1(vswp, "%s:enter, ldc=0x%lx", __func__, ldc_id); 258678453a8Sspeer for (i = 0; i < hiop->vh_num_shares; i++) { 259678453a8Sspeer vsharep = &hiop->vh_shares[i]; 260678453a8Sspeer if (vsharep->vs_state == VSW_SHARE_FREE) { 261678453a8Sspeer continue; 262678453a8Sspeer } 263678453a8Sspeer if (vsharep->vs_ldcid == ldc_id) { 264678453a8Sspeer D1(vswp, "%s:returning share(%d)", 265678453a8Sspeer __func__, vsharep->vs_index); 266678453a8Sspeer return (vsharep); 267678453a8Sspeer } 268678453a8Sspeer } 269678453a8Sspeer D1(vswp, "%s:returning NULL", __func__); 270678453a8Sspeer return (NULL); 271678453a8Sspeer } 272678453a8Sspeer 273678453a8Sspeer /* 274678453a8Sspeer * vsw_hio_find_vshare_port -- Given portp, find the corresponding 
275678453a8Sspeer * share structure. 276678453a8Sspeer */ 277678453a8Sspeer static vsw_share_t * 278678453a8Sspeer vsw_hio_find_vshare_port(vsw_t *vswp, vsw_port_t *portp) 279678453a8Sspeer { 280678453a8Sspeer vsw_hio_t *hiop = &vswp->vhio; 281678453a8Sspeer vsw_share_t *vsharep; 282678453a8Sspeer int i; 283678453a8Sspeer 284678453a8Sspeer D1(vswp, "%s:enter, portp=0x%p", __func__, portp); 285678453a8Sspeer for (i = 0; i < hiop->vh_num_shares; i++) { 286678453a8Sspeer vsharep = &hiop->vh_shares[i]; 287678453a8Sspeer if (vsharep->vs_state == VSW_SHARE_FREE) { 288678453a8Sspeer continue; 289678453a8Sspeer } 290678453a8Sspeer if (vsharep->vs_portp == portp) { 291678453a8Sspeer D1(vswp, "%s:returning share(%d)", 292678453a8Sspeer __func__, vsharep->vs_index); 293678453a8Sspeer return (vsharep); 294678453a8Sspeer } 295678453a8Sspeer } 296678453a8Sspeer D1(vswp, "%s:returning NULL", __func__); 297678453a8Sspeer return (NULL); 298678453a8Sspeer } 299678453a8Sspeer 300678453a8Sspeer /* 301678453a8Sspeer * vsw_hio_free_share -- Unbind the MAC address and free share. 
302678453a8Sspeer */ 303678453a8Sspeer static void 304678453a8Sspeer vsw_hio_free_share(vsw_share_t *vsharep) 305678453a8Sspeer { 306678453a8Sspeer vsw_t *vswp = vsharep->vs_vswp; 307da14cebeSEric Cheng vsw_port_t *portp = vsharep->vs_portp; 308678453a8Sspeer 309678453a8Sspeer D1(vswp, "%s:enter\n", __func__); 310678453a8Sspeer 311da14cebeSEric Cheng WRITE_ENTER(&portp->maccl_rwlock); 312da14cebeSEric Cheng mac_share_unbind(portp->p_mch); 313da14cebeSEric Cheng RW_EXIT(&portp->maccl_rwlock); 314678453a8Sspeer vsharep->vs_state = VSW_SHARE_FREE; 3156ab6cb20SWENTAO YANG vsharep->vs_macaddr = 0; 3166f09f0feSWENTAO YANG vsharep->vs_portp = NULL; 317678453a8Sspeer 318678453a8Sspeer /* DERR only for printing by default */ 319678453a8Sspeer DERR(vswp, "Share freed for ldc_id=0x%lx Cookie=0x%lX", 320678453a8Sspeer vsharep->vs_ldcid, vsharep->vs_cookie); 321678453a8Sspeer D1(vswp, "%s:exit\n", __func__); 322678453a8Sspeer } 323678453a8Sspeer 324678453a8Sspeer 325678453a8Sspeer /* 326cdfc78adSraghuram * vsw_hio_cleanup -- Cleanup the HybridIO. It unregisters the callbs 327cdfc78adSraghuram * and frees all shares. 328cdfc78adSraghuram */ 329cdfc78adSraghuram void 330cdfc78adSraghuram vsw_hio_cleanup(vsw_t *vswp) 331cdfc78adSraghuram { 332cdfc78adSraghuram D1(vswp, "%s:enter\n", __func__); 333cdfc78adSraghuram 334cdfc78adSraghuram /* Unregister reboot and panic callbs. 
*/ 335cdfc78adSraghuram if (vswp->hio_reboot_cb_id) { 336cdfc78adSraghuram (void) callb_delete(vswp->hio_reboot_cb_id); 337cdfc78adSraghuram vswp->hio_reboot_cb_id = 0; 338cdfc78adSraghuram } 339cdfc78adSraghuram if (vswp->hio_panic_cb_id) { 340cdfc78adSraghuram (void) callb_delete(vswp->hio_panic_cb_id); 341cdfc78adSraghuram vswp->hio_panic_cb_id = 0; 342cdfc78adSraghuram } 343cdfc78adSraghuram vsw_hio_free_all_shares(vswp, B_FALSE); 3446ab6cb20SWENTAO YANG vsw_hio_destroy_kstats(vswp); 345cdfc78adSraghuram D1(vswp, "%s:exit\n", __func__); 346cdfc78adSraghuram } 347cdfc78adSraghuram 348cdfc78adSraghuram /* 349cdfc78adSraghuram * vsw_hio_free_all_shares -- A routine to free all shares gracefully. 350678453a8Sspeer * The following are the steps followed to accomplish this: 351678453a8Sspeer * 352678453a8Sspeer * - First clear 'hio_capable' to avoid further share allocations. 353678453a8Sspeer * - If a share is in accepted(ACKD) state, that means the guest 354678453a8Sspeer * has HybridIO setup etc. If so, send a DEL_SHARE message and 355678453a8Sspeer * give some time(delay) for the guest to ACK. 356678453a8Sspeer * - If the Share is another state, give some time to transition to 357678453a8Sspeer * ACKD state, then try the above. 358678453a8Sspeer * - After max retries, reset the ports to brute force the shares 359678453a8Sspeer * to be freed. Give a little delay for the LDC reset code to 360678453a8Sspeer * free the Share. 
 */
static void
vsw_hio_free_all_shares(vsw_t *vswp, boolean_t reboot)
{
	vsw_hio_t	*hiop = &vswp->vhio;
	vsw_port_list_t	*plist = &vswp->plist;
	vsw_share_t	*vsharep;
	int		free_shares = 0;
	int		max_retries = vsw_hio_max_cleanup_retries;
	int		i;

	D1(vswp, "%s:enter\n", __func__);

	/*
	 * Acquire plist->lockrw to make the locking a bit easier
	 * and keep the ports in a stable state while we are cleaning up
	 * HybridIO.
	 */
	READ_ENTER(&plist->lockrw);
	mutex_enter(&vswp->mac_lock);
	/*
	 * first clear the hio_capable flag so that no more
	 * HybridIO operations are initiated.
	 */
	vswp->hio_capable = B_FALSE;

	/*
	 * Retry loop: each pass counts the shares already free and
	 * nudges the rest toward release (DEL_SHARE, then port reset).
	 */
	do {
		free_shares = 0;
		for (i = 0; i < hiop->vh_num_shares; i++) {
			vsharep = &hiop->vh_shares[i];
			if (vsharep->vs_state == VSW_SHARE_FREE) {
				free_shares++;
				continue;
			}
			/*
			 * If the share is in DDS_ACKD state, then
			 * send DEL_SHARE message so that guest can
			 * release its Hybrid resource.
			 */
			if (vsharep->vs_state & VSW_SHARE_DDS_ACKD) {
				int rv;

				/* send DDS_DEL_SHARE */
				D1(vswp, "%s:sending DEL_SHARE msg for "
				    "share(%d)", __func__, vsharep->vs_index);
				rv = vsw_hio_send_delshare_msg(vsharep);
				if (rv != 0) {
					/*
					 * No alternative, reset the port
					 * to force the release of Hybrid
					 * resources.
					 */
					vsw_hio_port_reset(vsharep->vs_portp,
					    B_FALSE);
				}
			}
			if (max_retries == 1) {
				/*
				 * Last retry, reset the port.
				 * If it is reboot case, issue an immediate
				 * reset.
				 */
				DWARN(vswp, "%s:All retries failed, "
				    " cause a reset to trigger cleanup for "
				    "share(%d)", __func__, vsharep->vs_index);
				vsw_hio_port_reset(vsharep->vs_portp, reboot);
			}
		}
		if (free_shares == hiop->vh_num_shares) {
			/* Clean up is done */
			break;
		}
		/*
		 * Release the lock so that reply for DEL_SHARE
		 * messages come and get processed, that is, shares
		 * get freed.
		 * This delay is also needed for the port reset to
		 * release the Hybrid resource.
		 */
		mutex_exit(&vswp->mac_lock);
		/* NOTE(review): drv_usecwait busy-waits; delay is tunable */
		drv_usecwait(vsw_hio_cleanup_delay);
		mutex_enter(&vswp->mac_lock);
		max_retries--;
	} while ((free_shares < hiop->vh_num_shares) && (max_retries > 0));

	/* By now, all shares should be freed */
	if (free_shares != hiop->vh_num_shares) {
		/* Stay quiet on the reboot path; warn otherwise. */
		if (reboot == B_FALSE) {
			cmn_err(CE_NOTE, "vsw%d: All physical resources "
			    "could not be freed", vswp->instance);
		}
	}

	/* Tear down the share table itself. */
	kmem_free(hiop->vh_shares, sizeof (vsw_share_t) * hiop->vh_num_shares);
	hiop->vh_shares = NULL;
	hiop->vh_num_shares = 0;
	mutex_exit(&vswp->mac_lock);
	RW_EXIT(&plist->lockrw);
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_start_ports -- Start HybridIO for ports that have
 * already established connection before HybridIO is initialized.
465678453a8Sspeer */ 466678453a8Sspeer void 467678453a8Sspeer vsw_hio_start_ports(vsw_t *vswp) 468678453a8Sspeer { 469678453a8Sspeer vsw_port_list_t *plist = &vswp->plist; 470678453a8Sspeer vsw_port_t *portp; 471678453a8Sspeer vsw_share_t *vsharep; 472678453a8Sspeer boolean_t reset; 473678453a8Sspeer 474678453a8Sspeer if (vswp->hio_capable == B_FALSE) { 475678453a8Sspeer return; 476678453a8Sspeer } 477678453a8Sspeer READ_ENTER(&plist->lockrw); 478678453a8Sspeer for (portp = plist->head; portp != NULL; portp = portp->p_next) { 479678453a8Sspeer if ((portp->p_hio_enabled == B_FALSE) || 480678453a8Sspeer (portp->p_hio_capable == B_FALSE)) { 481678453a8Sspeer continue; 482678453a8Sspeer } 483678453a8Sspeer 484678453a8Sspeer reset = B_FALSE; 485da14cebeSEric Cheng mutex_enter(&vswp->mac_lock); 486678453a8Sspeer vsharep = vsw_hio_find_vshare_port(vswp, portp); 487678453a8Sspeer if (vsharep == NULL) { 488678453a8Sspeer reset = B_TRUE; 489678453a8Sspeer } 490da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 491678453a8Sspeer 492678453a8Sspeer if (reset == B_TRUE) { 493678453a8Sspeer /* Cause a rest to trigger HybridIO setup */ 494cdfc78adSraghuram vsw_hio_port_reset(portp, B_FALSE); 495678453a8Sspeer } 496678453a8Sspeer } 497678453a8Sspeer RW_EXIT(&plist->lockrw); 498678453a8Sspeer } 499678453a8Sspeer 500678453a8Sspeer /* 501678453a8Sspeer * vsw_hio_start -- Start HybridIO for a guest(given LDC) 502678453a8Sspeer */ 503678453a8Sspeer void 504678453a8Sspeer vsw_hio_start(vsw_t *vswp, vsw_ldc_t *ldcp) 505678453a8Sspeer { 506678453a8Sspeer vsw_share_t *vsharep; 507678453a8Sspeer uint32_t req_id; 508678453a8Sspeer int rv; 509678453a8Sspeer 510678453a8Sspeer D1(vswp, "%s:enter ldc=0x%lx", __func__, ldcp->ldc_id); 511da14cebeSEric Cheng mutex_enter(&vswp->mac_lock); 512678453a8Sspeer if (vswp->hio_capable == B_FALSE) { 513da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 514678453a8Sspeer D2(vswp, "%s:not HIO capable", __func__); 515678453a8Sspeer return; 516678453a8Sspeer } 
517678453a8Sspeer 518678453a8Sspeer /* Verify if a share was already allocated */ 519678453a8Sspeer vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id); 520678453a8Sspeer if (vsharep != NULL) { 521da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 522678453a8Sspeer D2(vswp, "%s:Share already allocated to ldc=0x%lx", 523678453a8Sspeer __func__, ldcp->ldc_id); 524678453a8Sspeer return; 525678453a8Sspeer } 526678453a8Sspeer vsharep = vsw_hio_alloc_share(vswp, ldcp); 527678453a8Sspeer if (vsharep == NULL) { 528da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 529678453a8Sspeer D2(vswp, "%s: no Share available for ldc=0x%lx", 530678453a8Sspeer __func__, ldcp->ldc_id); 531678453a8Sspeer return; 532678453a8Sspeer } 533678453a8Sspeer req_id = VSW_DDS_NEXT_REQID(vsharep); 534678453a8Sspeer rv = vsw_send_dds_msg(ldcp, DDS_VNET_ADD_SHARE, vsharep->vs_cookie, 535678453a8Sspeer vsharep->vs_macaddr, req_id); 536678453a8Sspeer if (rv != 0) { 537678453a8Sspeer /* 538678453a8Sspeer * Failed to send a DDS message, so cleanup now. 539678453a8Sspeer */ 540678453a8Sspeer vsw_hio_free_share(vsharep); 541da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 542678453a8Sspeer return; 543678453a8Sspeer } 544cdfc78adSraghuram vsharep->vs_state &= ~VSW_SHARE_DDS_ACKD; 545678453a8Sspeer vsharep->vs_state |= VSW_SHARE_DDS_SENT; 546da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 547678453a8Sspeer 548678453a8Sspeer /* DERR only to print by default */ 549678453a8Sspeer DERR(vswp, "Share allocated for ldc_id=0x%lx Cookie=0x%lX", 550678453a8Sspeer ldcp->ldc_id, vsharep->vs_cookie); 551678453a8Sspeer 552678453a8Sspeer D1(vswp, "%s:exit ldc=0x%lx", __func__, ldcp->ldc_id); 553678453a8Sspeer } 554678453a8Sspeer 555678453a8Sspeer /* 556678453a8Sspeer * vsw_hio_stop -- Stop/clean the HybridIO config for a guest(given ldc). 
 */
void
vsw_hio_stop(vsw_t *vswp, vsw_ldc_t *ldcp)
{
	vsw_share_t	*vsharep;

	D1(vswp, "%s:enter ldc=0x%lx", __func__, ldcp->ldc_id);

	mutex_enter(&vswp->mac_lock);
	/* Nothing to do if no share was ever assigned to this channel. */
	vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id);
	if (vsharep == NULL) {
		D1(vswp, "%s:no share found for ldc=0x%lx",
		    __func__, ldcp->ldc_id);
		mutex_exit(&vswp->mac_lock);
		return;
	}
	vsw_hio_free_share(vsharep);
	mutex_exit(&vswp->mac_lock);

	D1(vswp, "%s:exit ldc=0x%lx", __func__, ldcp->ldc_id);
}

/*
 * vsw_hio_send_delshare_msg -- Send a DEL_SHARE message to the guest.
 *
 * Called with vswp->mac_lock held; the lock is dropped across the LDC
 * send and re-acquired before returning, on every path.
 */
static int
vsw_hio_send_delshare_msg(vsw_share_t *vsharep)
{
	vsw_t		*vswp = vsharep->vs_vswp;
	vsw_port_t	*portp;
	vsw_ldc_t	*ldcp;
	uint32_t	req_id;
	/* Snapshot cookie/macaddr while mac_lock is still held. */
	uint64_t	cookie = vsharep->vs_cookie;
	uint64_t	macaddr = vsharep->vs_macaddr;
	int		rv;

	ASSERT(MUTEX_HELD(&vswp->mac_lock));
	mutex_exit(&vswp->mac_lock);

	portp = vsharep->vs_portp;
	if (portp == NULL) {
		/* Port is gone; nothing to send. */
		mutex_enter(&vswp->mac_lock);
		return (0);
	}

	ldcp = portp->ldcp;
	if ((ldcp == NULL) || (ldcp->ldc_id != vsharep->vs_ldcid)) {
		/* Channel is gone or no longer matches this share. */
		mutex_enter(&vswp->mac_lock);
		return (0);
	}
	req_id = VSW_DDS_NEXT_REQID(vsharep);
	rv = vsw_send_dds_msg(ldcp, DDS_VNET_DEL_SHARE,
	    cookie, macaddr, req_id);

	mutex_enter(&vswp->mac_lock);
	if (rv == 0) {
		/* Mark the DEL_SHARE as outstanding until the guest replies */
		vsharep->vs_state &= ~VSW_SHARE_DDS_ACKD;
		vsharep->vs_state |= VSW_SHARE_DDS_SENT;
	}
	return (rv);
}

/*
 * vsw_send_dds_msg -- Send a DDS message.
 */
static int
vsw_send_dds_msg(vsw_ldc_t *ldcp, uint8_t dds_subclass, uint64_t
    cookie, uint64_t macaddr, uint32_t req_id)
{
	vsw_t *vswp = ldcp->ldc_port->p_vswp;
	vio_dds_msg_t	vmsg;
	dds_share_msg_t	*smsg = &vmsg.msg.share_msg;
	int rv;

	D1(vswp, "%s:enter\n", __func__);
	/* Build a DDS INFO control message for the vnet NIU class. */
	vmsg.tag.vio_msgtype = VIO_TYPE_CTRL;
	vmsg.tag.vio_subtype = VIO_SUBTYPE_INFO;
	vmsg.tag.vio_subtype_env = VIO_DDS_INFO;
	vmsg.tag.vio_sid = ldcp->local_session;
	vmsg.dds_class = DDS_VNET_NIU;
	vmsg.dds_subclass = dds_subclass;
	vmsg.dds_req_id = req_id;
	smsg->macaddr = macaddr;
	smsg->cookie = cookie;
	rv = vsw_send_msg(ldcp, &vmsg, sizeof (vmsg), B_FALSE);
	D1(vswp, "%s:exit rv=%d\n", __func__, rv);
	return (rv);
}

/*
 * vsw_process_dds_msg -- Process a DDS message received from a guest.
 */
void
vsw_process_dds_msg(vsw_t *vswp, vsw_ldc_t *ldcp, void *msg)
{
	vsw_share_t	*vsharep;
	vio_dds_msg_t	*dmsg = msg;

	D1(vswp, "%s:enter ldc=0x%lx\n", __func__, ldcp->ldc_id);
	if (dmsg->dds_class != DDS_VNET_NIU) {
		/* discard */
		return;
	}
	mutex_enter(&vswp->mac_lock);
	/*
	 * We expect to receive DDS messages only from guests that
	 * have HybridIO started.
	 */
	vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id);
	if (vsharep == NULL) {
		mutex_exit(&vswp->mac_lock);
		return;
	}

	switch (dmsg->dds_subclass) {
	case DDS_VNET_ADD_SHARE:
		/* A response for ADD_SHARE message. */
		D1(vswp, "%s:DDS_VNET_ADD_SHARE\n", __func__);
		/* Only valid while an ADD_SHARE request is outstanding. */
		if (!(vsharep->vs_state & VSW_SHARE_DDS_SENT)) {
			DWARN(vswp, "%s: invalid ADD_SHARE response message "
			    " share state=0x%X", __func__, vsharep->vs_state);
			break;
		}

		/* The response must match the request id we sent last. */
		if (dmsg->dds_req_id != vsharep->vs_req_id) {
			DWARN(vswp, "%s: invalid req_id in ADD_SHARE response"
			    " message req_id=0x%X share's req_id=0x%X",
			    __func__, dmsg->dds_req_id, vsharep->vs_req_id);
			break;
		}

		if (dmsg->tag.vio_subtype == VIO_SUBTYPE_NACK) {
			DWARN(vswp, "%s: NACK received for ADD_SHARE"
			    " message ldcid=0x%lx", __func__, ldcp->ldc_id);
			/* cleanup for NACK */
			vsw_hio_free_share(vsharep);
		} else {
			/* ACK: the share is now accepted by the guest. */
			D2(vswp, "%s: ACK received for ADD_SHARE", __func__);
			vsharep->vs_state &= ~VSW_SHARE_DDS_SENT;
			vsharep->vs_state |= VSW_SHARE_DDS_ACKD;
		}
		break;

	case DDS_VNET_DEL_SHARE:
		/* A response for DEL_SHARE message */
		D1(vswp, "%s:DDS_VNET_DEL_SHARE\n", __func__);
		if (!(vsharep->vs_state & VSW_SHARE_DDS_SENT)) {
			DWARN(vswp, "%s: invalid DEL_SHARE response message "
			    " share state=0x%X", __func__, vsharep->vs_state);
			break;
		}

		if (dmsg->dds_req_id != vsharep->vs_req_id) {
			DWARN(vswp, "%s: invalid req_id in DEL_SHARE response"
			    " message share req_id=0x%X share's req_id=0x%X",
			    __func__, dmsg->dds_req_id, vsharep->vs_req_id);
			break;
		}
		if (dmsg->tag.vio_subtype == VIO_SUBTYPE_NACK) {
			DWARN(vswp, "%s: NACK received for DEL_SHARE",
			    __func__);
		}

		/* There is nothing we can do, free share now */
		vsw_hio_free_share(vsharep);
		break;

	case DDS_VNET_REL_SHARE:
		/* Guest has released Share voluntarily, so free it now */
		D1(vswp, "%s:DDS_VNET_REL_SHARE\n", __func__);
		/*
		 * send ACK
		 * NOTE(review): B_FALSE actually produces a NACK response in
		 * vsw_send_dds_resp_msg; confirm whether an ACK (B_TRUE) was
		 * intended here.
		 */
		(void) vsw_send_dds_resp_msg(ldcp, dmsg, B_FALSE);
		vsw_hio_free_share(vsharep);
		break;
	default:
		DERR(vswp, "%s: Invalid DDS message type=0x%X",
		    __func__, dmsg->dds_subclass);
		break;
	}
	mutex_exit(&vswp->mac_lock);
	D1(vswp, "%s:exit ldc=0x%lx\n", __func__, ldcp->ldc_id);
}

/*
 * vsw_send_dds_resp_msg -- Send a DDS response message.
742678453a8Sspeer */ 743678453a8Sspeer static int 744678453a8Sspeer vsw_send_dds_resp_msg(vsw_ldc_t *ldcp, vio_dds_msg_t *dmsg, int ack) 745678453a8Sspeer { 746678453a8Sspeer vsw_t *vswp = ldcp->ldc_port->p_vswp; 747678453a8Sspeer int rv; 748678453a8Sspeer 749678453a8Sspeer D1(vswp, "%s:enter\n", __func__); 750678453a8Sspeer if (ack == B_TRUE) { 751678453a8Sspeer dmsg->tag.vio_subtype = VIO_SUBTYPE_ACK; 752678453a8Sspeer dmsg->msg.share_resp_msg.status = DDS_VNET_SUCCESS; 753678453a8Sspeer } else { 754678453a8Sspeer dmsg->tag.vio_subtype = VIO_SUBTYPE_NACK; 755678453a8Sspeer dmsg->msg.share_resp_msg.status = DDS_VNET_FAIL; 756678453a8Sspeer } 757678453a8Sspeer rv = vsw_send_msg(ldcp, dmsg, sizeof (vio_dds_msg_t), B_FALSE); 758678453a8Sspeer D1(vswp, "%s:exit rv=%d\n", __func__, rv); 759678453a8Sspeer return (rv); 760678453a8Sspeer } 761678453a8Sspeer 762678453a8Sspeer /* 763678453a8Sspeer * vsw_hio_port_update -- update Hybrid mode change for a port. 764678453a8Sspeer */ 765678453a8Sspeer void 766678453a8Sspeer vsw_hio_port_update(vsw_port_t *portp, boolean_t hio_enabled) 767678453a8Sspeer { 768678453a8Sspeer /* Verify if the mode really changed */ 769678453a8Sspeer if (portp->p_hio_enabled == hio_enabled) { 770678453a8Sspeer return; 771678453a8Sspeer } 772678453a8Sspeer 773678453a8Sspeer if (hio_enabled == B_FALSE) { 774678453a8Sspeer /* Hybrid Mode is disabled, so stop HybridIO */ 775678453a8Sspeer vsw_hio_stop_port(portp); 776678453a8Sspeer portp->p_hio_enabled = B_FALSE; 777da14cebeSEric Cheng 778da14cebeSEric Cheng vsw_port_mac_reconfig(portp, B_FALSE, 0, NULL, 0); 779678453a8Sspeer } else { 780678453a8Sspeer portp->p_hio_enabled = B_TRUE; 781da14cebeSEric Cheng vsw_port_mac_reconfig(portp, B_FALSE, 0, NULL, 0); 782da14cebeSEric Cheng 783678453a8Sspeer /* reset the port to initiate HybridIO setup */ 784cdfc78adSraghuram vsw_hio_port_reset(portp, B_FALSE); 785678453a8Sspeer } 786678453a8Sspeer } 787678453a8Sspeer 788678453a8Sspeer /* 789678453a8Sspeer * 
 * vsw_hio_stop_port -- Stop HybridIO for a given port. Sequence
 * followed is similar to vsw_hio_free_all_shares().
 *
 * Tries to free the port's share gracefully by sending a DEL_SHARE
 * message, retrying up to vsw_hio_max_cleanup_retries times; on the
 * final retry (or if the send fails) it falls back to a port reset to
 * force cleanup.  Runs with vswp->mac_lock held, dropping it briefly
 * each iteration so DEL_SHARE replies can be processed.
 */
void
vsw_hio_stop_port(vsw_port_t *portp)
{
	vsw_t *vswp = portp->p_vswp;
	vsw_share_t *vsharep;
	int max_retries = vsw_hio_max_cleanup_retries;

	D1(vswp, "%s:enter\n", __func__);
	mutex_enter(&vswp->mac_lock);

	/* Nothing to stop if the device never supported HybridIO. */
	if (vswp->hio_capable == B_FALSE) {
		mutex_exit(&vswp->mac_lock);
		return;
	}

	/* Locate the share currently assigned to this port, if any. */
	vsharep = vsw_hio_find_vshare_port(vswp, portp);
	if (vsharep == NULL) {
		mutex_exit(&vswp->mac_lock);
		return;
	}

	do {
		if (vsharep->vs_state & VSW_SHARE_DDS_ACKD) {
			int rv;

			/* send DDS_DEL_SHARE */
			D1(vswp, "%s:sending DEL_SHARE msg for "
			    "share(%d)", __func__, vsharep->vs_index);
			rv = vsw_hio_send_delshare_msg(vsharep);
			if (rv != 0) {
				/*
				 * Cause a port reset to trigger
				 * cleanup.
				 */
				vsw_hio_port_reset(vsharep->vs_portp, B_FALSE);
			}
		}
		if (max_retries == 1) {
			/* last retry */
			DWARN(vswp, "%s:All retries failed, "
			    " cause a reset to trigger cleanup for "
			    "share(%d)", __func__, vsharep->vs_index);
			vsw_hio_port_reset(vsharep->vs_portp, B_FALSE);
		}

		/* Check if the share still assigned to this port */
		if ((vsharep->vs_portp != portp) ||
		    (vsharep->vs_state == VSW_SHARE_FREE)) {
			break;
		}

		/*
		 * Release the lock so that reply for DEL_SHARE
		 * messages come and get processed, that is, shares
		 * get freed.
		 */
		mutex_exit(&vswp->mac_lock);
		drv_usecwait(vsw_hio_cleanup_delay);
		mutex_enter(&vswp->mac_lock);

		/*
		 * Check if the share still assigned to this port.
		 * NOTE(review): the share may have been freed or moved
		 * while the lock was dropped, hence the re-check here.
		 */
		if ((vsharep->vs_portp != portp) ||
		    (vsharep->vs_state == VSW_SHARE_FREE)) {
			break;
		}
		max_retries--;
	} while ((vsharep->vs_state != VSW_SHARE_FREE) && (max_retries > 0));

	mutex_exit(&vswp->mac_lock);
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_reset_all -- Resets all ports that have shares allocated.
 * It is called only in the panic code path, so the LDC channels
 * are reset immediately.
869cdfc78adSraghuram */ 870cdfc78adSraghuram static void 871cdfc78adSraghuram vsw_hio_reset_all(vsw_t *vswp) 872cdfc78adSraghuram { 873cdfc78adSraghuram vsw_hio_t *hiop = &vswp->vhio; 874cdfc78adSraghuram vsw_share_t *vsharep; 875cdfc78adSraghuram int i; 876cdfc78adSraghuram 877cdfc78adSraghuram D1(vswp, "%s:enter\n", __func__); 878cdfc78adSraghuram 879cdfc78adSraghuram if (vswp->hio_capable != B_TRUE) 880cdfc78adSraghuram return; 881cdfc78adSraghuram 882cdfc78adSraghuram for (i = 0; i < hiop->vh_num_shares; i++) { 883cdfc78adSraghuram vsharep = &hiop->vh_shares[i]; 884cdfc78adSraghuram if (vsharep->vs_state == VSW_SHARE_FREE) { 885cdfc78adSraghuram continue; 886cdfc78adSraghuram } 887cdfc78adSraghuram /* 888cdfc78adSraghuram * Reset the port with immediate flag enabled, 889cdfc78adSraghuram * to cause LDC reset immediately. 890cdfc78adSraghuram */ 891cdfc78adSraghuram vsw_hio_port_reset(vsharep->vs_portp, B_TRUE); 892cdfc78adSraghuram } 893cdfc78adSraghuram D1(vswp, "%s:exit\n", __func__); 894cdfc78adSraghuram } 895cdfc78adSraghuram 896cdfc78adSraghuram /* 897cdfc78adSraghuram * vsw_hio_reboot_callb -- Called for reboot event. It tries to 898cdfc78adSraghuram * free all currently allocated shares. 899cdfc78adSraghuram */ 900cdfc78adSraghuram /* ARGSUSED */ 901cdfc78adSraghuram static boolean_t 902cdfc78adSraghuram vsw_hio_reboot_callb(void *arg, int code) 903cdfc78adSraghuram { 904cdfc78adSraghuram vsw_t *vswp = arg; 905cdfc78adSraghuram 906cdfc78adSraghuram D1(vswp, "%s:enter\n", __func__); 907cdfc78adSraghuram vsw_hio_free_all_shares(vswp, B_TRUE); 908cdfc78adSraghuram D1(vswp, "%s:exit\n", __func__); 909cdfc78adSraghuram return (B_TRUE); 910cdfc78adSraghuram } 911cdfc78adSraghuram 912cdfc78adSraghuram /* 913cdfc78adSraghuram * vsw_hio_panic_callb -- Called from panic event. It resets all 914cdfc78adSraghuram * the ports that have shares allocated. This is done to 915cdfc78adSraghuram * trigger the cleanup in the guest ahead of HV reset. 
916cdfc78adSraghuram */ 917cdfc78adSraghuram /* ARGSUSED */ 918cdfc78adSraghuram static boolean_t 919cdfc78adSraghuram vsw_hio_panic_callb(void *arg, int code) 920cdfc78adSraghuram { 921cdfc78adSraghuram vsw_t *vswp = arg; 922cdfc78adSraghuram 923cdfc78adSraghuram D1(vswp, "%s:enter\n", __func__); 924cdfc78adSraghuram vsw_hio_reset_all(vswp); 925cdfc78adSraghuram D1(vswp, "%s:exit\n", __func__); 926cdfc78adSraghuram return (B_TRUE); 927cdfc78adSraghuram } 9286ab6cb20SWENTAO YANG 9296ab6cb20SWENTAO YANG /* 9306ab6cb20SWENTAO YANG * Setup kstats for hio statistics. 9316ab6cb20SWENTAO YANG */ 9326ab6cb20SWENTAO YANG static kstat_t * 9336ab6cb20SWENTAO YANG vsw_hio_setup_kstats(char *ks_mod, char *ks_name, vsw_t *vswp) 9346ab6cb20SWENTAO YANG { 9356ab6cb20SWENTAO YANG kstat_t *ksp; 9366ab6cb20SWENTAO YANG vsw_hio_kstats_t *hiokp; 9376ab6cb20SWENTAO YANG vsw_hio_t *hiop; 9386ab6cb20SWENTAO YANG char share_assigned_info[MAXNAMELEN]; 9396ab6cb20SWENTAO YANG size_t size; 9406ab6cb20SWENTAO YANG int i; 9416ab6cb20SWENTAO YANG 9426ab6cb20SWENTAO YANG hiop = &vswp->vhio; 9436ab6cb20SWENTAO YANG /* 9446ab6cb20SWENTAO YANG * vsw_hio_stats_t structure is variable size structure 9456ab6cb20SWENTAO YANG * having fields defined only for one share. So, we need 9466ab6cb20SWENTAO YANG * allocate additional space for the rest of the shares. 
9476ab6cb20SWENTAO YANG */ 9486ab6cb20SWENTAO YANG size = sizeof (vsw_hio_kstats_t) / sizeof (kstat_named_t); 9496ab6cb20SWENTAO YANG ASSERT(hiop->vh_num_shares >= 1); 9506ab6cb20SWENTAO YANG size += ((hiop->vh_num_shares - 1) * 2); 9516ab6cb20SWENTAO YANG 9526ab6cb20SWENTAO YANG ksp = kstat_create(ks_mod, vswp->instance, ks_name, "misc", 9536ab6cb20SWENTAO YANG KSTAT_TYPE_NAMED, size, KSTAT_FLAG_VIRTUAL); 9546ab6cb20SWENTAO YANG 9556ab6cb20SWENTAO YANG if (ksp == NULL) { 9566ab6cb20SWENTAO YANG return (NULL); 9576ab6cb20SWENTAO YANG } 9586ab6cb20SWENTAO YANG hiokp = (vsw_hio_kstats_t *)kmem_zalloc(sizeof (kstat_named_t) * 9596ab6cb20SWENTAO YANG size, KM_SLEEP); 9606ab6cb20SWENTAO YANG ksp->ks_data = hiokp; 9616ab6cb20SWENTAO YANG 9626ab6cb20SWENTAO YANG hiop->vh_ksp = ksp; 9636ab6cb20SWENTAO YANG hiop->vh_kstatsp = hiokp; 9646ab6cb20SWENTAO YANG hiop->vh_kstat_size = size; 9656ab6cb20SWENTAO YANG 9666ab6cb20SWENTAO YANG kstat_named_init(&hiokp->hio_capable, "hio_capable", KSTAT_DATA_CHAR); 9676ab6cb20SWENTAO YANG kstat_named_init(&hiokp->hio_num_shares, "hio_num_shares", 9686ab6cb20SWENTAO YANG KSTAT_DATA_ULONG); 9696ab6cb20SWENTAO YANG 9706ab6cb20SWENTAO YANG for (i = 0; i < hiop->vh_num_shares; i++) { 9716ab6cb20SWENTAO YANG (void) sprintf(share_assigned_info, "%s%d", "hio_share_", i); 9726ab6cb20SWENTAO YANG kstat_named_init(&(hiokp->share[i].assigned), 9736ab6cb20SWENTAO YANG share_assigned_info, KSTAT_DATA_ULONG); 9746ab6cb20SWENTAO YANG 9756ab6cb20SWENTAO YANG (void) sprintf(share_assigned_info, "%s%d%s", 9766ab6cb20SWENTAO YANG "hio_share_", i, "_state"); 9776ab6cb20SWENTAO YANG kstat_named_init(&(hiokp->share[i].state), 9786ab6cb20SWENTAO YANG share_assigned_info, KSTAT_DATA_ULONG); 9796ab6cb20SWENTAO YANG } 9806ab6cb20SWENTAO YANG 9816ab6cb20SWENTAO YANG ksp->ks_update = vsw_hio_kstats_update; 9826ab6cb20SWENTAO YANG ksp->ks_private = (void *)vswp; 9836ab6cb20SWENTAO YANG kstat_install(ksp); 9846ab6cb20SWENTAO YANG return (ksp); 9856ab6cb20SWENTAO YANG } 
9866ab6cb20SWENTAO YANG 9876ab6cb20SWENTAO YANG /* 9886ab6cb20SWENTAO YANG * Destroy hio kstats. 9896ab6cb20SWENTAO YANG */ 9906ab6cb20SWENTAO YANG static void 9916ab6cb20SWENTAO YANG vsw_hio_destroy_kstats(vsw_t *vswp) 9926ab6cb20SWENTAO YANG { 9936ab6cb20SWENTAO YANG kstat_t *ksp; 9946ab6cb20SWENTAO YANG vsw_hio_t *hiop; 9956ab6cb20SWENTAO YANG 9966ab6cb20SWENTAO YANG ASSERT(vswp != NULL); 9976ab6cb20SWENTAO YANG 9986ab6cb20SWENTAO YANG ksp = vswp->vhio.vh_ksp; 9996ab6cb20SWENTAO YANG hiop = &vswp->vhio; 10006ab6cb20SWENTAO YANG if (ksp != NULL) { 10016ab6cb20SWENTAO YANG kmem_free(hiop->vh_kstatsp, sizeof (kstat_named_t) * 10026ab6cb20SWENTAO YANG hiop->vh_kstat_size); 10036ab6cb20SWENTAO YANG kstat_delete(ksp); 10046ab6cb20SWENTAO YANG hiop->vh_kstatsp = NULL; 10056ab6cb20SWENTAO YANG hiop->vh_ksp = NULL; 10066ab6cb20SWENTAO YANG } 10076ab6cb20SWENTAO YANG } 10086ab6cb20SWENTAO YANG 10096ab6cb20SWENTAO YANG /* 10106ab6cb20SWENTAO YANG * Update hio kstats. 10116ab6cb20SWENTAO YANG */ 10126ab6cb20SWENTAO YANG static int 10136ab6cb20SWENTAO YANG vsw_hio_kstats_update(kstat_t *ksp, int rw) 10146ab6cb20SWENTAO YANG { 10156ab6cb20SWENTAO YANG vsw_t *vswp; 10166ab6cb20SWENTAO YANG vsw_hio_t *hiop; 10176ab6cb20SWENTAO YANG vsw_hio_kstats_t *hiokp; 10186ab6cb20SWENTAO YANG int i; 10196ab6cb20SWENTAO YANG 10206ab6cb20SWENTAO YANG vswp = (vsw_t *)ksp->ks_private; 10216ab6cb20SWENTAO YANG ASSERT(vswp != NULL); 10226ab6cb20SWENTAO YANG 10236ab6cb20SWENTAO YANG hiop = &vswp->vhio; 10246ab6cb20SWENTAO YANG hiokp = hiop->vh_kstatsp; 10256ab6cb20SWENTAO YANG 10266ab6cb20SWENTAO YANG if (rw == KSTAT_READ) { 10276ab6cb20SWENTAO YANG if (vswp->hio_capable) { 10286ab6cb20SWENTAO YANG (void) strcpy(hiokp->hio_capable.value.c, "Yes"); 10296ab6cb20SWENTAO YANG } else { 10306ab6cb20SWENTAO YANG /* not hio capable, just return */ 10316ab6cb20SWENTAO YANG (void) strcpy(hiokp->hio_capable.value.c, "No"); 10326ab6cb20SWENTAO YANG return (0); 10336ab6cb20SWENTAO YANG } 10346ab6cb20SWENTAO 
YANG 1035da14cebeSEric Cheng mutex_enter(&vswp->mac_lock); 10366ab6cb20SWENTAO YANG hiokp->hio_num_shares.value.ul = (uint32_t)hiop->vh_num_shares; 10376ab6cb20SWENTAO YANG for (i = 0; i < hiop->vh_num_shares; i++) { 10386ab6cb20SWENTAO YANG hiokp->share[i].assigned.value.ul = 10396ab6cb20SWENTAO YANG hiop->vh_shares[i].vs_macaddr; 10406ab6cb20SWENTAO YANG hiokp->share[i].state.value.ul = 10416ab6cb20SWENTAO YANG hiop->vh_shares[i].vs_state; 10426ab6cb20SWENTAO YANG } 1043da14cebeSEric Cheng mutex_exit(&vswp->mac_lock); 10446ab6cb20SWENTAO YANG } else { 10456ab6cb20SWENTAO YANG return (EACCES); 10466ab6cb20SWENTAO YANG } 10476ab6cb20SWENTAO YANG 10486ab6cb20SWENTAO YANG return (0); 10496ab6cb20SWENTAO YANG } 1050