/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, false);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, false);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < sc->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_failure(sc, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


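/*
 * Set the VF's port VLAN and adjust its receive filtering (VMOLR/VMVIR)
 * for the given tag.
 */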
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

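/* Zero out the PF-VF mailbox memory for this VF's pool. */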
static void
ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */

static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (true);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (false);

		return (true);

		break;
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (true);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (true);

		return (false);
	}
} /* ixgbe_vf_frame_size_compatible */


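/*
 * Restore a VF to its default state after a reset request: reapply the
 * default VLAN, clear its RAR entry and mailbox memory, toggle its TX
 * queue enables, and forget the negotiated API version.
 */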
static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&sc->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(sc, vf);
	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


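/*
 * Set the VF's bit in VFRE only if its frame size is compatible with the
 * PF; otherwise leave reception disabled for this pool.
 */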
static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


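/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reprogram its
 * MAC filter, re-enable rx/tx, and reply with the permanent MAC address
 * and multicast filter type.
 */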
static void
ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &sc->hw;

	ixgbe_process_vf_reset(sc, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, true);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


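/*
 * Handle a SET_MAC_ADDR mailbox message; reject it if the VF may not
 * change its MAC address or the proposed address is invalid.
 */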
static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    true);

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by using the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 possible hash values).
 */
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


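/*
 * Handle a SET_VLAN mailbox message by updating the VLAN filter table
 * for the VF's pool.
 */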
static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


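/*
 * Handle a SET_LPE (max frame size) mailbox message and, if needed, grow
 * the PF's maximum frame size to match.
 */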
static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_failure(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


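/*
 * Handle an API_NEGOTIATE mailbox message; only API versions 1.0 and 1.1
 * are accepted.
 */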
static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_success(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


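/*
 * Handle a GET_QUEUES mailbox message (API 1.1 and later) by reporting
 * the VF's queue counts and transparent VLAN setting.
 */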
static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


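/*
 * Read one pending mailbox message from the VF and dispatch it to the
 * handler for its message type.
 */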
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
#ifdef KTR
	if_t		ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
	    vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", if_name(ifp),
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t        ctx = context;
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(sc, vf);

			if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(sc, vf);
		}
	}
} /* ixgbe_handle_mbx */

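/************************************************************************
 * ixgbe_if_iov_init - Set up SR-IOV state for the requested number of VFs
 ************************************************************************/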
int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = iflib_get_softc(ctx);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&sc->hw);

	sc->feat_en |= IXGBE_FEATURE_SRIOV;
	ixgbe_if_init(sc->ctx);

	return (retval);

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

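/************************************************************************
 * ixgbe_if_iov_uninit - Tear down SR-IOV state and disable VF rx/tx
 ************************************************************************/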
void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

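/*
 * Program per-VF state into the hardware: mailbox interrupt, default
 * VLAN, MAC filter and rx/tx enables, then notify the VF with a control
 * message.
 */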
static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

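/************************************************************************
 * ixgbe_initialize_iov - Program the hardware for the configured SR-IOV
 *                        mode and initialize each active VF
 ************************************************************************/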
void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

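/************************************************************************
 * ixgbe_if_iov_vf_add - Configure a newly added VF from its nvlist config
 ************************************************************************/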
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = iflib_get_softc(ctx);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif