xref: /freebsd/sys/dev/ixl/ixl_pf_iov.c (revision f39bffc62c1395bde25d152c7f68fdf7cbaab414)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl_pf_iov.h"
36 
37 /* Private functions */
38 static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41 
42 static bool	ixl_zero_mac(const uint8_t *addr);
43 static bool	ixl_bcast_mac(const uint8_t *addr);
44 
45 static int	ixl_vc_opcode_level(uint16_t opcode);
46 
47 static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
48 
49 static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
53 static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
54 static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
55 static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
58 static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
59 static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
60 static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
61 static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
64 static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
65 static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
67     enum i40e_queue_type *last_type, uint16_t *last_queue);
68 static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
69 static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
75 static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
80 
81 static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
82 
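/*
 * Register the SR-IOV configuration schemas for this PF and its VFs with
 * the PCI IOV framework so VFs can later be created through pci_iov(4).
 * SR-IOV is only enabled when the PF is running with MSI-X.
 */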
83 void
84 ixl_initialize_sriov(struct ixl_pf *pf)
85 {
86 	device_t dev = pf->dev;
87 	struct i40e_hw *hw = &pf->hw;
88 	nvlist_t	*pf_schema, *vf_schema;
89 	int		iov_error;
90 
91 	/* SR-IOV is only supported when MSI-X is in use. */
92 	if (pf->msix <= 1)
93 		return;
94 
95 	pf_schema = pci_iov_schema_alloc_node();
96 	vf_schema = pci_iov_schema_alloc_node();
97 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
98 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
99 	    IOV_SCHEMA_HASDEFAULT, TRUE);
100 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
101 	    IOV_SCHEMA_HASDEFAULT, FALSE);
102 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
103 	    IOV_SCHEMA_HASDEFAULT, FALSE);
104 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
105 	    IOV_SCHEMA_HASDEFAULT,
106 	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
107 
108 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
109 	if (iov_error != 0) {
110 		device_printf(dev,
111 		    "Failed to initialize SR-IOV (error=%d)\n",
112 		    iov_error);
113 	} else
114 		device_printf(dev, "SR-IOV ready\n");
115 
116 	pf->vc_debug_lvl = 1;
117 }
118 
119 /*
120  * Allocate the VSI for a VF.
121  */
122 static int
123 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
124 {
125 	device_t dev;
126 	struct i40e_hw *hw;
127 	struct ixl_vsi *vsi;
128 	struct i40e_vsi_context vsi_ctx;
129 	int i;
130 	enum i40e_status_code code;
131 
132 	hw = &pf->hw;
133 	vsi = &pf->vsi;
134 	dev = pf->dev;
135 
136 	vsi_ctx.pf_num = hw->pf_id;
137 	vsi_ctx.uplink_seid = pf->veb_seid;
138 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
139 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
140 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
141 
142 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
143 
144 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
145 	vsi_ctx.info.switch_id = htole16(0);
146 
147 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
148 	vsi_ctx.info.sec_flags = 0;
149 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
150 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
151 
152 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
153 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
154 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
155 
156 	vsi_ctx.info.valid_sections |=
157 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
158 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
159 
160 	/* ERJ: Only scattered allocation is supported for VFs right now */
161 	for (i = 0; i < vf->qtag.num_active; i++)
162 		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
163 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
164 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
165 
166 	vsi_ctx.info.tc_mapping[0] = htole16(
167 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
168 	    (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
169 
170 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
171 	if (code != I40E_SUCCESS)
172 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
173 	vf->vsi.seid = vsi_ctx.seid;
174 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
175 	// vf->vsi.first_queue = vf->qtag.qidx[0];
176 	vf->vsi.num_queues = vf->qtag.num_active;
177 
178 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
179 	if (code != I40E_SUCCESS)
180 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
181 
182 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
183 	if (code != I40E_SUCCESS) {
184 		device_printf(dev, "Failed to disable BW limit: %d\n",
185 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
186 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 	}
188 
189 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
190 	return (0);
191 }
192 
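/*
 * Allocate and initialize the VF's VSI, then install a broadcast MAC
 * filter so the VF can receive broadcast traffic.
 */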
193 static int
194 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
195 {
196 	struct i40e_hw *hw;
197 	int error;
198 
199 	hw = &pf->hw;
200 
201 	error = ixl_vf_alloc_vsi(pf, vf);
202 	if (error != 0)
203 		return (error);
204 
205 	vf->vsi.hw_filters_add = 0;
206 	vf->vsi.hw_filters_del = 0;
207 	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
208 	ixl_reconfigure_filters(&vf->vsi);
209 
210 	return (0);
211 }
212 
213 static void
214 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
215     uint32_t val)
216 {
217 	uint32_t qtable;
218 	int index, shift;
219 
220 	/*
221 	 * Two queues are mapped in a single register, so we have to do some
222 	 * gymnastics to convert the queue number into a register index and
223 	 * shift.
224 	 */
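	/*
	 * For example, queue 4 lands in the low (QINDEX_0) half of QTABLE
	 * register 2, while queue 5 lands in the high (QINDEX_1) half of
	 * the same register.
	 */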
225 	index = qnum / 2;
226 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
227 
228 	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
229 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
230 	qtable |= val << shift;
231 	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
232 }
233 
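/*
 * Program the VF-to-PF queue mappings: enable LAN traffic for the VF,
 * point each VF queue index at its allocated PF queue, and mirror the
 * same mapping in the VF's VSI queue table.
 */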
234 static void
235 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
236 {
237 	struct i40e_hw *hw;
238 	uint32_t qtable;
239 	int i;
240 
241 	hw = &pf->hw;
242 
243 	/*
244 	 * Contiguous mappings aren't actually supported by the hardware,
245 	 * so we have to use non-contiguous mappings.
246 	 */
247 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
248 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
249 
250 	/* Enable LAN traffic on this VF */
251 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
252 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
253 
254 	/* Program index of each VF queue into PF queue space
255 	 * (This is only needed if QTABLE is enabled) */
256 	for (i = 0; i < vf->vsi.num_queues; i++) {
257 		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
258 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
259 
260 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
261 	}
262 	for (; i < IXL_MAX_VSI_QUEUES; i++)
263 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
264 		    I40E_VPLAN_QTABLE_QINDEX_MASK);
265 
266 	/* Map queues allocated to VF to its VSI;
267 	 * This mapping matches the VF-wide mapping since the VF
268 	 * is only given a single VSI */
269 	for (i = 0; i < vf->vsi.num_queues; i++)
270 		ixl_vf_map_vsi_queue(hw, vf, i,
271 		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
272 
273 	/* Set rest of VSI queues as unused. */
274 	for (; i < IXL_MAX_VSI_QUEUES; i++)
275 		ixl_vf_map_vsi_queue(hw, vf, i,
276 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
277 
278 	ixl_flush(hw);
279 }
280 
281 static void
282 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
283 {
284 	struct i40e_hw *hw;
285 
286 	hw = &pf->hw;
287 
288 	if (vsi->seid == 0)
289 		return;
290 
291 	i40e_aq_delete_element(hw, vsi->seid, NULL);
292 }
293 
294 static void
295 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
296 {
297 
298 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
299 	ixl_flush(hw);
300 }
301 
302 static void
303 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
304 {
305 
306 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
307 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
308 	ixl_flush(hw);
309 }
310 
311 static void
312 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
313 {
314 	struct i40e_hw *hw;
315 	uint32_t vfint_reg, vpint_reg;
316 	int i;
317 
318 	hw = &pf->hw;
319 
320 	ixl_vf_vsi_release(pf, &vf->vsi);
321 
322 	/* Index 0 has a special register. */
323 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
324 
325 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
326 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
327 		ixl_vf_disable_queue_intr(hw, vfint_reg);
328 	}
329 
330 	/* Index 0 has a special register. */
331 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
332 
333 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
334 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
335 		ixl_vf_unregister_intr(hw, vpint_reg);
336 	}
337 
338 	vf->vsi.num_queues = 0;
339 }
340 
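/*
 * Poll the PCI config-space indirect access registers (CIAA/CIAD) until
 * the VF reports no pending PCIe transactions, or give up after
 * IXL_VF_RESET_TIMEOUT iterations.
 */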
341 static int
342 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
343 {
344 	struct i40e_hw *hw;
345 	int i;
346 	uint16_t global_vf_num;
347 	uint32_t ciad;
348 
349 	hw = &pf->hw;
350 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
351 
352 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
353 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
354 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
355 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
356 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
357 			return (0);
358 		DELAY(1);
359 	}
360 
361 	return (ETIMEDOUT);
362 }
363 
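/*
 * Trigger a software reset of the VF via VPGEN_VFRTRIG, then rebuild its
 * resources in ixl_reinit_vf().
 */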
364 static void
365 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
366 {
367 	struct i40e_hw *hw;
368 	uint32_t vfrtrig;
369 
370 	hw = &pf->hw;
371 
372 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
373 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
374 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
375 	ixl_flush(hw);
376 
377 	ixl_reinit_vf(pf, vf);
378 }
379 
380 static void
381 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
382 {
383 	struct i40e_hw *hw;
384 	uint32_t vfrstat, vfrtrig;
385 	int i, error;
386 
387 	hw = &pf->hw;
388 
389 	error = ixl_flush_pcie(pf, vf);
390 	if (error != 0)
391 		device_printf(pf->dev,
392 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
393 		    vf->vf_num);
394 
395 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
396 		DELAY(10);
397 
398 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
399 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
400 			break;
401 	}
402 
403 	if (i == IXL_VF_RESET_TIMEOUT)
404 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
405 
406 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
407 
408 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
409 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
410 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
411 
412 	if (vf->vsi.seid != 0)
413 		ixl_disable_rings(&vf->vsi);
414 
415 	ixl_vf_release_resources(pf, vf);
416 	ixl_vf_setup_vsi(pf, vf);
417 	ixl_vf_map_queues(pf, vf);
418 
419 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
420 	ixl_flush(hw);
421 }
422 
423 static int
424 ixl_vc_opcode_level(uint16_t opcode)
425 {
426 	switch (opcode) {
427 	case VIRTCHNL_OP_GET_STATS:
428 		return (10);
429 	default:
430 		return (5);
431 	}
432 }
433 
434 static void
435 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
436     enum i40e_status_code status, void *msg, uint16_t len)
437 {
438 	struct i40e_hw *hw;
439 	int global_vf_id;
440 
441 	hw = &pf->hw;
442 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
443 
444 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
445 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
446 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
447 
448 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
449 }
450 
451 static void
452 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
453 {
454 
455 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
456 }
457 
458 static void
459 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
460     enum i40e_status_code status, const char *file, int line)
461 {
462 
463 	I40E_VC_DEBUG(pf, 1,
464 	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
465 	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
466 	    status, vf->vf_num, file, line);
467 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
468 }
469 
470 static void
471 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
472     uint16_t msg_size)
473 {
474 	struct virtchnl_version_info reply;
475 
476 	if (msg_size != sizeof(struct virtchnl_version_info)) {
477 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
478 		    I40E_ERR_PARAM);
479 		return;
480 	}
481 
482 	vf->version = ((struct virtchnl_version_info *)msg)->minor;
483 
484 	reply.major = VIRTCHNL_VERSION_MAJOR;
485 	reply.minor = VIRTCHNL_VERSION_MINOR;
486 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
487 	    sizeof(reply));
488 }
489 
490 static void
491 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
492     uint16_t msg_size)
493 {
494 
495 	if (msg_size != 0) {
496 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
497 		    I40E_ERR_PARAM);
498 		return;
499 	}
500 
501 	ixl_reset_vf(pf, vf);
502 
503 	/* No response to a reset message. */
504 }
505 
506 static void
507 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
508     uint16_t msg_size)
509 {
510 	struct virtchnl_vf_resource reply;
511 
512 	if ((vf->version == 0 && msg_size != 0) ||
513 	    (vf->version == 1 && msg_size != 4)) {
514 		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size"
515 		    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
516 		    vf->version);
517 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
518 		    I40E_ERR_PARAM);
519 		return;
520 	}
521 
522 	bzero(&reply, sizeof(reply));
523 
524 	if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
525 		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
526 					 VIRTCHNL_VF_OFFLOAD_RSS_REG |
527 					 VIRTCHNL_VF_OFFLOAD_VLAN;
528 	else
529 		/* Force VF RSS setup by PF in 1.1+ VFs */
530 		reply.vf_cap_flags = *(u32 *)msg & (
531 					 VIRTCHNL_VF_OFFLOAD_L2 |
532 					 VIRTCHNL_VF_OFFLOAD_RSS_PF |
533 					 VIRTCHNL_VF_OFFLOAD_VLAN);
534 
535 	reply.num_vsis = 1;
536 	reply.num_queue_pairs = vf->vsi.num_queues;
537 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
538 	reply.rss_key_size = 52;
539 	reply.rss_lut_size = 64;
540 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
541 	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
542 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
543 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
544 
545 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
546 	    I40E_SUCCESS, &reply, sizeof(reply));
547 }
548 
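/*
 * Program the HMC TX queue context for one VF queue and associate the
 * corresponding PF queue with this VF via QTX_CTL.
 */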
549 static int
550 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
551     struct virtchnl_txq_info *info)
552 {
553 	struct i40e_hw *hw;
554 	struct i40e_hmc_obj_txq txq;
555 	uint16_t global_queue_num, global_vf_num;
556 	enum i40e_status_code status;
557 	uint32_t qtx_ctl;
558 
559 	hw = &pf->hw;
560 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
561 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
562 	bzero(&txq, sizeof(txq));
563 
564 	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
565 	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
566 
567 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
568 	if (status != I40E_SUCCESS)
569 		return (EINVAL);
570 
571 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
572 
573 	txq.head_wb_ena = info->headwb_enabled;
574 	txq.head_wb_addr = info->dma_headwb_addr;
575 	txq.qlen = info->ring_len;
576 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
577 	txq.rdylist_act = 0;
578 
579 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
580 	if (status != I40E_SUCCESS)
581 		return (EINVAL);
582 
583 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
584 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
585 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
586 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
587 	ixl_flush(hw);
588 
589 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
590 
591 	return (0);
592 }
593 
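/*
 * Validate the VF-supplied RX queue parameters and program the HMC RX
 * queue context for the corresponding PF queue.
 */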
594 static int
595 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
596     struct virtchnl_rxq_info *info)
597 {
598 	struct i40e_hw *hw;
599 	struct i40e_hmc_obj_rxq rxq;
600 	uint16_t global_queue_num;
601 	enum i40e_status_code status;
602 
603 	hw = &pf->hw;
604 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
605 	bzero(&rxq, sizeof(rxq));
606 
607 	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
608 	    vf->vf_num, global_queue_num, info->queue_id);
609 
610 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
611 		return (EINVAL);
612 
613 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
614 	    info->max_pkt_size < ETHER_MIN_LEN)
615 		return (EINVAL);
616 
617 	if (info->splithdr_enabled) {
618 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
619 			return (EINVAL);
620 
621 		rxq.hsplit_0 = info->rx_split_pos &
622 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
623 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
624 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
625 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
626 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
627 
628 		rxq.dtype = 2;
629 	}
630 
631 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
632 	if (status != I40E_SUCCESS)
633 		return (EINVAL);
634 
635 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
636 	rxq.qlen = info->ring_len;
637 
638 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
639 
640 	rxq.dsize = 1;
641 	rxq.crcstrip = 1;
642 	rxq.l2tsel = 1;
643 
644 	rxq.rxmax = info->max_pkt_size;
645 	rxq.tphrdesc_ena = 1;
646 	rxq.tphwdesc_ena = 1;
647 	rxq.tphdata_ena = 1;
648 	rxq.tphhead_ena = 1;
649 	rxq.lrxqthresh = 2;
650 	rxq.prefena = 1;
651 
652 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
653 	if (status != I40E_SUCCESS)
654 		return (EINVAL);
655 
656 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
657 
658 	return (0);
659 }
660 
661 static void
662 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
663     uint16_t msg_size)
664 {
665 	struct virtchnl_vsi_queue_config_info *info;
666 	struct virtchnl_queue_pair_info *pair;
667 	uint16_t expected_msg_size;
668 	int i;
669 
670 	if (msg_size < sizeof(*info)) {
671 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
672 		    I40E_ERR_PARAM);
673 		return;
674 	}
675 
676 	info = msg;
677 	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
678 		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
679 		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
680 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
681 		    I40E_ERR_PARAM);
682 		return;
683 	}
684 
685 	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
686 	if (msg_size != expected_msg_size) {
687 		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
688 		    vf->vf_num, msg_size, expected_msg_size);
689 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
690 		    I40E_ERR_PARAM);
691 		return;
692 	}
693 
694 	if (info->vsi_id != vf->vsi.vsi_num) {
695 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
696 		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
697 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
698 		    I40E_ERR_PARAM);
699 		return;
700 	}
701 
702 	for (i = 0; i < info->num_queue_pairs; i++) {
703 		pair = &info->qpair[i];
704 
705 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
706 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
707 		    pair->txq.queue_id != pair->rxq.queue_id ||
708 		    pair->txq.queue_id >= vf->vsi.num_queues) {
709 
710 			i40e_send_vf_nack(pf, vf,
711 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
712 			return;
713 		}
714 
715 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
716 			i40e_send_vf_nack(pf, vf,
717 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
718 			return;
719 		}
720 
721 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
722 			i40e_send_vf_nack(pf, vf,
723 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
724 			return;
725 		}
726 	}
727 
728 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
729 }
730 
731 static void
732 ixl_vf_set_qctl(struct ixl_pf *pf,
733     const struct virtchnl_vector_map *vector,
734     enum i40e_queue_type cur_type, uint16_t cur_queue,
735     enum i40e_queue_type *last_type, uint16_t *last_queue)
736 {
737 	uint32_t offset, qctl;
738 	uint16_t itr_indx;
739 
740 	if (cur_type == I40E_QUEUE_TYPE_RX) {
741 		offset = I40E_QINT_RQCTL(cur_queue);
742 		itr_indx = vector->rxitr_idx;
743 	} else {
744 		offset = I40E_QINT_TQCTL(cur_queue);
745 		itr_indx = vector->txitr_idx;
746 	}
747 
748 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
749 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
750 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
751 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
752 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
753 
754 	wr32(&pf->hw, offset, qctl);
755 
756 	*last_type = cur_type;
757 	*last_queue = cur_queue;
758 }
759 
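/*
 * Build the interrupt linked list for one VF MSI-X vector by chaining
 * together the RX and TX queues in its queue maps, then point the
 * vector's LNKLST register at the first element of the list.
 */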
760 static void
761 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
762     const struct virtchnl_vector_map *vector)
763 {
764 	struct i40e_hw *hw;
765 	u_int qindex;
766 	enum i40e_queue_type type, last_type;
767 	uint32_t lnklst_reg;
768 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
769 
770 	hw = &pf->hw;
771 
772 	rxq_map = vector->rxq_map;
773 	txq_map = vector->txq_map;
774 
775 	last_queue = IXL_END_OF_INTR_LNKLST;
776 	last_type = I40E_QUEUE_TYPE_RX;
777 
778 	/*
779 	 * The datasheet says that, to optimize performance, RX and TX queues
780 	 * should be interleaved in the interrupt linked list, so we process
781 	 * both at once here.
782 	 */
783 	while ((rxq_map != 0) || (txq_map != 0)) {
784 		if (txq_map != 0) {
785 			qindex = ffs(txq_map) - 1;
786 			type = I40E_QUEUE_TYPE_TX;
787 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
788 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
789 			    &last_type, &last_queue);
790 			txq_map &= ~(1 << qindex);
791 		}
792 
793 		if (rxq_map != 0) {
794 			qindex = ffs(rxq_map) - 1;
795 			type = I40E_QUEUE_TYPE_RX;
796 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
797 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
798 			    &last_type, &last_queue);
799 			rxq_map &= ~(1 << qindex);
800 		}
801 	}
802 
803 	if (vector->vector_id == 0)
804 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
805 	else
806 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
807 		    vf->vf_num);
808 	wr32(hw, lnklst_reg,
809 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
810 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
811 
812 	ixl_flush(hw);
813 }
814 
815 static void
816 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
817     uint16_t msg_size)
818 {
819 	struct virtchnl_irq_map_info *map;
820 	struct virtchnl_vector_map *vector;
821 	struct i40e_hw *hw;
822 	int i, largest_txq, largest_rxq;
823 
824 	hw = &pf->hw;
825 
826 	if (msg_size < sizeof(*map)) {
827 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
828 		    I40E_ERR_PARAM);
829 		return;
830 	}
831 
832 	map = msg;
833 	if (map->num_vectors == 0) {
834 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
835 		    I40E_ERR_PARAM);
836 		return;
837 	}
838 
839 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
840 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
841 		    I40E_ERR_PARAM);
842 		return;
843 	}
844 
845 	for (i = 0; i < map->num_vectors; i++) {
846 		vector = &map->vecmap[i];
847 
848 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
849 		    vector->vsi_id != vf->vsi.vsi_num) {
850 			i40e_send_vf_nack(pf, vf,
851 			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
852 			return;
853 		}
854 
855 		if (vector->rxq_map != 0) {
856 			largest_rxq = fls(vector->rxq_map) - 1;
857 			if (largest_rxq >= vf->vsi.num_queues) {
858 				i40e_send_vf_nack(pf, vf,
859 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
860 				    I40E_ERR_PARAM);
861 				return;
862 			}
863 		}
864 
865 		if (vector->txq_map != 0) {
866 			largest_txq = fls(vector->txq_map) - 1;
867 			if (largest_txq >= vf->vsi.num_queues) {
868 				i40e_send_vf_nack(pf, vf,
869 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
870 				    I40E_ERR_PARAM);
871 				return;
872 			}
873 		}
874 
875 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
876 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
877 			i40e_send_vf_nack(pf, vf,
878 			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
879 			    I40E_ERR_PARAM);
880 			return;
881 		}
882 
883 		ixl_vf_config_vector(pf, vf, vector);
884 	}
885 
886 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
887 }
888 
889 static void
890 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
891     uint16_t msg_size)
892 {
893 	struct virtchnl_queue_select *select;
894 	int error = 0;
895 
896 	if (msg_size != sizeof(*select)) {
897 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
898 		    I40E_ERR_PARAM);
899 		return;
900 	}
901 
902 	select = msg;
903 	if (select->vsi_id != vf->vsi.vsi_num ||
904 	    select->rx_queues == 0 || select->tx_queues == 0) {
905 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
906 		    I40E_ERR_PARAM);
907 		return;
908 	}
909 
910 	/* Enable TX rings selected by the VF */
911 	for (int i = 0; i < 32; i++) {
912 		if ((1 << i) & select->tx_queues) {
913 			/* Warn if queue is out of VF allocation range */
914 			if (i >= vf->vsi.num_queues) {
915 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
916 				    vf->vf_num, i);
917 				break;
918 			}
919 			/* Skip this queue if it hasn't been configured */
920 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
921 				continue;
922 			/* Warn if this queue is already marked as enabled */
923 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
924 				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
925 				    vf->vf_num, i);
926 
927 			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
928 			if (error)
929 				break;
930 			else
931 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
932 		}
933 	}
934 
935 	/* Enable RX rings selected by the VF */
936 	for (int i = 0; i < 32; i++) {
937 		if ((1 << i) & select->rx_queues) {
938 			/* Warn if queue is out of VF allocation range */
939 			if (i >= vf->vsi.num_queues) {
940 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
941 				    vf->vf_num, i);
942 				break;
943 			}
944 			/* Skip this queue if it hasn't been configured */
945 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
946 				continue;
947 			/* Warn if this queue is already marked as enabled */
948 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
949 				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
950 				    vf->vf_num, i);
951 			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
952 			if (error)
953 				break;
954 			else
955 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
956 		}
957 	}
958 
959 	if (error) {
960 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
961 		    I40E_ERR_TIMEOUT);
962 		return;
963 	}
964 
965 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
966 }
967 
968 static void
969 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
970     void *msg, uint16_t msg_size)
971 {
972 	struct virtchnl_queue_select *select;
973 	int error = 0;
974 
975 	if (msg_size != sizeof(*select)) {
976 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
977 		    I40E_ERR_PARAM);
978 		return;
979 	}
980 
981 	select = msg;
982 	if (select->vsi_id != vf->vsi.vsi_num ||
983 	    select->rx_queues == 0 || select->tx_queues == 0) {
984 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
985 		    I40E_ERR_PARAM);
986 		return;
987 	}
988 
989 	/* Disable TX rings selected by the VF */
990 	for (int i = 0; i < 32; i++) {
991 		if ((1 << i) & select->tx_queues) {
992 			/* Warn if queue is out of VF allocation range */
993 			if (i >= vf->vsi.num_queues) {
994 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
995 				    vf->vf_num, i);
996 				break;
997 			}
998 			/* Skip this queue if it hasn't been configured */
999 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1000 				continue;
1001 			/* Warn if this queue is already marked as disabled */
1002 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1003 				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1004 				    vf->vf_num, i);
1005 				continue;
1006 			}
1007 			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1008 			if (error)
1009 				break;
1010 			else
1011 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1012 		}
1013 	}
1014 
1015 	/* Disable RX rings selected by the VF */
1016 	for (int i = 0; i < 32; i++) {
1017 		if ((1 << i) & select->rx_queues) {
1018 			/* Warn if queue is out of VF allocation range */
1019 			if (i >= vf->vsi.num_queues) {
1020 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1021 				    vf->vf_num, i);
1022 				break;
1023 			}
1024 			/* Skip this queue if it hasn't been configured */
1025 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1026 				continue;
1027 			/* Warn if this queue is already marked as disabled */
1028 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1029 				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1030 				    vf->vf_num, i);
1031 				continue;
1032 			}
1033 			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1034 			if (error)
1035 				break;
1036 			else
1037 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1038 		}
1039 	}
1040 
1041 	if (error) {
1042 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1043 		    I40E_ERR_TIMEOUT);
1044 		return;
1045 	}
1046 
1047 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1048 }
1049 
1050 static bool
1051 ixl_zero_mac(const uint8_t *addr)
1052 {
1053 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1054 
1055 	return (cmp_etheraddr(addr, zero));
1056 }
1057 
1058 static bool
1059 ixl_bcast_mac(const uint8_t *addr)
1060 {
1061 
1062 	return (cmp_etheraddr(addr, ixl_bcast_addr));
1063 }
1064 
1065 static int
1066 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1067 {
1068 
1069 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1070 		return (EINVAL);
1071 
1072 	/*
1073 	 * If the VF is not allowed to change its MAC address, don't let it
1074 	 * set a MAC filter for an address that is not a multicast address and
1075 	 * is not its assigned MAC.
1076 	 */
1077 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1078 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1079 		return (EPERM);
1080 
1081 	return (0);
1082 }
1083 
1084 static void
1085 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1086     uint16_t msg_size)
1087 {
1088 	struct virtchnl_ether_addr_list *addr_list;
1089 	struct virtchnl_ether_addr *addr;
1090 	struct ixl_vsi *vsi;
1091 	int i;
1092 	size_t expected_size;
1093 
1094 	vsi = &vf->vsi;
1095 
1096 	if (msg_size < sizeof(*addr_list)) {
1097 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1098 		    I40E_ERR_PARAM);
1099 		return;
1100 	}
1101 
1102 	addr_list = msg;
1103 	expected_size = sizeof(*addr_list) +
1104 	    addr_list->num_elements * sizeof(*addr);
1105 
1106 	if (addr_list->num_elements == 0 ||
1107 	    addr_list->vsi_id != vsi->vsi_num ||
1108 	    msg_size != expected_size) {
1109 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1110 		    I40E_ERR_PARAM);
1111 		return;
1112 	}
1113 
1114 	for (i = 0; i < addr_list->num_elements; i++) {
1115 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1116 			i40e_send_vf_nack(pf, vf,
1117 			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1118 			return;
1119 		}
1120 	}
1121 
1122 	for (i = 0; i < addr_list->num_elements; i++) {
1123 		addr = &addr_list->list[i];
1124 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1125 	}
1126 
1127 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1128 }
1129 
1130 static void
1131 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1132     uint16_t msg_size)
1133 {
1134 	struct virtchnl_ether_addr_list *addr_list;
1135 	struct virtchnl_ether_addr *addr;
1136 	size_t expected_size;
1137 	int i;
1138 
1139 	if (msg_size < sizeof(*addr_list)) {
1140 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1141 		    I40E_ERR_PARAM);
1142 		return;
1143 	}
1144 
1145 	addr_list = msg;
1146 	expected_size = sizeof(*addr_list) +
1147 	    addr_list->num_elements * sizeof(*addr);
1148 
1149 	if (addr_list->num_elements == 0 ||
1150 	    addr_list->vsi_id != vf->vsi.vsi_num ||
1151 	    msg_size != expected_size) {
1152 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1153 		    I40E_ERR_PARAM);
1154 		return;
1155 	}
1156 
1157 	for (i = 0; i < addr_list->num_elements; i++) {
1158 		addr = &addr_list->list[i];
1159 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1160 			i40e_send_vf_nack(pf, vf,
1161 			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
1162 			return;
1163 		}
1164 	}
1165 
1166 	for (i = 0; i < addr_list->num_elements; i++) {
1167 		addr = &addr_list->list[i];
1168 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1169 	}
1170 
1171 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1172 }
1173 
1174 static enum i40e_status_code
1175 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1176 {
1177 	struct i40e_vsi_context vsi_ctx;
1178 
1179 	vsi_ctx.seid = vf->vsi.seid;
1180 
1181 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1182 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1183 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1184 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1185 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1186 }
1187 
1188 static void
1189 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1190     uint16_t msg_size)
1191 {
1192 	struct virtchnl_vlan_filter_list *filter_list;
1193 	enum i40e_status_code code;
1194 	size_t expected_size;
1195 	int i;
1196 
1197 	if (msg_size < sizeof(*filter_list)) {
1198 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1199 		    I40E_ERR_PARAM);
1200 		return;
1201 	}
1202 
1203 	filter_list = msg;
1204 	expected_size = sizeof(*filter_list) +
1205 	    filter_list->num_elements * sizeof(uint16_t);
1206 	if (filter_list->num_elements == 0 ||
1207 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1208 	    msg_size != expected_size) {
1209 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1210 		    I40E_ERR_PARAM);
1211 		return;
1212 	}
1213 
1214 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1215 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1216 		    I40E_ERR_PARAM);
1217 		return;
1218 	}
1219 
1220 	for (i = 0; i < filter_list->num_elements; i++) {
1221 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1222 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1223 			    I40E_ERR_PARAM);
1224 			return;
1225 		}
1226 	}
1227 
1228 	code = ixl_vf_enable_vlan_strip(pf, vf);
1229 	if (code != I40E_SUCCESS) {
1230 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1231 		    I40E_ERR_PARAM);
1232 	}
1233 
1234 	for (i = 0; i < filter_list->num_elements; i++)
1235 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1236 
1237 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1238 }
1239 
1240 static void
1241 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1242     uint16_t msg_size)
1243 {
1244 	struct virtchnl_vlan_filter_list *filter_list;
1245 	int i;
1246 	size_t expected_size;
1247 
1248 	if (msg_size < sizeof(*filter_list)) {
1249 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1250 		    I40E_ERR_PARAM);
1251 		return;
1252 	}
1253 
1254 	filter_list = msg;
1255 	expected_size = sizeof(*filter_list) +
1256 	    filter_list->num_elements * sizeof(uint16_t);
1257 	if (filter_list->num_elements == 0 ||
1258 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1259 	    msg_size != expected_size) {
1260 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1261 		    I40E_ERR_PARAM);
1262 		return;
1263 	}
1264 
1265 	for (i = 0; i < filter_list->num_elements; i++) {
1266 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1267 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1268 			    I40E_ERR_PARAM);
1269 			return;
1270 		}
1271 	}
1272 
1273 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1274 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1275 		    I40E_ERR_PARAM);
1276 		return;
1277 	}
1278 
1279 	for (i = 0; i < filter_list->num_elements; i++)
1280 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1281 
1282 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1283 }
1284 
1285 static void
1286 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1287     void *msg, uint16_t msg_size)
1288 {
1289 	struct virtchnl_promisc_info *info;
1290 	enum i40e_status_code code;
1291 
1292 	if (msg_size != sizeof(*info)) {
1293 		i40e_send_vf_nack(pf, vf,
1294 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1295 		return;
1296 	}
1297 
1298 	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1299 		i40e_send_vf_nack(pf, vf,
1300 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1301 		return;
1302 	}
1303 
1304 	info = msg;
1305 	if (info->vsi_id != vf->vsi.vsi_num) {
1306 		i40e_send_vf_nack(pf, vf,
1307 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1308 		return;
1309 	}
1310 
1311 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1312 	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1313 	if (code != I40E_SUCCESS) {
1314 		i40e_send_vf_nack(pf, vf,
1315 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1316 		return;
1317 	}
1318 
1319 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1320 	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
1321 	if (code != I40E_SUCCESS) {
1322 		i40e_send_vf_nack(pf, vf,
1323 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1324 		return;
1325 	}
1326 
1327 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1328 }
1329 
1330 static void
1331 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1332     uint16_t msg_size)
1333 {
1334 	struct virtchnl_queue_select *queue;
1335 
1336 	if (msg_size != sizeof(*queue)) {
1337 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1338 		    I40E_ERR_PARAM);
1339 		return;
1340 	}
1341 
1342 	queue = msg;
1343 	if (queue->vsi_id != vf->vsi.vsi_num) {
1344 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1345 		    I40E_ERR_PARAM);
1346 		return;
1347 	}
1348 
1349 	ixl_update_eth_stats(&vf->vsi);
1350 
1351 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1352 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1353 }
1354 
1355 static void
1356 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1357     uint16_t msg_size)
1358 {
1359 	struct i40e_hw *hw;
1360 	struct virtchnl_rss_key *key;
1361 	struct i40e_aqc_get_set_rss_key_data key_data;
1362 	enum i40e_status_code status;
1363 
1364 	hw = &pf->hw;
1365 
1366 	if (msg_size < sizeof(*key)) {
1367 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1368 		    I40E_ERR_PARAM);
1369 		return;
1370 	}
1371 
1372 	key = msg;
1373 
1374 	if (key->key_len > 52) {
1375 		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1376 		    vf->vf_num, key->key_len, 52);
1377 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1378 		    I40E_ERR_PARAM);
1379 		return;
1380 	}
1381 
1382 	if (key->vsi_id != vf->vsi.vsi_num) {
1383 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1384 		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1385 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1386 		    I40E_ERR_PARAM);
1387 		return;
1388 	}
1389 
1390 	/* Fill out hash using MAC-dependent method */
1391 	if (hw->mac.type == I40E_MAC_X722) {
1392 		bzero(&key_data, sizeof(key_data));
1393 		if (key->key_len <= 40)
1394 			bcopy(key->key, key_data.standard_rss_key, key->key_len);
1395 		else {
1396 			bcopy(key->key, key_data.standard_rss_key, 40);
1397 			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1398 		}
1399 		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1400 		if (status) {
1401 			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1402 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1403 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1404 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1405 			return;
1406 		}
1407 	} else {
1408 		for (int i = 0; i < (key->key_len / 4); i++)
1409 			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1410 	}
1411 
1412 	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1413 	    vf->vf_num, key->key[0]);
1414 
1415 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1416 }
1417 
1418 static void
1419 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1420     uint16_t msg_size)
1421 {
1422 	struct i40e_hw *hw;
1423 	struct virtchnl_rss_lut *lut;
1424 	enum i40e_status_code status;
1425 
1426 	hw = &pf->hw;
1427 
1428 	if (msg_size < sizeof(*lut)) {
1429 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1430 		    I40E_ERR_PARAM);
1431 		return;
1432 	}
1433 
1434 	lut = msg;
1435 
1436 	if (lut->lut_entries > 64) {
1437 		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1438 		    vf->vf_num, lut->lut_entries, 64);
1439 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1440 		    I40E_ERR_PARAM);
1441 		return;
1442 	}
1443 
1444 	if (lut->vsi_id != vf->vsi.vsi_num) {
1445 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1446 		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1447 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1448 		    I40E_ERR_PARAM);
1449 		return;
1450 	}
1451 
1452 	/* Fill out LUT using MAC-dependent method */
1453 	if (hw->mac.type == I40E_MAC_X722) {
1454 		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1455 		if (status) {
1456 			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1457 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1458 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1459 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1460 			return;
1461 		}
1462 	} else {
1463 		for (int i = 0; i < (lut->lut_entries / 4); i++)
1464 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1465 	}
1466 
1467 	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1468 	    vf->vf_num, lut->lut[0], lut->lut_entries);
1469 
1470 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1471 }
1472 
1473 static void
1474 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1475     uint16_t msg_size)
1476 {
1477 	struct i40e_hw *hw;
1478 	struct virtchnl_rss_hena *hena;
1479 
1480 	hw = &pf->hw;
1481 
1482 	if (msg_size < sizeof(*hena)) {
1483 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
1484 		    I40E_ERR_PARAM);
1485 		return;
1486 	}
1487 
1488 	hena = msg;
1489 
1490 	/* Set HENA */
1491 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1492 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1493 
1494 	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1495 	    vf->vf_num, hena->hena);
1496 
1497 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1498 }
1499 
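/* Send the PF's current link state to a single VF as a VIRTCHNL_OP_EVENT. */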
1500 static void
1501 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1502 {
1503 	struct virtchnl_pf_event event;
1504 	struct i40e_hw *hw;
1505 
1506 	hw = &pf->hw;
1507 	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1508 	event.severity = PF_EVENT_SEVERITY_INFO;
1509 	event.event_data.link_event.link_status = pf->vsi.link_active;
1510 	event.event_data.link_event.link_speed =
1511 		(enum virtchnl_link_speed)hw->phy.link_info.link_speed;
1512 
1513 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1514 			sizeof(event));
1515 }
1516 
1517 void
1518 ixl_broadcast_link_state(struct ixl_pf *pf)
1519 {
1520 	int i;
1521 
1522 	for (i = 0; i < pf->num_vfs; i++)
1523 		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
1524 }
1525 
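/*
 * Dispatch a virtchnl message received from a VF over the admin queue to
 * the matching handler; messages from disabled VFs are ignored.
 */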
1526 void
1527 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1528 {
1529 	struct ixl_vf *vf;
1530 	void *msg;
1531 	uint16_t vf_num, msg_size;
1532 	uint32_t opcode;
1533 
1534 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1535 	opcode = le32toh(event->desc.cookie_high);
1536 
1537 	if (vf_num >= pf->num_vfs) {
1538 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1539 		return;
1540 	}
1541 
1542 	vf = &pf->vfs[vf_num];
1543 	msg = event->msg_buf;
1544 	msg_size = event->msg_len;
1545 
1546 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1547 	    "Got msg %s(%d) from%sVF-%d of size %d\n",
1548 	    ixl_vc_opcode_str(opcode), opcode,
1549 	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1550 	    vf_num, msg_size);
1551 
1552 	/* This must be a stray msg from a previously destroyed VF. */
1553 	if (!(vf->vf_flags & VF_FLAG_ENABLED))
1554 		return;
1555 
1556 	switch (opcode) {
1557 	case VIRTCHNL_OP_VERSION:
1558 		ixl_vf_version_msg(pf, vf, msg, msg_size);
1559 		break;
1560 	case VIRTCHNL_OP_RESET_VF:
1561 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
1562 		break;
1563 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1564 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1565 		/* Notify VF of link state after it obtains queues, as this is
1566 		 * the last thing it will do as part of initialization
1567 		 */
1568 		ixl_notify_vf_link_state(pf, vf);
1569 		break;
1570 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1571 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1572 		break;
1573 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1574 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1575 		break;
1576 	case VIRTCHNL_OP_ENABLE_QUEUES:
1577 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1578 		/* Notify VF of link state after it obtains queues, as this is
1579 		 * the last thing it will do as part of initialization
1580 		 */
1581 		ixl_notify_vf_link_state(pf, vf);
1582 		break;
1583 	case VIRTCHNL_OP_DISABLE_QUEUES:
1584 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1585 		break;
1586 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1587 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1588 		break;
1589 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1590 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1591 		break;
1592 	case VIRTCHNL_OP_ADD_VLAN:
1593 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1594 		break;
1595 	case VIRTCHNL_OP_DEL_VLAN:
1596 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1597 		break;
1598 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1599 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1600 		break;
1601 	case VIRTCHNL_OP_GET_STATS:
1602 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1603 		break;
1604 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1605 		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1606 		break;
1607 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1608 		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1609 		break;
1610 	case VIRTCHNL_OP_SET_RSS_HENA:
1611 		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1612 		break;
1613 
1614 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1615 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1616 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1617 	default:
1618 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1619 		break;
1620 	}
1621 }
1622 
1623 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1624 void
1625 ixl_handle_vflr(void *arg, int pending)
1626 {
1627 	struct ixl_pf *pf;
1628 	struct ixl_vf *vf;
1629 	struct i40e_hw *hw;
1630 	uint16_t global_vf_num;
1631 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1632 	int i;
1633 
1634 	pf = arg;
1635 	hw = &pf->hw;
1636 
1637 	IXL_PF_LOCK(pf);
1638 	for (i = 0; i < pf->num_vfs; i++) {
1639 		global_vf_num = hw->func_caps.vf_base_id + i;
1640 
1641 		vf = &pf->vfs[i];
1642 		if (!(vf->vf_flags & VF_FLAG_ENABLED))
1643 			continue;
1644 
1645 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1646 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1647 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1648 		if (vflrstat & vflrstat_mask) {
1649 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1650 			    vflrstat_mask);
1651 
1652 			ixl_reinit_vf(pf, vf);
1653 		}
1654 	}
1655 
1656 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1657 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1658 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1659 	ixl_flush(hw);
1660 
1661 	IXL_PF_UNLOCK(pf);
1662 }
1663 
1664 static int
1665 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1666 {
1667 
1668 	switch (err) {
1669 	case I40E_AQ_RC_EPERM:
1670 		return (EPERM);
1671 	case I40E_AQ_RC_ENOENT:
1672 		return (ENOENT);
1673 	case I40E_AQ_RC_ESRCH:
1674 		return (ESRCH);
1675 	case I40E_AQ_RC_EINTR:
1676 		return (EINTR);
1677 	case I40E_AQ_RC_EIO:
1678 		return (EIO);
1679 	case I40E_AQ_RC_ENXIO:
1680 		return (ENXIO);
1681 	case I40E_AQ_RC_E2BIG:
1682 		return (E2BIG);
1683 	case I40E_AQ_RC_EAGAIN:
1684 		return (EAGAIN);
1685 	case I40E_AQ_RC_ENOMEM:
1686 		return (ENOMEM);
1687 	case I40E_AQ_RC_EACCES:
1688 		return (EACCES);
1689 	case I40E_AQ_RC_EFAULT:
1690 		return (EFAULT);
1691 	case I40E_AQ_RC_EBUSY:
1692 		return (EBUSY);
1693 	case I40E_AQ_RC_EEXIST:
1694 		return (EEXIST);
1695 	case I40E_AQ_RC_EINVAL:
1696 		return (EINVAL);
1697 	case I40E_AQ_RC_ENOTTY:
1698 		return (ENOTTY);
1699 	case I40E_AQ_RC_ENOSPC:
1700 		return (ENOSPC);
1701 	case I40E_AQ_RC_ENOSYS:
1702 		return (ENOSYS);
1703 	case I40E_AQ_RC_ERANGE:
1704 		return (ERANGE);
1705 	case I40E_AQ_RC_EFLUSHED:
1706 		return (EINVAL);	/* No exact equivalent in errno.h */
1707 	case I40E_AQ_RC_BAD_ADDR:
1708 		return (EFAULT);
1709 	case I40E_AQ_RC_EMODE:
1710 		return (EPERM);
1711 	case I40E_AQ_RC_EFBIG:
1712 		return (EFBIG);
1713 	default:
1714 		return (EINVAL);
1715 	}
1716 }
1717 
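/*
 * IOV initialization handler: allocate per-VF state and create the VEB
 * that switches traffic between the PF VSI and the VF VSIs.
 */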
1718 int
1719 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1720 {
1721 	struct ixl_pf *pf;
1722 	struct i40e_hw *hw;
1723 	struct ixl_vsi *pf_vsi;
1724 	enum i40e_status_code ret;
1725 	int i, error;
1726 
1727 	pf = device_get_softc(dev);
1728 	hw = &pf->hw;
1729 	pf_vsi = &pf->vsi;
1730 
1731 	IXL_PF_LOCK(pf);
1732 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1733 	    M_ZERO);
1734 
1735 	if (pf->vfs == NULL) {
1736 		error = ENOMEM;
1737 		goto fail;
1738 	}
1739 
1740 	for (i = 0; i < num_vfs; i++)
1741 		sysctl_ctx_init(&pf->vfs[i].ctx);
1742 
1743 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1744 	    1, FALSE, &pf->veb_seid, FALSE, NULL);
1745 	if (ret != I40E_SUCCESS) {
1746 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1747 		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
1748 		    error);
1749 		goto fail;
1750 	}
1751 
1752 	pf->num_vfs = num_vfs;
1753 	IXL_PF_UNLOCK(pf);
1754 	return (0);
1755 
1756 fail:
1757 	free(pf->vfs, M_IXL);
1758 	pf->vfs = NULL;
1759 	IXL_PF_UNLOCK(pf);
1760 	return (error);
1761 }
1762 
1763 void
1764 ixl_iov_uninit(device_t dev)
1765 {
1766 	struct ixl_pf *pf;
1767 	struct i40e_hw *hw;
1768 	struct ixl_vsi *vsi;
1769 	struct ifnet *ifp;
1770 	struct ixl_vf *vfs;
1771 	int i, num_vfs;
1772 
1773 	pf = device_get_softc(dev);
1774 	hw = &pf->hw;
1775 	vsi = &pf->vsi;
1776 	ifp = vsi->ifp;
1777 
1778 	IXL_PF_LOCK(pf);
1779 	for (i = 0; i < pf->num_vfs; i++) {
1780 		if (pf->vfs[i].vsi.seid != 0)
1781 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1782 		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1783 		ixl_free_mac_filters(&pf->vfs[i].vsi);
1784 		DDPRINTF(dev, "VF %d: %d released\n",
1785 		    i, pf->vfs[i].qtag.num_allocated);
1786 		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1787 	}
1788 
1789 	if (pf->veb_seid != 0) {
1790 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1791 		pf->veb_seid = 0;
1792 	}
1793 
1794 	vfs = pf->vfs;
1795 	num_vfs = pf->num_vfs;
1796 
1797 	pf->vfs = NULL;
1798 	pf->num_vfs = 0;
1799 	IXL_PF_UNLOCK(pf);
1800 
1801 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
1802 	for (i = 0; i < num_vfs; i++)
1803 		sysctl_ctx_free(&vfs[i].ctx);
1804 	free(vfs, M_IXL);
1805 }
1806 
1807 static int
1808 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1809 {
1810 	device_t dev = pf->dev;
1811 	int error;
1812 
1813 	/* Validate, and clamp value if invalid */
1814 	if (num_queues < 1 || num_queues > 16)
1815 		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1816 		    num_queues, vf->vf_num);
1817 	if (num_queues < 1) {
1818 		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1819 		num_queues = 1;
1820 	} else if (num_queues > 16) {
1821 		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1822 		num_queues = 16;
1823 	}
1824 	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1825 	if (error) {
1826 		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1827 		    num_queues, vf->vf_num);
1828 		return (ENOSPC);
1829 	}
1830 
1831 	DDPRINTF(dev, "VF %d: %d allocated, %d active",
1832 	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1833 	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1834 
1835 	return (0);
1836 }
1837 
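/*
 * Per-VF creation handler: reserve queues, set up the VF's VSI, and apply
 * the MAC, anti-spoof, and promiscuous settings from the supplied nvlist
 * before resetting the VF into a clean state.
 */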
1838 int
1839 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1840 {
1841 	char sysctl_name[QUEUE_NAME_LEN];
1842 	struct ixl_pf *pf;
1843 	struct ixl_vf *vf;
1844 	const void *mac;
1845 	size_t size;
1846 	int error;
1847 	int vf_num_queues;
1848 
1849 	pf = device_get_softc(dev);
1850 	vf = &pf->vfs[vfnum];
1851 
1852 	IXL_PF_LOCK(pf);
1853 	vf->vf_num = vfnum;
1854 
1855 	vf->vsi.back = pf;
1856 	vf->vf_flags = VF_FLAG_ENABLED;
1857 	SLIST_INIT(&vf->vsi.ftl);
1858 
1859 	/* Reserve queue allocation from PF */
1860 	vf_num_queues = nvlist_get_number(params, "num-queues");
1861 	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1862 	if (error != 0)
1863 		goto out;
1864 
1865 	error = ixl_vf_setup_vsi(pf, vf);
1866 	if (error != 0)
1867 		goto out;
1868 
1869 	if (nvlist_exists_binary(params, "mac-addr")) {
1870 		mac = nvlist_get_binary(params, "mac-addr", &size);
1871 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1872 
1873 		if (nvlist_get_bool(params, "allow-set-mac"))
1874 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1875 	} else
1876 		/*
1877 		 * If the administrator has not specified a MAC address then
1878 		 * we must allow the VF to choose one.
1879 		 */
1880 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1881 
1882 	if (nvlist_get_bool(params, "mac-anti-spoof"))
1883 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1884 
1885 	if (nvlist_get_bool(params, "allow-promisc"))
1886 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1887 
1888 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
1889 
1890 	ixl_reset_vf(pf, vf);
1891 out:
1892 	IXL_PF_UNLOCK(pf);
1893 	if (error == 0) {
1894 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1895 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1896 	}
1897 
1898 	return (error);
1899 }
1900 
1901