/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl_pf_iov.h"

/* Private functions */
static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static int	ixl_vc_opcode_level(uint16_t opcode);

static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
static int	ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);

static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

/*
 * TODO: Move pieces of this into iflib and call the rest in a handler?
 *
 * e.g. ixl_if_iov_set_schema
 *
 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
 * in the driver.
 */
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT,
	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (iov_error != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    iov_error);
	} else {
		device_printf(dev, "SR-IOV ready\n");
	}

	pf->vc_debug_lvl = 1;
}
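
/*
 * Example (hypothetical configuration, host side): once the PF attaches,
 * VFs are created with iovctl(8), e.g. "iovctl -C -f /etc/iovctl.conf",
 * where the config file sets num_vfs for the PF device and the per-VF
 * parameters registered in the schema above ("mac-addr", "mac-anti-spoof",
 * "allow-set-mac", "allow-promisc", "num-queues").
 */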

/*
 * Allocate the VSI for a VF.
 */
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	device_t dev;
	struct i40e_hw *hw;
	struct i40e_vsi_context vsi_ctx;
	int i;
	enum i40e_status_code code;

	hw = &pf->hw;
	dev = pf->dev;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (pf->enable_vf_loopback)
		vsi_ctx.info.switch_id =
		    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

	/* XXX: Only scattered allocation is supported for VFs right now */
	for (i = 0; i < vf->qtag.num_active; i++)
		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.num_rx_queues = vf->qtag.num_active;
	vf->vsi.num_tx_queues = vf->qtag.num_active;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}

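/*
 * Allocate and configure the VSI for a VF: create the VSI, enable reception
 * of broadcast frames on it, and re-add any existing MAC/VLAN filters.
 */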
static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int error;

	hw = &pf->hw;
	vf->vsi.flags |= IXL_FLAGS_IS_VF;

	error = ixl_vf_alloc_vsi(pf, vf);
	if (error != 0)
		return (error);

	vf->vsi.dev = pf->dev;

	ixl_init_filters(&vf->vsi);
	/* Let VF receive broadcast Ethernet frames */
	error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
	if (error)
		device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
	/* Re-add VF's MAC/VLAN filters to its VSI */
	ixl_reconfigure_filters(&vf->vsi);

	return (0);
}

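/*
 * Write one entry of a VSI's queue-index table. Two entries share each
 * VSILAN_QTABLE register, so qnum is split into a register index and a
 * shift within that register.
 */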
static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}

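/*
 * Map the queues allocated to a VF into both the VF's queue space and its
 * VSI's queue space, and enable LAN traffic for the VF.
 */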
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t qtable;
	int i;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	    I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable LAN traffic on this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Program index of each VF queue into PF queue space
	 * (This is only needed if QTABLE is enabled) */
	for (i = 0; i < vf->vsi.num_tx_queues; i++) {
		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
		    I40E_VPLAN_QTABLE_QINDEX_MASK);

	/* Map queues allocated to VF to its VSI;
	 * This mapping matches the VF-wide mapping since the VF
	 * is only given a single VSI */
	for (i = 0; i < vf->vsi.num_tx_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}

static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
	struct i40e_hw *hw;

	hw = &pf->hw;

	if (vsi->seid == 0)
		return;

	i40e_aq_delete_element(hw, vsi->seid, NULL);
}

static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}

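/*
 * Release a VF's VSI and disable/unregister all of its interrupt vectors.
 */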
static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfint_reg, vpint_reg;
	int i;

	hw = &pf->hw;

	ixl_vf_vsi_release(pf, &vf->vsi);

	/* Index 0 has a special register. */
	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
		ixl_vf_disable_queue_intr(hw, vfint_reg);
	}

	/* Index 0 has a special register. */
	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
		ixl_vf_unregister_intr(hw, vpint_reg);
	}

	vf->vsi.num_tx_queues = 0;
	vf->vsi.num_rx_queues = 0;
}

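/*
 * Poll, via the PCIe configuration indirection registers, until the VF has
 * no PCIe transactions pending or the reset timeout expires.
 */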
static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint16_t global_vf_num;
	uint32_t ciad;

	hw = &pf->hw;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	    (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
			return (0);
		DELAY(1);
	}

	return (ETIMEDOUT);
}

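/*
 * Trigger a software reset of a VF, then reinitialize it.
 */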
static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrtrig;

	hw = &pf->hw;

	ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
	ixl_flush(hw);

	ixl_reinit_vf(pf, vf);

	ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
}

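/*
 * Complete a VF reset: wait for the reset to finish, clear the reset
 * trigger, and rebuild the VF's VSI and queue mappings.
 */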
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
	ixl_pf_qmgr_clear_queue_flags(&vf->qtag);

	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
	ixl_flush(hw);
}

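/*
 * Return the debug level at which messages for a virtchnl opcode are
 * logged; chatty opcodes like GET_STATS are only shown at high verbosity.
 */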
static int
ixl_vc_opcode_level(uint16_t opcode)
{
	switch (opcode) {
	case VIRTCHNL_OP_GET_STATS:
		return (10);
	default:
		return (5);
	}
}

static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
{
	struct i40e_hw *hw;
	int global_vf_id;

	hw = &pf->hw;
	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num);

	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}

static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}

static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}

static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_version_info *recv_vf_version;
	device_t dev = pf->dev;

	recv_vf_version = (struct virtchnl_version_info *)msg;

	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(recv_vf_version)) {
		vf->version.major = 1;
		vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	} else {
		vf->version.major = VIRTCHNL_VERSION_MAJOR;
		vf->version.minor = VIRTCHNL_VERSION_MINOR;

		if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
		    (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
			device_printf(dev,
			    "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
			    __func__, vf->vf_num,
			    recv_vf_version->major, recv_vf_version->minor,
			    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
	}

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
	    &vf->version, sizeof(vf->version));
}

static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}

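/*
 * Reply to a VIRTCHNL_OP_GET_VF_RESOURCES request with the VF's VSI,
 * queue, vector, and offload capability information.
 */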
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vf_resource reply;

	bzero(&reply, sizeof(reply));

	if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_REG |
		    VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_cap_flags = *(u32 *)msg & (
		    VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_PF |
		    VIRTCHNL_VF_OFFLOAD_VLAN);

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_tx_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}

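/*
 * Program the HMC TX queue context for one of the VF's TX queues and
 * associate the global queue with the VF.
 */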
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

	return (0);
}

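/*
 * Validate the VF-supplied RX queue parameters and program the HMC RX
 * queue context for one of the VF's RX queues.
 */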
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		rxq.dtype = 2;
	}

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 1;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

	return (0);
}

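/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the request and configure
 * each TX/RX queue pair that the VF describes.
 */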
static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vsi_queue_config_info *info;
	struct virtchnl_queue_pair_info *pair;
	int i;

	info = msg;
	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	if (info->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_tx_queues) {

			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}

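/*
 * Program the interrupt cause-control register for one queue: set its
 * MSI-X vector and ITR index, and chain it to the previously-programmed
 * queue so the queues form the interrupt linked list.
 */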
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
}

static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says that, to optimize performance, RX and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}

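/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP: validate each vector mapping and
 * program the corresponding interrupt linked list for the VF.
 */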
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_irq_map_info *map;
	struct virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;
	map = msg;

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_rx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_tx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}

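/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: enable each configured TX/RX ring the
 * VF selected, tracking enable state in the PF's queue manager.
 */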
static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	select = msg;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Enable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
				    vf->vf_num, i);

			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
		}
	}

	/* Enable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
				    vf->vf_num, i);
			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}

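/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: disable each configured TX/RX ring
 * the VF selected, tracking disable state in the PF's queue manager.
 */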
static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	select = msg;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Disable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
		}
	}

	/* Disable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}

static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

	if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
		return (EINVAL);

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || ixl_ether_is_equal(addr, vf->mac)))
		return (EPERM);

	return (0);
}

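/*
 * Handle VIRTCHNL_OP_ADD_ETH_ADDR: validate each address, then add the
 * requested MAC filters to the VF's VSI.
 */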
static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;

	vsi = &vf->vsi;
	addr_list = msg;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}

static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;

	vsi = &vf->vsi;
	addr_list = msg;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}

static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_vsi_context vsi_ctx;

	vsi_ctx.seid = vf->vsi.seid;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}

static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	enum i40e_status_code code;
	int i;

	filter_list = msg;

	if (filter_list->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}

1156 
1157 static void
1158 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1159     uint16_t msg_size)
1160 {
1161 	struct virtchnl_vlan_filter_list *filter_list;
1162 	int i;
1163 
1164 	filter_list = msg;
1165 
1166 	if (filter_list->vsi_id != vf->vsi.vsi_num) {
1167 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1168 		    I40E_ERR_PARAM);
1169 		return;
1170 	}
1171 
1172 	for (i = 0; i < filter_list->num_elements; i++) {
1173 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1174 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1175 			    I40E_ERR_PARAM);
1176 			return;
1177 		}
1178 	}
1179 
1180 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1181 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1182 		    I40E_ERR_PARAM);
1183 		return;
1184 	}
1185 
1186 	for (i = 0; i < filter_list->num_elements; i++)
1187 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1188 
1189 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1190 }
1191 
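/*
 * Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: set unicast and multicast
 * promiscuous mode on the VF's VSI, if the VF is allowed to do so.
 */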
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_promisc_info *info;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code code;

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		/*
		 * Do the same thing as the Linux PF driver -- lie to the VF
		 */
		ixl_send_vf_ack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
		return;
	}

	info = msg;
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}

static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *queue;

	queue = msg;
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}

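/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_KEY: validate and program the VF's RSS
 * hash key, using the admin queue on X722 MACs and registers elsewhere.
 */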
static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	hw = &pf->hw;

	key = msg;

	if (key->key_len > 52) {
		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key, key->key_len);
		else {
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
		}
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (key->key_len / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}

static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	hw = &pf->hw;

	lut = msg;

	if (lut->lut_entries > 64) {
		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (lut->lut_entries / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}

static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_hena *hena;

	hw = &pf->hw;
	hena = msg;

	/* Set HENA */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}

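/*
 * Send a VIRTCHNL_OP_EVENT link-change message reflecting the PF's current
 * link state to a VF.
 */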
static void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct virtchnl_pf_event event;
	struct i40e_hw *hw;

	hw = &pf->hw;
	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
	    sizeof(event));
}

void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}

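/*
 * Dispatch a virtchnl message received from a VF to the appropriate
 * handler after validating its source and contents.
 */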
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	uint16_t vf_num, msg_size;
	uint32_t opcode;
	void *msg;
	int err;

	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
	if (err) {
		device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
		    __func__, vf->vf_num, opcode, msg_size, err);
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
		return;
	}

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}

/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(struct ixl_pf *pf)
{
	struct ixl_vf *vf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	hw = &pf->hw;

	ixl_dbg_iov(pf, "%s: begin\n", __func__);

	/* Re-enable VFLR interrupt cause so driver doesn't miss a
	 * reset interrupt for another VF */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);

	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vf = &pf->vfs[i];
		if (!(vf->vf_flags & VF_FLAG_ENABLED))
			continue;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);

			ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
			ixl_reinit_vf(pf, vf);
			ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
		}
	}
}

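/*
 * Translate an admin queue error code into the closest errno(2) value.
 */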
static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{

	switch (err) {
	case I40E_AQ_RC_EPERM:
		return (EPERM);
	case I40E_AQ_RC_ENOENT:
		return (ENOENT);
	case I40E_AQ_RC_ESRCH:
		return (ESRCH);
	case I40E_AQ_RC_EINTR:
		return (EINTR);
	case I40E_AQ_RC_EIO:
		return (EIO);
	case I40E_AQ_RC_ENXIO:
		return (ENXIO);
	case I40E_AQ_RC_E2BIG:
		return (E2BIG);
	case I40E_AQ_RC_EAGAIN:
		return (EAGAIN);
	case I40E_AQ_RC_ENOMEM:
		return (ENOMEM);
	case I40E_AQ_RC_EACCES:
		return (EACCES);
	case I40E_AQ_RC_EFAULT:
		return (EFAULT);
	case I40E_AQ_RC_EBUSY:
		return (EBUSY);
	case I40E_AQ_RC_EEXIST:
		return (EEXIST);
	case I40E_AQ_RC_EINVAL:
		return (EINVAL);
	case I40E_AQ_RC_ENOTTY:
		return (ENOTTY);
	case I40E_AQ_RC_ENOSPC:
		return (ENOSPC);
	case I40E_AQ_RC_ENOSYS:
		return (ENOSYS);
	case I40E_AQ_RC_ERANGE:
		return (ERANGE);
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL);	/* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
		return (EFAULT);
	case I40E_AQ_RC_EMODE:
		return (EPERM);
	case I40E_AQ_RC_EFBIG:
		return (EFBIG);
	default:
		return (EINVAL);
	}
}

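/*
 * Enable or disable loopback on the PF's main VSI, used when traffic from
 * VFs must be able to reach the PF through the internal switch.
 */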
static int
ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_vsi_context	ctxt;
	int error;

	memset(&ctxt, 0, sizeof(ctxt));

	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.connection_type = IXL_VSI_DATA_PORT;

	ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id = (enable) ?
	    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;

	/* error is set to 0 on success */
	error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (error) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", error, hw->aq.asq_last_status);
	}

	return (error);
}

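/*
 * iflib handler for SR-IOV initialization: allocate per-VF state, create
 * the VEB, and optionally enable loopback on the PF VSI.
 */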
int
ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int error;

	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	    M_ZERO);
	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Add the VEB, then either:
	 * - do nothing: VEPA mode
	 * - enable loopback mode on connected VSIs: VEB mode
	 */
	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = hw->aq.asq_last_status;
		device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
		    i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
		goto fail;
	}
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, true);

	/*
	 * Adding a VEB brings back the default MAC filter(s). Remove them,
	 * and let the driver add the proper filters back.
	 */
	ixl_del_default_hw_filters(pf_vsi);
	ixl_reconfigure_filters(pf_vsi);

	pf->num_vfs = num_vfs;
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	return (error);
}

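/*
 * iflib handler for SR-IOV teardown: release each VF's VSI, queues, and
 * filters, delete the VEB, and free the VF array.
 */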
void
ixl_if_iov_uninit(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw;
	struct ixl_vf *vfs;
	int i, num_vfs;

	hw = &pf->hw;

	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_filters(&pf->vfs[i].vsi.ftl);
		ixl_dbg_iov(pf, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}
	/* Reset PF VSI loopback mode */
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, false);

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;

	/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
	free(vfs, M_IXL);
}

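/*
 * Reserve a (clamped) number of queues for a VF from the PF's queue
 * manager.
 */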
static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
	device_t dev = pf->dev;
	int error;

	/* Validate, and clamp value if invalid */
	if (num_queues < 1 || num_queues > IAVF_MAX_QUEUES)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    num_queues, vf->vf_num);
	if (num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		num_queues = 1;
	} else if (num_queues > IAVF_MAX_QUEUES) {
		device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
		num_queues = IAVF_MAX_QUEUES;
	}
	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
	if (error) {
		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
		    num_queues, vf->vf_num);
		return (ENOSPC);
	}

	ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
	ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));

	return (0);
}

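/*
 * iflib handler for adding a single VF: reserve queues, set up the VSI,
 * apply the nvlist parameters supplied via iovctl(8), and reset the VF
 * into a usable state.
 */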
int
ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	char sysctl_name[IXL_QUEUE_NAME_LEN];
	struct ixl_vf *vf;
	const void *mac;
	size_t size;
	int error;
	int vf_num_queues;

	vf = &pf->vfs[vfnum];
	vf->vf_num = vfnum;
	vf->vsi.back = pf;
	vf->vf_flags = VF_FLAG_ENABLED;

	/* Reserve queue allocation from PF */
	vf_num_queues = nvlist_get_number(params, "num-queues");
	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
	if (error != 0)
		goto out;

	error = ixl_vf_setup_vsi(pf, vf);
	if (error != 0)
		goto out;

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf->mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else {
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	}

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	/* VF needs to be reset before it can be used */
	ixl_reset_vf(pf, vf);
out:
	if (error == 0) {
		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
		ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
	}

	return (error);
}