1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34 #include "ixl_pf_iov.h"
35
36 /* Private functions */
37 static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
38 static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
39 static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
40
41 static int ixl_vc_opcode_level(uint16_t opcode);
42
43 static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
44
45 static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
46 static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
47 static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
48 static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
49 static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
53 static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
54 static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
55 static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
56 static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
57 static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
58 static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
59 static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
60 static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
61 static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
63 enum i40e_queue_type *last_type, uint16_t *last_queue);
64 static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
65 static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67 static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
68 static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
69 static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
71 static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75 static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
76 static int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);
77
78 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
79
80 /*
81 * TODO: Move pieces of this into iflib and call the rest in a handler?
82 *
83 * e.g. ixl_if_iov_set_schema
84 *
85 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
86 * in the driver.
87 */
88 void
89 ixl_initialize_sriov(struct ixl_pf *pf)
90 {
91 device_t dev = pf->dev;
92 struct i40e_hw *hw = &pf->hw;
93 nvlist_t *pf_schema, *vf_schema;
94 int iov_error;
95
96 pf_schema = pci_iov_schema_alloc_node();
97 vf_schema = pci_iov_schema_alloc_node();
98 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
99 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
100 IOV_SCHEMA_HASDEFAULT, TRUE);
101 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
102 IOV_SCHEMA_HASDEFAULT, FALSE);
103 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
104 IOV_SCHEMA_HASDEFAULT, FALSE);
105 pci_iov_schema_add_uint16(vf_schema, "num-queues",
106 IOV_SCHEMA_HASDEFAULT,
107 max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));
108
109 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
110 if (iov_error != 0) {
111 device_printf(dev,
112 "Failed to initialize SR-IOV (error=%d)\n",
113 iov_error);
114 } else
115 device_printf(dev, "SR-IOV ready\n");
116
117 pf->vc_debug_lvl = 1;
118 }
119
120 /*
121 * Allocate the VSI for a VF.
122 */
123 static int
124 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
125 {
126 device_t dev;
127 struct i40e_hw *hw;
128 struct i40e_vsi_context vsi_ctx;
129 int i;
130 enum i40e_status_code code;
131
132 hw = &pf->hw;
133 dev = pf->dev;
134
135 vsi_ctx.pf_num = hw->pf_id;
136 vsi_ctx.uplink_seid = pf->veb_seid;
137 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
138 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
139 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
140
141 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
142
143 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
144 if (pf->enable_vf_loopback)
145 vsi_ctx.info.switch_id =
146 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
147
148 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
149 vsi_ctx.info.sec_flags = 0;
150 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
151 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
152
153 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
154 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
155 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
156
157 vsi_ctx.info.valid_sections |=
158 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
159 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
160
161 /* XXX: Only scattered allocation is supported for VFs right now */
162 for (i = 0; i < vf->qtag.num_active; i++)
163 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
164 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
165 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
166
167 vsi_ctx.info.tc_mapping[0] = htole16(
168 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
169 ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
170
171 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
172 if (code != I40E_SUCCESS)
173 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
174 vf->vsi.seid = vsi_ctx.seid;
175 vf->vsi.vsi_num = vsi_ctx.vsi_number;
176 vf->vsi.num_rx_queues = vf->qtag.num_active;
177 vf->vsi.num_tx_queues = vf->qtag.num_active;
178
179 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
180 if (code != I40E_SUCCESS)
181 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
182
183 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
184 if (code != I40E_SUCCESS) {
185 device_printf(dev, "Failed to disable BW limit: %d\n",
186 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
188 }
189
190 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
191 return (0);
192 }
193
194 static int
195 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
196 {
197 struct i40e_hw *hw;
198 int error;
199
200 hw = &pf->hw;
201 vf->vsi.flags |= IXL_FLAGS_IS_VF;
202
203 error = ixl_vf_alloc_vsi(pf, vf);
204 if (error != 0)
205 return (error);
206
207 vf->vsi.dev = pf->dev;
208
209 ixl_init_filters(&vf->vsi);
210 /* Let VF receive broadcast Ethernet frames */
211 error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
212 if (error)
213 device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
214 /* Re-add VF's MAC/VLAN filters to its VSI */
215 ixl_reconfigure_filters(&vf->vsi);
216
217 return (0);
218 }
219
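/*
 * Write one entry of the VF VSI's VSILAN_QTABLE, mapping the VSI-relative
 * queue number 'qnum' to 'val' (a PF queue index, or the QINDEX mask to
 * mark the slot unused).
 */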
220 static void
221 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
222 uint32_t val)
223 {
224 uint32_t qtable;
225 int index, shift;
226
227 /*
228 * Two queues are mapped in a single register, so we have to do some
229 * gymnastics to convert the queue number into a register index and
230 * shift.
231 */
232 index = qnum / 2;
233 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
234
235 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
236 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
237 qtable |= val << shift;
238 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
239 }
240
241 static void
242 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
243 {
244 struct i40e_hw *hw;
245 uint32_t qtable;
246 int i;
247
248 hw = &pf->hw;
249
250 /*
251 * Contiguous mappings aren't actually supported by the hardware,
252 * so we have to use non-contiguous mappings.
253 */
254 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
255 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
256
257 /* Enable LAN traffic on this VF */
258 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
259 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
260
261 /* Program index of each VF queue into PF queue space
262 * (This is only needed if QTABLE is enabled) */
263 for (i = 0; i < vf->vsi.num_tx_queues; i++) {
264 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
265 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
266
267 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
268 }
269 for (; i < IXL_MAX_VSI_QUEUES; i++)
270 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
271 I40E_VPLAN_QTABLE_QINDEX_MASK);
272
273 /* Map queues allocated to VF to its VSI;
274 * This mapping matches the VF-wide mapping since the VF
275 * is only given a single VSI */
276 for (i = 0; i < vf->vsi.num_tx_queues; i++)
277 ixl_vf_map_vsi_queue(hw, vf, i,
278 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
279
280 /* Set rest of VSI queues as unused. */
281 for (; i < IXL_MAX_VSI_QUEUES; i++)
282 ixl_vf_map_vsi_queue(hw, vf, i,
283 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
284
285 ixl_flush(hw);
286 }
287
288 static void
289 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
290 {
291 struct i40e_hw *hw;
292
293 hw = &pf->hw;
294
295 if (vsi->seid == 0)
296 return;
297
298 i40e_aq_delete_element(hw, vsi->seid, NULL);
299 }
300
301 static void
302 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
303 {
304
305 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
306 ixl_flush(hw);
307 }
308
309 static void
310 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
311 {
312
313 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
314 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
315 ixl_flush(hw);
316 }
317
318 static void
319 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
320 {
321 struct i40e_hw *hw;
322 uint32_t vfint_reg, vpint_reg;
323 int i;
324
325 hw = &pf->hw;
326
327 ixl_vf_vsi_release(pf, &vf->vsi);
328
329 /* Index 0 has a special register. */
330 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
331
332 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
333 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
334 ixl_vf_disable_queue_intr(hw, vfint_reg);
335 }
336
337 /* Index 0 has a special register. */
338 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
339
340 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
341 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
342 ixl_vf_unregister_intr(hw, vpint_reg);
343 }
344
345 vf->vsi.num_tx_queues = 0;
346 vf->vsi.num_rx_queues = 0;
347 }
348
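/*
 * Wait for pending PCIe transactions from the VF to complete by polling
 * the VF's device status through the PF's PCI config indirect access
 * registers.  Returns 0 on success or ETIMEDOUT if transactions are still
 * pending after the reset timeout.
 */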
349 static int
350 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
351 {
352 struct i40e_hw *hw;
353 int i;
354 uint16_t global_vf_num;
355 uint32_t ciad;
356
357 hw = &pf->hw;
358 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
359
360 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
361 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
362 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
363 ciad = rd32(hw, I40E_PF_PCI_CIAD);
364 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
365 return (0);
366 DELAY(1);
367 }
368
369 return (ETIMEDOUT);
370 }
371
372 static void
373 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
374 {
375 struct i40e_hw *hw;
376 uint32_t vfrtrig;
377
378 hw = &pf->hw;
379
380 ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
381
382 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
383 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
384 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
385 ixl_flush(hw);
386
387 ixl_reinit_vf(pf, vf);
388
389 ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
390 }
391
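/*
 * Wait for an in-progress VF reset to finish, then tear down and rebuild
 * the VF's VSI, queue mappings, and interrupt state before marking the VF
 * active again.
 */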
392 static void
393 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
394 {
395 struct i40e_hw *hw;
396 uint32_t vfrstat, vfrtrig;
397 int i, error;
398
399 hw = &pf->hw;
400
401 error = ixl_flush_pcie(pf, vf);
402 if (error != 0)
403 device_printf(pf->dev,
404 "Timed out waiting for PCIe activity to stop on VF-%d\n",
405 vf->vf_num);
406
407 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
408 DELAY(10);
409
410 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
411 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
412 break;
413 }
414
415 if (i == IXL_VF_RESET_TIMEOUT)
416 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
417
418 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
419
420 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
421 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
422 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
423
424 if (vf->vsi.seid != 0)
425 ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
426 ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
427
428 ixl_vf_release_resources(pf, vf);
429 ixl_vf_setup_vsi(pf, vf);
430 ixl_vf_map_queues(pf, vf);
431
432 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
433 ixl_flush(hw);
434 }
435
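/*
 * Map a virtchnl opcode to the debug level used when logging it; the
 * frequently polled GET_STATS opcode gets a higher level so it does not
 * flood the log at normal verbosity.
 */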
436 static int
437 ixl_vc_opcode_level(uint16_t opcode)
438 {
439 switch (opcode) {
440 case VIRTCHNL_OP_GET_STATS:
441 return (10);
442 default:
443 return (5);
444 }
445 }
446
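/* Send a virtchnl message to a VF over the admin queue. */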
447 static void
448 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
449 enum i40e_status_code status, void *msg, uint16_t len)
450 {
451 struct i40e_hw *hw;
452 int global_vf_id;
453
454 hw = &pf->hw;
455 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
456
457 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
458 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
459 ixl_vc_opcode_str(op), op, status, vf->vf_num);
460
461 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
462 }
463
464 static void
465 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
466 {
467
468 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
469 }
470
471 static void
472 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
473 enum i40e_status_code status, const char *file, int line)
474 {
475
476 I40E_VC_DEBUG(pf, 1,
477 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
478 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
479 status, vf->vf_num, file, line);
480 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
481 }
482
483 static void
484 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
485 uint16_t msg_size)
486 {
487 struct virtchnl_version_info *recv_vf_version;
488 device_t dev = pf->dev;
489
490 recv_vf_version = (struct virtchnl_version_info *)msg;
491
492 /* VFs running the 1.0 API expect to get 1.0 back */
493 if (VF_IS_V10(recv_vf_version)) {
494 vf->version.major = 1;
495 vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
496 } else {
497 vf->version.major = VIRTCHNL_VERSION_MAJOR;
498 vf->version.minor = VIRTCHNL_VERSION_MINOR;
499
500 if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
501 (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
502 device_printf(dev,
503 "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
504 __func__, vf->vf_num,
505 recv_vf_version->major, recv_vf_version->minor,
506 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
507 }
508
509 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
510 &vf->version, sizeof(vf->version));
511 }
512
513 static void
514 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
515 uint16_t msg_size)
516 {
517 ixl_reset_vf(pf, vf);
518
519 /* No response to a reset message. */
520 }
521
522 static void
523 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
524 uint16_t msg_size)
525 {
526 struct virtchnl_vf_resource reply;
527
528 bzero(&reply, sizeof(reply));
529
530 if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
531 reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
532 VIRTCHNL_VF_OFFLOAD_RSS_REG |
533 VIRTCHNL_VF_OFFLOAD_VLAN;
534 else
535 /* Force VF RSS setup by PF in 1.1+ VFs */
536 reply.vf_cap_flags = *(u32 *)msg & (
537 VIRTCHNL_VF_OFFLOAD_L2 |
538 VIRTCHNL_VF_OFFLOAD_RSS_PF |
539 VIRTCHNL_VF_OFFLOAD_VLAN);
540
541 reply.num_vsis = 1;
542 reply.num_queue_pairs = vf->vsi.num_tx_queues;
543 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
544 reply.rss_key_size = 52;
545 reply.rss_lut_size = 64;
546 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
547 reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
548 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
549 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
550
551 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
552 I40E_SUCCESS, &reply, sizeof(reply));
553 }
554
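/*
 * Write the HMC TX queue context for one of the VF's queues and associate
 * the corresponding global queue with the VF via the QTX_CTL register.
 */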
555 static int
556 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
557 struct virtchnl_txq_info *info)
558 {
559 struct i40e_hw *hw;
560 struct i40e_hmc_obj_txq txq;
561 uint16_t global_queue_num, global_vf_num;
562 enum i40e_status_code status;
563 uint32_t qtx_ctl;
564
565 hw = &pf->hw;
566 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
567 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
568 bzero(&txq, sizeof(txq));
569
570 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
571 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
572
573 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
574 if (status != I40E_SUCCESS)
575 return (EINVAL);
576
577 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
578
579 txq.head_wb_ena = info->headwb_enabled;
580 txq.head_wb_addr = info->dma_headwb_addr;
581 txq.qlen = info->ring_len;
582 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
583 txq.rdylist_act = 0;
584
585 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
586 if (status != I40E_SUCCESS)
587 return (EINVAL);
588
589 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
590 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
591 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
592 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
593 ixl_flush(hw);
594
595 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
596
597 return (0);
598 }
599
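/*
 * Validate the VF's requested RX queue parameters and write the HMC RX
 * queue context for the corresponding global queue.
 */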
600 static int
601 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
602 struct virtchnl_rxq_info *info)
603 {
604 struct i40e_hw *hw;
605 struct i40e_hmc_obj_rxq rxq;
606 uint16_t global_queue_num;
607 enum i40e_status_code status;
608
609 hw = &pf->hw;
610 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
611 bzero(&rxq, sizeof(rxq));
612
613 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
614 vf->vf_num, global_queue_num, info->queue_id);
615
616 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
617 return (EINVAL);
618
619 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
620 info->max_pkt_size < ETHER_MIN_LEN)
621 return (EINVAL);
622
623 if (info->splithdr_enabled) {
624 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
625 return (EINVAL);
626
627 rxq.hsplit_0 = info->rx_split_pos &
628 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
629 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
630 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
631 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
632 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
633
634 rxq.dtype = 2;
635 }
636
637 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
638 if (status != I40E_SUCCESS)
639 return (EINVAL);
640
641 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
642 rxq.qlen = info->ring_len;
643
644 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
645
646 rxq.dsize = 1;
647 rxq.crcstrip = 1;
648 rxq.l2tsel = 1;
649
650 rxq.rxmax = info->max_pkt_size;
651 rxq.tphrdesc_ena = 1;
652 rxq.tphwdesc_ena = 1;
653 rxq.tphdata_ena = 1;
654 rxq.tphhead_ena = 1;
655 rxq.lrxqthresh = 1;
656 rxq.prefena = 1;
657
658 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
659 if (status != I40E_SUCCESS)
660 return (EINVAL);
661
662 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
663
664 return (0);
665 }
666
667 static void
668 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
669 uint16_t msg_size)
670 {
671 struct virtchnl_vsi_queue_config_info *info;
672 struct virtchnl_queue_pair_info *pair;
673 int i;
674
675 info = msg;
676 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
677 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
678 vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
679 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
680 I40E_ERR_PARAM);
681 return;
682 }
683
684 if (info->vsi_id != vf->vsi.vsi_num) {
685 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
686 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
687 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
688 I40E_ERR_PARAM);
689 return;
690 }
691
692 for (i = 0; i < info->num_queue_pairs; i++) {
693 pair = &info->qpair[i];
694
695 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
696 pair->rxq.vsi_id != vf->vsi.vsi_num ||
697 pair->txq.queue_id != pair->rxq.queue_id ||
698 pair->txq.queue_id >= vf->vsi.num_tx_queues) {
699
700 i40e_send_vf_nack(pf, vf,
701 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
702 return;
703 }
704
705 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
706 i40e_send_vf_nack(pf, vf,
707 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
708 return;
709 }
710
711 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
712 i40e_send_vf_nack(pf, vf,
713 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
714 return;
715 }
716 }
717
718 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
719 }
720
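/*
 * Program the cause-control register for a single RX or TX queue on the
 * given vector.  The register's next-queue fields point at the entry from
 * the previous call, so the interrupt linked list is built back to front;
 * *last_type and *last_queue are then updated to refer to this queue.
 */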
721 static void
722 ixl_vf_set_qctl(struct ixl_pf *pf,
723 const struct virtchnl_vector_map *vector,
724 enum i40e_queue_type cur_type, uint16_t cur_queue,
725 enum i40e_queue_type *last_type, uint16_t *last_queue)
726 {
727 uint32_t offset, qctl;
728 uint16_t itr_indx;
729
730 if (cur_type == I40E_QUEUE_TYPE_RX) {
731 offset = I40E_QINT_RQCTL(cur_queue);
732 itr_indx = vector->rxitr_idx;
733 } else {
734 offset = I40E_QINT_TQCTL(cur_queue);
735 itr_indx = vector->txitr_idx;
736 }
737
738 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
739 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
740 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
741 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
742 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
743
744 wr32(&pf->hw, offset, qctl);
745
746 *last_type = cur_type;
747 *last_queue = cur_queue;
748 }
749
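/*
 * Build the interrupt linked list for one MSI-X vector from the VF's RX
 * and TX queue bitmaps, then point the vector's LNKLST register at the
 * head of the list.
 */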
750 static void
751 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
752 const struct virtchnl_vector_map *vector)
753 {
754 struct i40e_hw *hw;
755 u_int qindex;
756 enum i40e_queue_type type, last_type;
757 uint32_t lnklst_reg;
758 uint16_t rxq_map, txq_map, cur_queue, last_queue;
759
760 hw = &pf->hw;
761
762 rxq_map = vector->rxq_map;
763 txq_map = vector->txq_map;
764
765 last_queue = IXL_END_OF_INTR_LNKLST;
766 last_type = I40E_QUEUE_TYPE_RX;
767
768 /*
769 * The datasheet says to optimize performance, RX queues and TX queues
770 * should be interleaved in the interrupt linked list, so we process
771 * both at once here.
772 */
773 while ((rxq_map != 0) || (txq_map != 0)) {
774 if (txq_map != 0) {
775 qindex = ffs(txq_map) - 1;
776 type = I40E_QUEUE_TYPE_TX;
777 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
778 ixl_vf_set_qctl(pf, vector, type, cur_queue,
779 &last_type, &last_queue);
780 txq_map &= ~(1 << qindex);
781 }
782
783 if (rxq_map != 0) {
784 qindex = ffs(rxq_map) - 1;
785 type = I40E_QUEUE_TYPE_RX;
786 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
787 ixl_vf_set_qctl(pf, vector, type, cur_queue,
788 &last_type, &last_queue);
789 rxq_map &= ~(1 << qindex);
790 }
791 }
792
793 if (vector->vector_id == 0)
794 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
795 else
796 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
797 vf->vf_num);
798 wr32(hw, lnklst_reg,
799 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
800 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
801
802 ixl_flush(hw);
803 }
804
805 static void
806 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
807 uint16_t msg_size)
808 {
809 struct virtchnl_irq_map_info *map;
810 struct virtchnl_vector_map *vector;
811 struct i40e_hw *hw;
812 int i, largest_txq, largest_rxq;
813
814 hw = &pf->hw;
815 map = msg;
816
817 for (i = 0; i < map->num_vectors; i++) {
818 vector = &map->vecmap[i];
819
820 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
821 vector->vsi_id != vf->vsi.vsi_num) {
822 i40e_send_vf_nack(pf, vf,
823 VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
824 return;
825 }
826
827 if (vector->rxq_map != 0) {
828 largest_rxq = fls(vector->rxq_map) - 1;
829 if (largest_rxq >= vf->vsi.num_rx_queues) {
830 i40e_send_vf_nack(pf, vf,
831 VIRTCHNL_OP_CONFIG_IRQ_MAP,
832 I40E_ERR_PARAM);
833 return;
834 }
835 }
836
837 if (vector->txq_map != 0) {
838 largest_txq = fls(vector->txq_map) - 1;
839 if (largest_txq >= vf->vsi.num_tx_queues) {
840 i40e_send_vf_nack(pf, vf,
841 VIRTCHNL_OP_CONFIG_IRQ_MAP,
842 I40E_ERR_PARAM);
843 return;
844 }
845 }
846
847 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
848 vector->txitr_idx > IXL_MAX_ITR_IDX) {
849 i40e_send_vf_nack(pf, vf,
850 VIRTCHNL_OP_CONFIG_IRQ_MAP,
851 I40E_ERR_PARAM);
852 return;
853 }
854
855 ixl_vf_config_vector(pf, vf, vector);
856 }
857
858 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
859 }
860
861 static void
862 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
863 uint16_t msg_size)
864 {
865 struct virtchnl_queue_select *select;
866 int error = 0;
867
868 select = msg;
869
870 if (select->vsi_id != vf->vsi.vsi_num ||
871 select->rx_queues == 0 || select->tx_queues == 0) {
872 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
873 I40E_ERR_PARAM);
874 return;
875 }
876
877 /* Enable TX rings selected by the VF */
878 for (int i = 0; i < 32; i++) {
879 if ((1 << i) & select->tx_queues) {
880 /* Warn if queue is out of VF allocation range */
881 if (i >= vf->vsi.num_tx_queues) {
882 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
883 vf->vf_num, i);
884 break;
885 }
886 /* Skip this queue if it hasn't been configured */
887 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
888 continue;
889 /* Warn if this queue is already marked as enabled */
890 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
891 ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
892 vf->vf_num, i);
893
894 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
895 if (error)
896 break;
897 else
898 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
899 }
900 }
901
902 /* Enable RX rings selected by the VF */
903 for (int i = 0; i < 32; i++) {
904 if ((1 << i) & select->rx_queues) {
905 /* Warn if queue is out of VF allocation range */
906 if (i >= vf->vsi.num_rx_queues) {
907 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
908 vf->vf_num, i);
909 break;
910 }
911 /* Skip this queue if it hasn't been configured */
912 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
913 continue;
914 /* Warn if this queue is already marked as enabled */
915 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
916 ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
917 vf->vf_num, i);
918 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
919 if (error)
920 break;
921 else
922 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
923 }
924 }
925
926 if (error) {
927 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
928 I40E_ERR_TIMEOUT);
929 return;
930 }
931
932 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
933 }
934
935 static void
936 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
937 void *msg, uint16_t msg_size)
938 {
939 struct virtchnl_queue_select *select;
940 int error = 0;
941
942 select = msg;
943
944 if (select->vsi_id != vf->vsi.vsi_num ||
945 select->rx_queues == 0 || select->tx_queues == 0) {
946 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
947 I40E_ERR_PARAM);
948 return;
949 }
950
951 /* Disable TX rings selected by the VF */
952 for (int i = 0; i < 32; i++) {
953 if ((1 << i) & select->tx_queues) {
954 /* Warn if queue is out of VF allocation range */
955 if (i >= vf->vsi.num_tx_queues) {
956 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
957 vf->vf_num, i);
958 break;
959 }
960 /* Skip this queue if it hasn't been configured */
961 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
962 continue;
963 /* Warn if this queue is already marked as disabled */
964 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
965 ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
966 vf->vf_num, i);
967 continue;
968 }
969 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
970 if (error)
971 break;
972 else
973 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
974 }
975 }
976
977 	/* Disable RX rings selected by the VF */
978 for (int i = 0; i < 32; i++) {
979 if ((1 << i) & select->rx_queues) {
980 /* Warn if queue is out of VF allocation range */
981 if (i >= vf->vsi.num_rx_queues) {
982 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
983 vf->vf_num, i);
984 break;
985 }
986 /* Skip this queue if it hasn't been configured */
987 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
988 continue;
989 /* Warn if this queue is already marked as disabled */
990 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
991 ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
992 vf->vf_num, i);
993 continue;
994 }
995 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
996 if (error)
997 break;
998 else
999 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1000 }
1001 }
1002
1003 if (error) {
1004 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1005 I40E_ERR_TIMEOUT);
1006 return;
1007 }
1008
1009 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1010 }
1011
1012 static int
1013 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1014 {
1015
1016 if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
1017 return (EINVAL);
1018
1019 /*
1020 * If the VF is not allowed to change its MAC address, don't let it
1021 * set a MAC filter for an address that is not a multicast address and
1022 * is not its assigned MAC.
1023 */
1024 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1025 !(ETHER_IS_MULTICAST(addr) || !ixl_ether_is_equal(addr, vf->mac)))
1026 return (EPERM);
1027
1028 return (0);
1029 }
1030
1031 static void
1032 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1033 uint16_t msg_size)
1034 {
1035 struct virtchnl_ether_addr_list *addr_list;
1036 struct virtchnl_ether_addr *addr;
1037 struct ixl_vsi *vsi;
1038 int i;
1039
1040 vsi = &vf->vsi;
1041 addr_list = msg;
1042
1043 if (addr_list->vsi_id != vsi->vsi_num) {
1044 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1045 I40E_ERR_PARAM);
1046 return;
1047 }
1048
1049 for (i = 0; i < addr_list->num_elements; i++) {
1050 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1051 i40e_send_vf_nack(pf, vf,
1052 VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1053 return;
1054 }
1055 }
1056
1057 for (i = 0; i < addr_list->num_elements; i++) {
1058 addr = &addr_list->list[i];
1059 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1060 }
1061
1062 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1063 }
1064
1065 static void
1066 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1067 uint16_t msg_size)
1068 {
1069 struct virtchnl_ether_addr_list *addr_list;
1070 struct virtchnl_ether_addr *addr;
1071 struct ixl_vsi *vsi;
1072 int i;
1073
1074 vsi = &vf->vsi;
1075 addr_list = msg;
1076
1077 if (addr_list->vsi_id != vsi->vsi_num) {
1078 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1079 I40E_ERR_PARAM);
1080 return;
1081 }
1082
1083 for (i = 0; i < addr_list->num_elements; i++) {
1084 addr = &addr_list->list[i];
1085 if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
1086 i40e_send_vf_nack(pf, vf,
1087 VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
1088 return;
1089 }
1090 }
1091
1092 for (i = 0; i < addr_list->num_elements; i++) {
1093 addr = &addr_list->list[i];
1094 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1095 }
1096
1097 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1098 }
1099
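/*
 * Enable VLAN stripping on the VF's VSI by updating its port VLAN
 * properties through the admin queue.
 */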
1100 static enum i40e_status_code
1101 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1102 {
1103 struct i40e_vsi_context vsi_ctx;
1104
1105 vsi_ctx.seid = vf->vsi.seid;
1106
1107 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1108 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1109 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1110 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1111 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1112 }
1113
1114 static void
1115 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1116 uint16_t msg_size)
1117 {
1118 struct virtchnl_vlan_filter_list *filter_list;
1119 enum i40e_status_code code;
1120 int i;
1121
1122 filter_list = msg;
1123
1124 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1125 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1126 I40E_ERR_PARAM);
1127 return;
1128 }
1129
1130 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1131 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1132 I40E_ERR_PARAM);
1133 return;
1134 }
1135
1136 for (i = 0; i < filter_list->num_elements; i++) {
1137 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1138 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1139 I40E_ERR_PARAM);
1140 return;
1141 }
1142 }
1143
1144 code = ixl_vf_enable_vlan_strip(pf, vf);
1145 if (code != I40E_SUCCESS) {
1146 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1147 I40E_ERR_PARAM);
1148 }
1149
1150 for (i = 0; i < filter_list->num_elements; i++)
1151 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1152
1153 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1154 }
1155
1156 static void
1157 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1158 uint16_t msg_size)
1159 {
1160 struct virtchnl_vlan_filter_list *filter_list;
1161 int i;
1162
1163 filter_list = msg;
1164
1165 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1166 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1167 I40E_ERR_PARAM);
1168 return;
1169 }
1170
1171 for (i = 0; i < filter_list->num_elements; i++) {
1172 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1173 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1174 I40E_ERR_PARAM);
1175 return;
1176 }
1177 }
1178
1179 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1180 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1181 I40E_ERR_PARAM);
1182 return;
1183 }
1184
1185 for (i = 0; i < filter_list->num_elements; i++)
1186 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1187
1188 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1189 }
1190
1191 static void
1192 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1193 void *msg, uint16_t msg_size)
1194 {
1195 struct virtchnl_promisc_info *info;
1196 struct i40e_hw *hw = &pf->hw;
1197 enum i40e_status_code code;
1198
1199 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1200 /*
1201 * Do the same thing as the Linux PF driver -- lie to the VF
1202 */
1203 ixl_send_vf_ack(pf, vf,
1204 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1205 return;
1206 }
1207
1208 info = msg;
1209 if (info->vsi_id != vf->vsi.vsi_num) {
1210 i40e_send_vf_nack(pf, vf,
1211 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1212 return;
1213 }
1214
1215 code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
1216 info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1217 if (code != I40E_SUCCESS) {
1218 device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
1219 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1220 i40e_aq_str(hw, hw->aq.asq_last_status));
1221 i40e_send_vf_nack(pf, vf,
1222 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1223 return;
1224 }
1225
1226 code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
1227 info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
1228 if (code != I40E_SUCCESS) {
1229 device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
1230 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1231 i40e_aq_str(hw, hw->aq.asq_last_status));
1232 i40e_send_vf_nack(pf, vf,
1233 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1234 return;
1235 }
1236
1237 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1238 }
1239
1240 static void
1241 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1242 uint16_t msg_size)
1243 {
1244 struct virtchnl_queue_select *queue;
1245
1246 queue = msg;
1247 if (queue->vsi_id != vf->vsi.vsi_num) {
1248 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1249 I40E_ERR_PARAM);
1250 return;
1251 }
1252
1253 ixl_update_eth_stats(&vf->vsi);
1254
1255 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1256 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1257 }
1258
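/*
 * Program the RSS hash key requested by the VF.  On X722 the key (up to
 * 52 bytes: a 40-byte standard key plus a 12-byte extended hash key) is
 * set through the admin queue; on other MACs it is written directly into
 * the VF's HKEY registers.
 */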
1259 static void
1260 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1261 uint16_t msg_size)
1262 {
1263 struct i40e_hw *hw;
1264 struct virtchnl_rss_key *key;
1265 struct i40e_aqc_get_set_rss_key_data key_data;
1266 enum i40e_status_code status;
1267
1268 hw = &pf->hw;
1269
1270 key = msg;
1271
1272 if (key->key_len > 52) {
1273 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1274 vf->vf_num, key->key_len, 52);
1275 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1276 I40E_ERR_PARAM);
1277 return;
1278 }
1279
1280 if (key->vsi_id != vf->vsi.vsi_num) {
1281 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1282 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1283 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1284 I40E_ERR_PARAM);
1285 return;
1286 }
1287
1288 /* Fill out hash using MAC-dependent method */
1289 if (hw->mac.type == I40E_MAC_X722) {
1290 bzero(&key_data, sizeof(key_data));
1291 if (key->key_len <= 40)
1292 bcopy(key->key, key_data.standard_rss_key, key->key_len);
1293 else {
1294 bcopy(key->key, key_data.standard_rss_key, 40);
1295 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1296 }
1297 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1298 if (status) {
1299 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1300 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1301 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1302 I40E_ERR_ADMIN_QUEUE_ERROR);
1303 return;
1304 }
1305 } else {
1306 for (int i = 0; i < (key->key_len / 4); i++)
1307 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1308 }
1309
1310 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1311 vf->vf_num, key->key[0]);
1312
1313 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1314 }
1315
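/*
 * Program the RSS lookup table requested by the VF: through the admin
 * queue on X722, or directly into the VF's HLUT registers on other MACs.
 */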
1316 static void
1317 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1318 uint16_t msg_size)
1319 {
1320 struct i40e_hw *hw;
1321 struct virtchnl_rss_lut *lut;
1322 enum i40e_status_code status;
1323
1324 hw = &pf->hw;
1325
1326 lut = msg;
1327
1328 if (lut->lut_entries > 64) {
1329 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1330 vf->vf_num, lut->lut_entries, 64);
1331 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1332 I40E_ERR_PARAM);
1333 return;
1334 }
1335
1336 if (lut->vsi_id != vf->vsi.vsi_num) {
1337 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1338 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1339 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1340 I40E_ERR_PARAM);
1341 return;
1342 }
1343
1344 /* Fill out LUT using MAC-dependent method */
1345 if (hw->mac.type == I40E_MAC_X722) {
1346 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1347 if (status) {
1348 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1349 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1350 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1351 I40E_ERR_ADMIN_QUEUE_ERROR);
1352 return;
1353 }
1354 } else {
1355 for (int i = 0; i < (lut->lut_entries / 4); i++)
1356 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1357 }
1358
1359 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1360 vf->vf_num, lut->lut[0], lut->lut_entries);
1361
1362 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1363 }
1364
1365 static void
1366 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1367 uint16_t msg_size)
1368 {
1369 struct i40e_hw *hw;
1370 struct virtchnl_rss_hena *hena;
1371
1372 hw = &pf->hw;
1373 hena = msg;
1374
1375 /* Set HENA */
1376 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1377 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1378
1379 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1380 vf->vf_num, hena->hena);
1381
1382 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1383 }
1384
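/* Send the PF's current link state to a single VF as a virtchnl event. */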
1385 static void
1386 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1387 {
1388 struct virtchnl_pf_event event;
1389 struct i40e_hw *hw;
1390
1391 hw = &pf->hw;
1392 event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1393 event.severity = PF_EVENT_SEVERITY_INFO;
1394 event.event_data.link_event.link_status = pf->vsi.link_active;
1395 event.event_data.link_event.link_speed =
1396 i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);
1397
1398 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1399 sizeof(event));
1400 }
1401
1402 void
1403 ixl_broadcast_link_state(struct ixl_pf *pf)
1404 {
1405 int i;
1406
1407 for (i = 0; i < pf->num_vfs; i++)
1408 ixl_notify_vf_link_state(pf, &pf->vfs[i]);
1409 }
1410
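/*
 * Dispatch a virtchnl message received from a VF on the admin queue to the
 * appropriate handler, after validating the VF number and the message
 * contents.
 */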
1411 void
1412 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1413 {
1414 device_t dev = pf->dev;
1415 struct ixl_vf *vf;
1416 uint16_t vf_num, msg_size;
1417 uint32_t opcode;
1418 void *msg;
1419 int err;
1420
1421 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1422 opcode = le32toh(event->desc.cookie_high);
1423
1424 if (vf_num >= pf->num_vfs) {
1425 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1426 return;
1427 }
1428
1429 vf = &pf->vfs[vf_num];
1430 msg = event->msg_buf;
1431 msg_size = event->msg_len;
1432
1433 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1434 "Got msg %s(%d) from%sVF-%d of size %d\n",
1435 ixl_vc_opcode_str(opcode), opcode,
1436 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1437 vf_num, msg_size);
1438
1439 /* Perform basic checks on the msg */
1440 err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
1441 if (err) {
1442 device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
1443 __func__, vf->vf_num, opcode, msg_size, err);
1444 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
1445 return;
1446 }
1447
1448 /* This must be a stray msg from a previously destroyed VF. */
1449 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1450 return;
1451
1452 switch (opcode) {
1453 case VIRTCHNL_OP_VERSION:
1454 ixl_vf_version_msg(pf, vf, msg, msg_size);
1455 break;
1456 case VIRTCHNL_OP_RESET_VF:
1457 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1458 break;
1459 case VIRTCHNL_OP_GET_VF_RESOURCES:
1460 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1461 /* Notify VF of link state after it obtains queues, as this is
1462 * the last thing it will do as part of initialization
1463 */
1464 ixl_notify_vf_link_state(pf, vf);
1465 break;
1466 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1467 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1468 break;
1469 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1470 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1471 break;
1472 case VIRTCHNL_OP_ENABLE_QUEUES:
1473 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1474 /* Notify VF of link state after it obtains queues, as this is
1475 * the last thing it will do as part of initialization
1476 */
1477 ixl_notify_vf_link_state(pf, vf);
1478 break;
1479 case VIRTCHNL_OP_DISABLE_QUEUES:
1480 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1481 break;
1482 case VIRTCHNL_OP_ADD_ETH_ADDR:
1483 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1484 break;
1485 case VIRTCHNL_OP_DEL_ETH_ADDR:
1486 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1487 break;
1488 case VIRTCHNL_OP_ADD_VLAN:
1489 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1490 break;
1491 case VIRTCHNL_OP_DEL_VLAN:
1492 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1493 break;
1494 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1495 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1496 break;
1497 case VIRTCHNL_OP_GET_STATS:
1498 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1499 break;
1500 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1501 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1502 break;
1503 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1504 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1505 break;
1506 case VIRTCHNL_OP_SET_RSS_HENA:
1507 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1508 break;
1509
1510 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1511 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1512 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1513 default:
1514 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1515 break;
1516 }
1517 }
1518
1519 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1520 void
1521 ixl_handle_vflr(struct ixl_pf *pf)
1522 {
1523 struct ixl_vf *vf;
1524 struct i40e_hw *hw;
1525 uint16_t global_vf_num;
1526 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1527 int i;
1528
1529 hw = &pf->hw;
1530
1531 ixl_dbg_iov(pf, "%s: begin\n", __func__);
1532
1533 /* Re-enable VFLR interrupt cause so driver doesn't miss a
1534 * reset interrupt for another VF */
1535 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1536 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1537 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1538 ixl_flush(hw);
1539
1540 for (i = 0; i < pf->num_vfs; i++) {
1541 global_vf_num = hw->func_caps.vf_base_id + i;
1542
1543 vf = &pf->vfs[i];
1544 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1545 continue;
1546
1547 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1548 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1549 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1550 if (vflrstat & vflrstat_mask) {
1551 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1552 vflrstat_mask);
1553
1554 ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
1555 ixl_reinit_vf(pf, vf);
1556 ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
1557 }
1558 }
1559
1560 }
1561
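/* Translate an admin queue error code into a standard errno value. */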
1562 static int
1563 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1564 {
1565
1566 switch (err) {
1567 case I40E_AQ_RC_EPERM:
1568 return (EPERM);
1569 case I40E_AQ_RC_ENOENT:
1570 return (ENOENT);
1571 case I40E_AQ_RC_ESRCH:
1572 return (ESRCH);
1573 case I40E_AQ_RC_EINTR:
1574 return (EINTR);
1575 case I40E_AQ_RC_EIO:
1576 return (EIO);
1577 case I40E_AQ_RC_ENXIO:
1578 return (ENXIO);
1579 case I40E_AQ_RC_E2BIG:
1580 return (E2BIG);
1581 case I40E_AQ_RC_EAGAIN:
1582 return (EAGAIN);
1583 case I40E_AQ_RC_ENOMEM:
1584 return (ENOMEM);
1585 case I40E_AQ_RC_EACCES:
1586 return (EACCES);
1587 case I40E_AQ_RC_EFAULT:
1588 return (EFAULT);
1589 case I40E_AQ_RC_EBUSY:
1590 return (EBUSY);
1591 case I40E_AQ_RC_EEXIST:
1592 return (EEXIST);
1593 case I40E_AQ_RC_EINVAL:
1594 return (EINVAL);
1595 case I40E_AQ_RC_ENOTTY:
1596 return (ENOTTY);
1597 case I40E_AQ_RC_ENOSPC:
1598 return (ENOSPC);
1599 case I40E_AQ_RC_ENOSYS:
1600 return (ENOSYS);
1601 case I40E_AQ_RC_ERANGE:
1602 return (ERANGE);
1603 case I40E_AQ_RC_EFLUSHED:
1604 return (EINVAL); /* No exact equivalent in errno.h */
1605 case I40E_AQ_RC_BAD_ADDR:
1606 return (EFAULT);
1607 case I40E_AQ_RC_EMODE:
1608 return (EPERM);
1609 case I40E_AQ_RC_EFBIG:
1610 return (EFBIG);
1611 default:
1612 return (EINVAL);
1613 }
1614 }
1615
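/*
 * Enable or disable local loopback (used in VEB mode) on the PF's main
 * VSI so traffic can be switched between the PF and its VFs.
 */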
1616 static int
1617 ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
1618 {
1619 struct i40e_hw *hw = &pf->hw;
1620 device_t dev = pf->dev;
1621 struct ixl_vsi *vsi = &pf->vsi;
1622 struct i40e_vsi_context ctxt;
1623 int error;
1624
1625 memset(&ctxt, 0, sizeof(ctxt));
1626
1627 ctxt.seid = vsi->seid;
1628 if (pf->veb_seid != 0)
1629 ctxt.uplink_seid = pf->veb_seid;
1630 ctxt.pf_num = hw->pf_id;
1631 ctxt.connection_type = IXL_VSI_DATA_PORT;
1632
1633 ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
1634 ctxt.info.switch_id = (enable) ?
1635 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;
1636
1637 /* error is set to 0 on success */
1638 error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1639 if (error) {
1640 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1641 " aq_error %d\n", error, hw->aq.asq_last_status);
1642 }
1643
1644 return (error);
1645 }
1646
1647 int
1648 ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
1649 {
1650 struct ixl_pf *pf = iflib_get_softc(ctx);
1651 device_t dev = iflib_get_dev(ctx);
1652 struct i40e_hw *hw;
1653 struct ixl_vsi *pf_vsi;
1654 enum i40e_status_code ret;
1655 int error;
1656
1657 hw = &pf->hw;
1658 pf_vsi = &pf->vsi;
1659
1660 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1661 M_ZERO);
1662 if (pf->vfs == NULL) {
1663 error = ENOMEM;
1664 goto fail;
1665 }
1666
1667 /*
1668 * Add the VEB and ...
1669 * - do nothing: VEPA mode
1670 * - enable loopback mode on connected VSIs: VEB mode
1671 */
1672 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1673 1, FALSE, &pf->veb_seid, FALSE, NULL);
1674 if (ret != I40E_SUCCESS) {
1675 error = hw->aq.asq_last_status;
1676 device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
1677 i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
1678 goto fail;
1679 }
1680 if (pf->enable_vf_loopback)
1681 ixl_config_pf_vsi_loopback(pf, true);
1682
1683 /*
1684 * Adding a VEB brings back the default MAC filter(s). Remove them,
1685 * and let the driver add the proper filters back.
1686 */
1687 ixl_del_default_hw_filters(pf_vsi);
1688 ixl_reconfigure_filters(pf_vsi);
1689
1690 pf->num_vfs = num_vfs;
1691 return (0);
1692
1693 fail:
1694 free(pf->vfs, M_IXL);
1695 pf->vfs = NULL;
1696 return (error);
1697 }
1698
1699 void
1700 ixl_if_iov_uninit(if_ctx_t ctx)
1701 {
1702 struct ixl_pf *pf = iflib_get_softc(ctx);
1703 struct i40e_hw *hw;
1704 struct ixl_vf *vfs;
1705 int i, num_vfs;
1706
1707 hw = &pf->hw;
1708
1709 for (i = 0; i < pf->num_vfs; i++) {
1710 if (pf->vfs[i].vsi.seid != 0)
1711 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1712 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1713 ixl_free_filters(&pf->vfs[i].vsi.ftl);
1714 ixl_dbg_iov(pf, "VF %d: %d released\n",
1715 i, pf->vfs[i].qtag.num_allocated);
1716 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1717 }
1718
1719 if (pf->veb_seid != 0) {
1720 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1721 pf->veb_seid = 0;
1722 }
1723 /* Reset PF VSI loopback mode */
1724 if (pf->enable_vf_loopback)
1725 ixl_config_pf_vsi_loopback(pf, false);
1726
1727 vfs = pf->vfs;
1728 num_vfs = pf->num_vfs;
1729
1730 pf->vfs = NULL;
1731 pf->num_vfs = 0;
1732
1733 /* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
1734 for (i = 0; i < num_vfs; i++)
1735 sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
1736 free(vfs, M_IXL);
1737 }
1738
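/*
 * Reserve a scattered allocation of PF queues for a VF, clamping the
 * requested queue count to the supported range.
 */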
1739 static int
1740 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1741 {
1742 device_t dev = pf->dev;
1743 int error;
1744
1745 /* Validate, and clamp value if invalid */
1746 if (num_queues < 1 || num_queues > 16)
1747 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1748 num_queues, vf->vf_num);
1749 if (num_queues < 1) {
1750 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1751 num_queues = 1;
1752 } else if (num_queues > IAVF_MAX_QUEUES) {
1753 device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1754 num_queues = IAVF_MAX_QUEUES;
1755 }
1756 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1757 if (error) {
1758 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1759 num_queues, vf->vf_num);
1760 return (ENOSPC);
1761 }
1762
1763 ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
1764 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1765 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1766
1767 return (0);
1768 }
1769
1770 int
1771 ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
1772 {
1773 struct ixl_pf *pf = iflib_get_softc(ctx);
1774 char sysctl_name[IXL_QUEUE_NAME_LEN];
1775 struct ixl_vf *vf;
1776 const void *mac;
1777 size_t size;
1778 int error;
1779 int vf_num_queues;
1780
1781 vf = &pf->vfs[vfnum];
1782 vf->vf_num = vfnum;
1783 vf->vsi.back = pf;
1784 vf->vf_flags = VF_FLAG_ENABLED;
1785
1786 /* Reserve queue allocation from PF */
1787 vf_num_queues = nvlist_get_number(params, "num-queues");
1788 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1789 if (error != 0)
1790 goto out;
1791
1792 error = ixl_vf_setup_vsi(pf, vf);
1793 if (error != 0)
1794 goto out;
1795
1796 if (nvlist_exists_binary(params, "mac-addr")) {
1797 mac = nvlist_get_binary(params, "mac-addr", &size);
1798 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1799
1800 if (nvlist_get_bool(params, "allow-set-mac"))
1801 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1802 } else
1803 /*
1804 * If the administrator has not specified a MAC address then
1805 * we must allow the VF to choose one.
1806 */
1807 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1808
1809 if (nvlist_get_bool(params, "mac-anti-spoof"))
1810 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1811
1812 if (nvlist_get_bool(params, "allow-promisc"))
1813 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1814
1815 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1816
1817 /* VF needs to be reset before it can be used */
1818 ixl_reset_vf(pf, vf);
1819 out:
1820 if (error == 0) {
1821 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1822 ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
1823 }
1824
1825 return (error);
1826 }
1827
1828