1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022, Intel Corporation. */
3
4 #include "virtchnl.h"
5 #include "queues.h"
6 #include "rss.h"
7 #include "ice_vf_lib_private.h"
8 #include "ice.h"
9 #include "ice_base.h"
10 #include "ice_lib.h"
11 #include "ice_fltr.h"
12 #include "allowlist.h"
13 #include "ice_vf_vsi_vlan_ops.h"
14 #include "ice_vlan.h"
15 #include "ice_flex_pipe.h"
16 #include "ice_dcb_lib.h"
17
18 /**
19 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
20 * @pf: pointer to the PF structure
21 * @v_opcode: operation code
22 * @v_retval: return value
23 * @msg: pointer to the msg buffer
24 * @msglen: msg length
25 */
26 static void
ice_vc_vf_broadcast(struct ice_pf * pf,enum virtchnl_ops v_opcode,enum virtchnl_status_code v_retval,u8 * msg,u16 msglen)27 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
28 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
29 {
30 struct ice_hw *hw = &pf->hw;
31 struct ice_vf *vf;
32 unsigned int bkt;
33
34 mutex_lock(&pf->vfs.table_lock);
35 ice_for_each_vf(pf, bkt, vf) {
36 /* Not all vfs are enabled so skip the ones that are not */
37 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
38 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
39 continue;
40
41 /* Ignore return value on purpose - a given VF may fail, but
42 * we need to keep going and send to all of them
43 */
44 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
45 msglen, NULL);
46 }
47 mutex_unlock(&pf->vfs.table_lock);
48 }
49
50 /**
51 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
52 * @vf: pointer to the VF structure
53 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
54 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
55 * @link_up: whether or not to set the link up/down
56 */
57 static void
ice_set_pfe_link(struct ice_vf * vf,struct virtchnl_pf_event * pfe,int ice_link_speed,bool link_up)58 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
59 int ice_link_speed, bool link_up)
60 {
61 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
62 pfe->event_data.link_event_adv.link_status = link_up;
63 /* Speed in Mbps */
64 pfe->event_data.link_event_adv.link_speed =
65 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
66 } else {
67 pfe->event_data.link_event.link_status = link_up;
68 /* Legacy method for virtchnl link speeds */
69 pfe->event_data.link_event.link_speed =
70 (enum virtchnl_link_speed)
71 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
72 }
73 }
74
75 /**
76 * ice_vc_notify_vf_link_state - Inform a VF of link status
77 * @vf: pointer to the VF structure
78 *
79 * send a link status message to a single VF
80 */
ice_vc_notify_vf_link_state(struct ice_vf * vf)81 void ice_vc_notify_vf_link_state(struct ice_vf *vf)
82 {
83 struct virtchnl_pf_event pfe = { 0 };
84 struct ice_hw *hw = &vf->pf->hw;
85
86 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
87 pfe.severity = PF_EVENT_SEVERITY_INFO;
88
89 if (ice_is_vf_link_up(vf))
90 ice_set_pfe_link(vf, &pfe,
91 hw->port_info->phy.link_info.link_speed, true);
92 else
93 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
94
95 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
96 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
97 sizeof(pfe), NULL);
98 }
99
100 /**
101 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
102 * @pf: pointer to the PF structure
103 */
ice_vc_notify_link_state(struct ice_pf * pf)104 void ice_vc_notify_link_state(struct ice_pf *pf)
105 {
106 struct ice_vf *vf;
107 unsigned int bkt;
108
109 mutex_lock(&pf->vfs.table_lock);
110 ice_for_each_vf(pf, bkt, vf)
111 ice_vc_notify_vf_link_state(vf);
112 mutex_unlock(&pf->vfs.table_lock);
113 }
114
115 /**
116 * ice_vc_notify_reset - Send pending reset message to all VFs
117 * @pf: pointer to the PF structure
118 *
119 * indicate a pending reset to all VFs on a given PF
120 */
ice_vc_notify_reset(struct ice_pf * pf)121 void ice_vc_notify_reset(struct ice_pf *pf)
122 {
123 struct virtchnl_pf_event pfe;
124
125 if (!ice_has_vfs(pf))
126 return;
127
128 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
129 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
130 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
131 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
132 }
133
134 /**
135 * ice_vc_send_msg_to_vf - Send message to VF
136 * @vf: pointer to the VF info
137 * @v_opcode: virtual channel opcode
138 * @v_retval: virtual channel return value
139 * @msg: pointer to the msg buffer
140 * @msglen: msg length
141 *
142 * send msg to VF
143 */
144 int
ice_vc_send_msg_to_vf(struct ice_vf * vf,u32 v_opcode,enum virtchnl_status_code v_retval,u8 * msg,u16 msglen)145 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
146 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
147 {
148 struct device *dev;
149 struct ice_pf *pf;
150 int aq_ret;
151
152 pf = vf->pf;
153 dev = ice_pf_to_dev(pf);
154
155 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
156 msg, msglen, NULL);
157 if (aq_ret && pf->hw.mailboxq.sq_last_status != LIBIE_AQ_RC_ENOSYS) {
158 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
159 vf->vf_id, aq_ret,
160 libie_aq_str(pf->hw.mailboxq.sq_last_status));
161 return -EIO;
162 }
163
164 return 0;
165 }
166
167 /**
168 * ice_vc_get_ver_msg
169 * @vf: pointer to the VF info
170 * @msg: pointer to the msg buffer
171 *
172 * called from the VF to request the API version used by the PF
173 */
ice_vc_get_ver_msg(struct ice_vf * vf,u8 * msg)174 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
175 {
176 struct virtchnl_version_info info = {
177 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
178 };
179
180 vf->vf_ver = *(struct virtchnl_version_info *)msg;
181 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
182 if (VF_IS_V10(&vf->vf_ver))
183 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
184
185 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
186 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
187 sizeof(struct virtchnl_version_info));
188 }
189
190 /**
191 * ice_vc_get_vlan_caps
192 * @hw: pointer to the hw
193 * @vf: pointer to the VF info
194 * @vsi: pointer to the VSI
195 * @driver_caps: current driver caps
196 *
197 * Return 0 if there is no VLAN caps supported, or VLAN caps value
198 */
199 static u32
ice_vc_get_vlan_caps(struct ice_hw * hw,struct ice_vf * vf,struct ice_vsi * vsi,u32 driver_caps)200 ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
201 u32 driver_caps)
202 {
203 if (ice_is_eswitch_mode_switchdev(vf->pf))
204 /* In switchdev setting VLAN from VF isn't supported */
205 return 0;
206
207 if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
208 /* VLAN offloads based on current device configuration */
209 return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
210 } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
211 /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for
212 * these two conditions, which amounts to guest VLAN filtering
213 * and offloads being based on the inner VLAN or the
214 * inner/single VLAN respectively and don't allow VF to
215 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases
216 */
217 if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
218 return VIRTCHNL_VF_OFFLOAD_VLAN;
219 } else if (!ice_is_dvm_ena(hw) &&
220 !ice_vf_is_port_vlan_ena(vf)) {
221 /* configure backward compatible support for VFs that
222 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
223 * configured in SVM, and no port VLAN is configured
224 */
225 ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
226 return VIRTCHNL_VF_OFFLOAD_VLAN;
227 } else if (ice_is_dvm_ena(hw)) {
228 /* configure software offloaded VLAN support when DVM
229 * is enabled, but no port VLAN is enabled
230 */
231 ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
232 }
233 }
234
235 return 0;
236 }
237
/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 *
 * Negotiates the capability flags (intersecting what the VF driver asked
 * for with what this PF grants), fills in a single SR-IOV VSI resource
 * entry, and replies with VIRTCHNL_OP_GET_VF_RESOURCES.  On any failure a
 * reply is still sent, carrying the error status (and a zero-length or
 * partially filled buffer).
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_hw *hw = &vf->pf->hw;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (ice_check_vf_init(vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* one virtchnl_vf_resource with zero trailing vsi_res entries; the
	 * single VSI entry lives in the flexible-array tail
	 */
	len = virtchnl_struct_size(vfres, vsi_res, 0);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	/* 1.1+ VFs pass their requested capabilities in the message body;
	 * 1.0 VFs get a fixed legacy set
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	/* L2 is always granted */
	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
						    vf->driver_caps);

	/* Each remaining capability is granted only if the VF requested it */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;

	/* TC U32 additionally requires Flow Director to have been granted */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
	    vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = vf->num_msix;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
	vfres->max_mtu = ice_vc_get_max_frame_size(vf);

	/* describe the single SR-IOV VSI handed to this VF */
	vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->hw_lan_addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	ice_vc_set_caps_allowlist(vf);
	ice_vc_set_working_allowlist(vf);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
362
363 /**
364 * ice_vc_reset_vf_msg
365 * @vf: pointer to the VF info
366 *
367 * called from the VF to reset itself,
368 * unlike other virtchnl messages, PF driver
369 * doesn't send the response back to the VF
370 */
ice_vc_reset_vf_msg(struct ice_vf * vf)371 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
372 {
373 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
374 ice_reset_vf(vf, 0);
375 }
376
/**
 * ice_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 *
 * VFs address their VSI through the fixed relative ID ICE_VF_VSI_ID, so
 * the check is a plain comparison; @vf itself is not consulted.
 */
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
	return vsi_id == ICE_VF_VSI_ID;
}
388
/**
 * ice_vc_get_qos_caps - Get current QoS caps from PF
 * @vf: pointer to the VF info
 *
 * Get VF's QoS capabilities, such as TC number, arbiter and
 * bandwidth from PF.
 *
 * A reply is sent even on failure, carrying the error status and a
 * zero-length buffer.
 *
 * Return: 0 on success or negative error value.
 */
static int ice_vc_get_qos_caps(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_qos_cap_list *cap_list = NULL;
	u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct virtchnl_qos_cap_elem *cfg = NULL;
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_pf *pf = vf->pf;
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	u8 numtc, tc;
	u16 len = 0;
	int ret, i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	pi = pf->hw.port_info;
	numtc = vsi->tc_cfg.numtc;

	/* scheduler context holds the per-TC committed/peak bandwidth */
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx);
	if (!vsi_ctx) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* one cap element per enabled traffic class */
	len = struct_size(cap_list, cap, numtc);
	cap_list = kzalloc(len, GFP_KERNEL);
	if (!cap_list) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	cap_list->vsi_id = vsi->vsi_num;
	cap_list->num_elem = numtc;

	/* Store the UP2TC configuration from DCB to a user priority bitmap
	 * of each TC. Each element of prio_of_tc represents one TC. Each
	 * bitmap indicates the user priorities belong to this TC.
	 */
	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
		tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i];
		tc_prio[tc] |= BIT(i);
	}

	/* fill one element per TC from the DCB ETS config and VSI scheduler */
	for (i = 0; i < numtc; i++) {
		cfg = &cap_list->cap[i];
		cfg->tc_num = i;
		cfg->tc_prio = tc_prio[i];
		cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i];
		cfg->weight = VIRTCHNL_STRICT_WEIGHT;
		cfg->type = VIRTCHNL_BW_SHAPER;
		cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw;
		cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw;
	}

err:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret,
				    (u8 *)cap_list, len);
	kfree(cap_list);
	return ret;
}
469
/**
 * ice_vc_cfg_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure VF VSIs promiscuous mode
 *
 * Only trusted VFs are allowed to change promiscuous state; untrusted VFs
 * receive a success reply without any change being applied.  Depending on
 * the ICE_FLAG_VF_TRUE_PROMISC_ENA PF flag, unicast promiscuous mode is
 * realized either via the default-VSI mechanism or via per-VSI promiscuous
 * filter masks.
 */
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	bool rm_promisc, alluni = false, allmulti = false;
	struct virtchnl_promisc_info *info =
		(struct virtchnl_promisc_info *)msg;
	struct ice_vsi_vlan_ops *vlan_ops;
	int mcast_err = 0, ucast_err = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	u8 mcast_m, ucast_m;
	struct device *dev;
	int ret = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	dev = ice_pf_to_dev(pf);
	if (!ice_is_vf_trusted(vf)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Leave v_ret alone, lie to the VF on purpose. */
		goto error_param;
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;

	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	/* leaving promiscuous mode entirely: re-enable VLAN pruning */
	rm_promisc = !allmulti && !alluni;

	vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	if (rm_promisc)
		ret = vlan_ops->ena_rx_filtering(vsi);
	else
		ret = vlan_ops->dis_rx_filtering(vsi);
	if (ret) {
		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);

	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		/* true-promisc disabled: unicast promisc is emulated by
		 * making this VSI the default (catch-all) VSI
		 */
		if (alluni) {
			/* in this case we're turning on promiscuous mode */
			ret = ice_set_dflt_vsi(vsi);
		} else {
			/* in this case we're turning off promiscuous mode */
			if (ice_is_dflt_vsi_in_use(vsi->port_info))
				ret = ice_clear_dflt_vsi(vsi);
		}

		/* in this case we're turning on/off only
		 * allmulticast
		 */
		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ret) {
			dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
				vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto error_param;
		}
	} else {
		/* true-promisc enabled: apply both masks directly */
		if (alluni)
			ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
		else
			ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);

		if (allmulti)
			mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
		else
			mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);

		if (ucast_err || mcast_err)
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	/* log transitions only; test_and_{set,clear}_bit suppress repeats */
	if (!mcast_err) {
		if (allmulti &&
		    !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
				 vf->vf_id);
		else if (!allmulti &&
			 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, mcast_err);
	}

	if (!ucast_err) {
		if (alluni &&
		    !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
			dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
				 vf->vf_id);
		else if (!alluni &&
			 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
					    vf->vf_states))
			dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
				 vf->vf_id);
	} else {
		dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
			vf->vf_id, ucast_err);
	}

error_param:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				     v_ret, NULL, 0);
}
609
610 /**
611 * ice_vc_get_stats_msg
612 * @vf: pointer to the VF info
613 * @msg: pointer to the msg buffer
614 *
615 * called from the VF to get VSI stats
616 */
ice_vc_get_stats_msg(struct ice_vf * vf,u8 * msg)617 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
618 {
619 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
620 struct virtchnl_queue_select *vqs =
621 (struct virtchnl_queue_select *)msg;
622 struct ice_eth_stats stats = { 0 };
623 struct ice_vsi *vsi;
624
625 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
626 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
627 goto error_param;
628 }
629
630 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
631 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
632 goto error_param;
633 }
634
635 vsi = ice_get_vf_vsi(vf);
636 if (!vsi) {
637 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
638 goto error_param;
639 }
640
641 ice_update_eth_stats(vsi);
642
643 stats = vsi->eth_stats;
644
645 error_param:
646 /* send the response to the VF */
647 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
648 (u8 *)&stats, sizeof(stats));
649 }
650
651 /**
652 * ice_can_vf_change_mac
653 * @vf: pointer to the VF info
654 *
655 * Return true if the VF is allowed to change its MAC filters, false otherwise
656 */
ice_can_vf_change_mac(struct ice_vf * vf)657 static bool ice_can_vf_change_mac(struct ice_vf *vf)
658 {
659 /* If the VF MAC address has been set administratively (via the
660 * ndo_set_vf_mac command), then deny permission to the VF to
661 * add/delete unicast MAC addresses, unless the VF is trusted
662 */
663 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
664 return false;
665
666 return true;
667 }
668
669 /**
670 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
671 * @vc_ether_addr: used to extract the type
672 */
673 static u8
ice_vc_ether_addr_type(struct virtchnl_ether_addr * vc_ether_addr)674 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
675 {
676 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
677 }
678
679 /**
680 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
681 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
682 */
683 static bool
ice_is_vc_addr_legacy(struct virtchnl_ether_addr * vc_ether_addr)684 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
685 {
686 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
687
688 return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
689 }
690
691 /**
692 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
693 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
694 *
695 * This function should only be called when the MAC address in
696 * virtchnl_ether_addr is a valid unicast MAC
697 */
698 static bool
ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused * vc_ether_addr)699 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
700 {
701 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
702
703 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
704 }
705
706 /**
707 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
708 * @vf: VF to update
709 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
710 */
711 static void
ice_vfhw_mac_add(struct ice_vf * vf,struct virtchnl_ether_addr * vc_ether_addr)712 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
713 {
714 u8 *mac_addr = vc_ether_addr->addr;
715
716 if (!is_valid_ether_addr(mac_addr))
717 return;
718
719 /* only allow legacy VF drivers to set the device and hardware MAC if it
720 * is zero and allow new VF drivers to set the hardware MAC if the type
721 * was correctly specified over VIRTCHNL
722 */
723 if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
724 is_zero_ether_addr(vf->hw_lan_addr)) ||
725 ice_is_vc_addr_primary(vc_ether_addr)) {
726 ether_addr_copy(vf->dev_lan_addr, mac_addr);
727 ether_addr_copy(vf->hw_lan_addr, mac_addr);
728 }
729
730 /* hardware and device MACs are already set, but its possible that the
731 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
732 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
733 * away for the legacy VF driver case as it will be updated in the
734 * delete flow for this case
735 */
736 if (ice_is_vc_addr_legacy(vc_ether_addr)) {
737 ether_addr_copy(vf->legacy_last_added_umac.addr,
738 mac_addr);
739 vf->legacy_last_added_umac.time_modified = jiffies;
740 }
741 }
742
743 /**
744 * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
745 * @mac: address to check
746 *
747 * Return: true if the address is one of the three possible LLDP multicast
748 * addresses, false otherwise.
749 */
ice_is_mc_lldp_eth_addr(const u8 * mac)750 static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
751 {
752 const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
753
754 if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
755 return false;
756
757 return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
758 }
759
760 /**
761 * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
762 * @vf: a VF to add the address to
763 * @mac: address to check
764 *
765 * Return: true if the VF is allowed to add such MAC address, false otherwise.
766 */
ice_vc_can_add_mac(const struct ice_vf * vf,const u8 * mac)767 static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
768 {
769 struct device *dev = ice_pf_to_dev(vf->pf);
770
771 if (is_unicast_ether_addr(mac) &&
772 !ice_can_vf_change_mac((struct ice_vf *)vf)) {
773 dev_err(dev,
774 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
775 return false;
776 }
777
778 if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
779 dev_warn(dev,
780 "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
781 vf->vf_id);
782 return false;
783 }
784
785 return true;
786 }
787
788 /**
789 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
790 * @vf: pointer to the VF info
791 * @vsi: pointer to the VF's VSI
792 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
793 */
794 static int
ice_vc_add_mac_addr(struct ice_vf * vf,struct ice_vsi * vsi,struct virtchnl_ether_addr * vc_ether_addr)795 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
796 struct virtchnl_ether_addr *vc_ether_addr)
797 {
798 struct device *dev = ice_pf_to_dev(vf->pf);
799 u8 *mac_addr = vc_ether_addr->addr;
800 int ret;
801
802 /* device MAC already added */
803 if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
804 return 0;
805
806 if (!ice_vc_can_add_mac(vf, mac_addr))
807 return -EPERM;
808
809 ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
810 if (ret == -EEXIST) {
811 dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
812 vf->vf_id);
813 /* don't return since we might need to update
814 * the primary MAC in ice_vfhw_mac_add() below
815 */
816 } else if (ret) {
817 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
818 mac_addr, vf->vf_id, ret);
819 return ret;
820 } else {
821 vf->num_mac++;
822 if (ice_is_mc_lldp_eth_addr(mac_addr))
823 ice_vf_update_mac_lldp_num(vf, vsi, true);
824 }
825
826 ice_vfhw_mac_add(vf, vc_ether_addr);
827
828 return ret;
829 }
830
831 /**
832 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
833 * @last_added_umac: structure used to check expiration
834 */
ice_is_legacy_umac_expired(struct ice_time_mac * last_added_umac)835 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
836 {
837 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
838 return time_is_before_jiffies(last_added_umac->time_modified +
839 ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
840 }
841
842 /**
843 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
844 * @vf: VF to update
845 * @vc_ether_addr: structure from VIRTCHNL with MAC to check
846 *
847 * only update cached hardware MAC for legacy VF drivers on delete
848 * because we cannot guarantee order/type of MAC from the VF driver
849 */
850 static void
ice_update_legacy_cached_mac(struct ice_vf * vf,struct virtchnl_ether_addr * vc_ether_addr)851 ice_update_legacy_cached_mac(struct ice_vf *vf,
852 struct virtchnl_ether_addr *vc_ether_addr)
853 {
854 if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
855 ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
856 return;
857
858 ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
859 ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
860 }
861
862 /**
863 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
864 * @vf: VF to update
865 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
866 */
867 static void
ice_vfhw_mac_del(struct ice_vf * vf,struct virtchnl_ether_addr * vc_ether_addr)868 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
869 {
870 u8 *mac_addr = vc_ether_addr->addr;
871
872 if (!is_valid_ether_addr(mac_addr) ||
873 !ether_addr_equal(vf->dev_lan_addr, mac_addr))
874 return;
875
876 /* allow the device MAC to be repopulated in the add flow and don't
877 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
878 * to be persistent on VM reboot and across driver unload/load, which
879 * won't work if we clear the hardware MAC here
880 */
881 eth_zero_addr(vf->dev_lan_addr);
882
883 ice_update_legacy_cached_mac(vf, vc_ether_addr);
884 }
885
886 /**
887 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
888 * @vf: pointer to the VF info
889 * @vsi: pointer to the VF's VSI
890 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
891 */
892 static int
ice_vc_del_mac_addr(struct ice_vf * vf,struct ice_vsi * vsi,struct virtchnl_ether_addr * vc_ether_addr)893 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
894 struct virtchnl_ether_addr *vc_ether_addr)
895 {
896 struct device *dev = ice_pf_to_dev(vf->pf);
897 u8 *mac_addr = vc_ether_addr->addr;
898 int status;
899
900 if (!ice_can_vf_change_mac(vf) &&
901 ether_addr_equal(vf->dev_lan_addr, mac_addr))
902 return 0;
903
904 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
905 if (status == -ENOENT) {
906 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
907 vf->vf_id);
908 return -ENOENT;
909 } else if (status) {
910 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
911 mac_addr, vf->vf_id, status);
912 return -EIO;
913 }
914
915 ice_vfhw_mac_del(vf, vc_ether_addr);
916
917 vf->num_mac--;
918 if (ice_is_mc_lldp_eth_addr(mac_addr))
919 ice_vf_update_mac_lldp_num(vf, vsi, false);
920
921 return 0;
922 }
923
924 /**
925 * ice_vc_handle_mac_addr_msg
926 * @vf: pointer to the VF info
927 * @msg: pointer to the msg buffer
928 * @set: true if MAC filters are being set, false otherwise
929 *
930 * add guest MAC address filter
931 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	/* add/del callback selected below based on @set */
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_ether_addr *virtchnl_ether_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;
	int i;

	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;
		int result;

		/* broadcast and all-zero entries are silently skipped */
		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))
			continue;

		result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
		if (result == -EEXIST || result == -ENOENT) {
			/* already-present (add) or already-gone (del) filters
			 * are not treated as failures
			 */
			continue;
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;
		}
	}

handle_mac_exit:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
999
/**
 * ice_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 *
 * Return: 0 when the response was sent to the VF, negative error otherwise.
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}
1011
/**
 * ice_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 *
 * Return: 0 when the response was sent to the VF, negative error otherwise.
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}
1023
1024 /**
1025 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
1026 * @caps: VF driver negotiated capabilities
1027 *
1028 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
1029 */
ice_vf_vlan_offload_ena(u32 caps)1030 static bool ice_vf_vlan_offload_ena(u32 caps)
1031 {
1032 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
1033 }
1034
1035 /**
1036 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
1037 * @vf: VF used to determine if VLAN promiscuous config is allowed
1038 */
ice_is_vlan_promisc_allowed(struct ice_vf * vf)1039 bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
1040 {
1041 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1042 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
1043 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
1044 return true;
1045
1046 return false;
1047 }
1048
1049 /**
1050 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
1051 * @vf: VF to enable VLAN promisc on
1052 * @vsi: VF's VSI used to enable VLAN promiscuous mode
1053 * @vlan: VLAN used to enable VLAN promiscuous
1054 *
1055 * This function should only be called if VLAN promiscuous mode is allowed,
1056 * which can be determined via ice_is_vlan_promisc_allowed().
1057 */
ice_vf_ena_vlan_promisc(struct ice_vf * vf,struct ice_vsi * vsi,struct ice_vlan * vlan)1058 int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
1059 struct ice_vlan *vlan)
1060 {
1061 u8 promisc_m = 0;
1062 int status;
1063
1064 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1065 promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
1066 if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1067 promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
1068
1069 if (!promisc_m)
1070 return 0;
1071
1072 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1073 vlan->vid);
1074 if (status && status != -EEXIST)
1075 return status;
1076
1077 return 0;
1078 }
1079
1080 /**
1081 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
1082 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
1083 * @vlan: VLAN used to disable VLAN promiscuous
1084 *
1085 * This function should only be called if VLAN promiscuous mode is allowed,
1086 * which can be determined via ice_is_vlan_promisc_allowed().
1087 */
ice_vf_dis_vlan_promisc(struct ice_vsi * vsi,struct ice_vlan * vlan)1088 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
1089 {
1090 u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
1091 int status;
1092
1093 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
1094 vlan->vid);
1095 if (status && status != -ENOENT)
1096 return status;
1097
1098 return 0;
1099 }
1100
1101 /**
1102 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
1103 * @vf: VF to check against
1104 * @vsi: VF's VSI
1105 *
1106 * If the VF is trusted then the VF is allowed to add as many VLANs as it
1107 * wants to, so return false.
1108 *
1109 * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
1110 * allowed VLANs for an untrusted VF. Return the result of this comparison.
1111 */
ice_vf_has_max_vlans(struct ice_vf * vf,struct ice_vsi * vsi)1112 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
1113 {
1114 if (ice_is_vf_trusted(vf))
1115 return false;
1116
1117 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
1118 return ((ice_vsi_num_non_zero_vlans(vsi) +
1119 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
1120 }
1121
1122 /**
1123 * ice_vc_process_vlan_msg
1124 * @vf: pointer to the VF info
1125 * @msg: pointer to the msg buffer
1126 * @add_v: Add VLAN if true, otherwise delete VLAN
1127 *
1128 * Process virtchnl op to add or remove programmed guest VLAN ID
1129 */
ice_vc_process_vlan_msg(struct ice_vf * vf,u8 * msg,bool add_v)1130 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
1131 {
1132 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1133 struct virtchnl_vlan_filter_list *vfl =
1134 (struct virtchnl_vlan_filter_list *)msg;
1135 struct ice_pf *pf = vf->pf;
1136 bool vlan_promisc = false;
1137 struct ice_vsi *vsi;
1138 struct device *dev;
1139 int status = 0;
1140 int i;
1141
1142 dev = ice_pf_to_dev(pf);
1143 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1144 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1145 goto error_param;
1146 }
1147
1148 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1149 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1150 goto error_param;
1151 }
1152
1153 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
1154 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1155 goto error_param;
1156 }
1157
1158 for (i = 0; i < vfl->num_elements; i++) {
1159 if (vfl->vlan_id[i] >= VLAN_N_VID) {
1160 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1161 dev_err(dev, "invalid VF VLAN id %d\n",
1162 vfl->vlan_id[i]);
1163 goto error_param;
1164 }
1165 }
1166
1167 vsi = ice_get_vf_vsi(vf);
1168 if (!vsi) {
1169 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1170 goto error_param;
1171 }
1172
1173 if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
1174 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1175 vf->vf_id);
1176 /* There is no need to let VF know about being not trusted,
1177 * so we can just return success message here
1178 */
1179 goto error_param;
1180 }
1181
1182 /* in DVM a VF can add/delete inner VLAN filters when
1183 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
1184 */
1185 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
1186 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1187 goto error_param;
1188 }
1189
1190 /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
1191 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
1192 * allow vlan_promisc = true in SVM and if no port VLAN is configured
1193 */
1194 vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
1195 !ice_is_dvm_ena(&pf->hw) &&
1196 !ice_vf_is_port_vlan_ena(vf);
1197
1198 if (add_v) {
1199 for (i = 0; i < vfl->num_elements; i++) {
1200 u16 vid = vfl->vlan_id[i];
1201 struct ice_vlan vlan;
1202
1203 if (ice_vf_has_max_vlans(vf, vsi)) {
1204 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
1205 vf->vf_id);
1206 /* There is no need to let VF know about being
1207 * not trusted, so we can just return success
1208 * message here as well.
1209 */
1210 goto error_param;
1211 }
1212
1213 /* we add VLAN 0 by default for each VF so we can enable
1214 * Tx VLAN anti-spoof without triggering MDD events so
1215 * we don't need to add it again here
1216 */
1217 if (!vid)
1218 continue;
1219
1220 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1221 status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
1222 if (status) {
1223 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1224 goto error_param;
1225 }
1226
1227 /* Enable VLAN filtering on first non-zero VLAN */
1228 if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
1229 if (vf->spoofchk) {
1230 status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
1231 if (status) {
1232 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1233 dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
1234 vid, status);
1235 goto error_param;
1236 }
1237 }
1238 if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
1239 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1240 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
1241 vid, status);
1242 goto error_param;
1243 }
1244 } else if (vlan_promisc) {
1245 status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
1246 if (status) {
1247 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1248 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
1249 vid, status);
1250 }
1251 }
1252 }
1253 } else {
1254 /* In case of non_trusted VF, number of VLAN elements passed
1255 * to PF for removal might be greater than number of VLANs
1256 * filter programmed for that VF - So, use actual number of
1257 * VLANS added earlier with add VLAN opcode. In order to avoid
1258 * removing VLAN that doesn't exist, which result to sending
1259 * erroneous failed message back to the VF
1260 */
1261 int num_vf_vlan;
1262
1263 num_vf_vlan = vsi->num_vlan;
1264 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
1265 u16 vid = vfl->vlan_id[i];
1266 struct ice_vlan vlan;
1267
1268 /* we add VLAN 0 by default for each VF so we can enable
1269 * Tx VLAN anti-spoof without triggering MDD events so
1270 * we don't want a VIRTCHNL request to remove it
1271 */
1272 if (!vid)
1273 continue;
1274
1275 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
1276 status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
1277 if (status) {
1278 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1279 goto error_param;
1280 }
1281
1282 /* Disable VLAN filtering when only VLAN 0 is left */
1283 if (!ice_vsi_has_non_zero_vlans(vsi)) {
1284 vsi->inner_vlan_ops.dis_tx_filtering(vsi);
1285 vsi->inner_vlan_ops.dis_rx_filtering(vsi);
1286 }
1287
1288 if (vlan_promisc)
1289 ice_vf_dis_vlan_promisc(vsi, &vlan);
1290 }
1291 }
1292
1293 error_param:
1294 /* send the response to the VF */
1295 if (add_v)
1296 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
1297 NULL, 0);
1298 else
1299 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
1300 NULL, 0);
1301 }
1302
/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 *
 * Return: 0 when the response was sent to the VF, negative error otherwise.
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}
1314
/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest VLAN ID
 *
 * Return: 0 when the response was sent to the VF, negative error otherwise.
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}
1326
1327 /**
1328 * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not
1329 * @vsi: pointer to the VF VSI info
1330 */
ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi * vsi)1331 static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
1332 {
1333 unsigned int i;
1334
1335 ice_for_each_alloc_rxq(vsi, i)
1336 if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
1337 return true;
1338
1339 return false;
1340 }
1341
1342 /**
1343 * ice_vc_ena_vlan_stripping
1344 * @vf: pointer to the VF info
1345 *
1346 * Enable VLAN header stripping for a given VF
1347 */
ice_vc_ena_vlan_stripping(struct ice_vf * vf)1348 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
1349 {
1350 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1351 struct ice_vsi *vsi;
1352
1353 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1354 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1355 goto error_param;
1356 }
1357
1358 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1359 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1360 goto error_param;
1361 }
1362
1363 vsi = ice_get_vf_vsi(vf);
1364 if (!vsi) {
1365 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1366 goto error_param;
1367 }
1368
1369 if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
1370 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1371 else
1372 vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1373
1374 error_param:
1375 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
1376 v_ret, NULL, 0);
1377 }
1378
1379 /**
1380 * ice_vc_dis_vlan_stripping
1381 * @vf: pointer to the VF info
1382 *
1383 * Disable VLAN header stripping for a given VF
1384 */
ice_vc_dis_vlan_stripping(struct ice_vf * vf)1385 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
1386 {
1387 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1388 struct ice_vsi *vsi;
1389
1390 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1391 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1392 goto error_param;
1393 }
1394
1395 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
1396 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1397 goto error_param;
1398 }
1399
1400 vsi = ice_get_vf_vsi(vf);
1401 if (!vsi) {
1402 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1403 goto error_param;
1404 }
1405
1406 if (vsi->inner_vlan_ops.dis_stripping(vsi))
1407 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1408 else
1409 vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
1410
1411 error_param:
1412 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
1413 v_ret, NULL, 0);
1414 }
1415
1416 /**
1417 * ice_vc_query_rxdid - query RXDID supported by DDP package
1418 * @vf: pointer to VF info
1419 *
1420 * Called from VF to query a bitmap of supported flexible
1421 * descriptor RXDIDs of a DDP package.
1422 */
ice_vc_query_rxdid(struct ice_vf * vf)1423 static int ice_vc_query_rxdid(struct ice_vf *vf)
1424 {
1425 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1426 struct ice_pf *pf = vf->pf;
1427 u64 rxdid;
1428
1429 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1431 goto err;
1432 }
1433
1434 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
1435 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1436 goto err;
1437 }
1438
1439 rxdid = pf->supported_rxdids;
1440
1441 err:
1442 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
1443 v_ret, (u8 *)&rxdid, sizeof(rxdid));
1444 }
1445
1446 /**
1447 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
1448 * @vf: VF to enable/disable VLAN stripping for on initialization
1449 *
1450 * Set the default for VLAN stripping based on whether a port VLAN is configured
1451 * and the current VLAN mode of the device.
1452 */
ice_vf_init_vlan_stripping(struct ice_vf * vf)1453 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
1454 {
1455 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1456
1457 vf->vlan_strip_ena = 0;
1458
1459 if (!vsi)
1460 return -EINVAL;
1461
1462 /* don't modify stripping if port VLAN is configured in SVM since the
1463 * port VLAN is based on the inner/single VLAN in SVM
1464 */
1465 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
1466 return 0;
1467
1468 if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
1469 int err;
1470
1471 err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
1472 if (!err)
1473 vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
1474 return err;
1475 }
1476
1477 return vsi->inner_vlan_ops.dis_stripping(vsi);
1478 }
1479
ice_vc_get_max_vlan_fltrs(struct ice_vf * vf)1480 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
1481 {
1482 if (vf->trusted)
1483 return VLAN_N_VID;
1484 else
1485 return ICE_MAX_VLAN_PER_VF;
1486 }
1487
1488 /**
1489 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
1490 * @vf: VF that being checked for
1491 *
1492 * When the device is in double VLAN mode, check whether or not the outer VLAN
1493 * is allowed.
1494 */
ice_vf_outer_vlan_not_allowed(struct ice_vf * vf)1495 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
1496 {
1497 if (ice_vf_is_port_vlan_ena(vf))
1498 return true;
1499
1500 return false;
1501 }
1502
/**
 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF should use the inner
 * filtering/offload capabilities since the port VLAN is using the outer VLAN
 * capabilities.
 */
static void
ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_outer_vlan_not_allowed(vf)) {
		/* until support for inner VLAN filtering is added when a port
		 * VLAN is configured, only support software offloaded inner
		 * VLANs when a port VLAN is configured in DVM
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	} else {
		/* no port VLAN: the VF may filter on the outer VLAN with any
		 * of the supported ethertypes and offload on both tags
		 */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_AND;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
						 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
						 VIRTCHNL_VLAN_ETHERTYPE_9100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_ETHERTYPE_88A8 |
					VIRTCHNL_VLAN_ETHERTYPE_9100 |
					VIRTCHNL_VLAN_ETHERTYPE_XOR |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
	}

	caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
}
1582
/**
 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
 * @vf: VF that capabilities are being set for
 * @caps: VLAN capabilities to populate
 *
 * Determine VLAN capabilities support based on whether a port VLAN is
 * configured. If a port VLAN is configured then the VF does not have any VLAN
 * filtering or offload capabilities since the port VLAN is using the inner VLAN
 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
 * VLAN filtering and offload capabilities.
 */
static void
ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
{
	struct virtchnl_vlan_supported_caps *supported_caps;

	if (ice_vf_is_port_vlan_ena(vf)) {
		/* port VLAN owns the single (inner) tag: nothing for the VF */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.max_filters = 0;
	} else {
		/* no port VLAN: VF may use inner 802.1Q filtering/offloads */
		supported_caps = &caps->filtering.filtering_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
		caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;

		supported_caps = &caps->offloads.stripping_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		supported_caps = &caps->offloads.insertion_support;
		supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
					VIRTCHNL_VLAN_TOGGLE |
					VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
		supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;

		caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
		caps->offloads.ethertype_match =
			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
		caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
	}
}
1639
/**
 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
 * @vf: VF to determine VLAN capabilities for
 *
 * This will only be called if the VF and PF successfully negotiated
 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
 *
 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
 * is configured or not.
 */
static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_caps *caps = NULL;
	int err, len = 0;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* NOTE(review): kzalloc_obj() appears to be a tree-local helper —
	 * presumably equivalent to kzalloc(sizeof(*caps), GFP_KERNEL);
	 * confirm it zero-initializes the allocation
	 */
	caps = kzalloc_obj(*caps);
	if (!caps) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		goto out;
	}
	len = sizeof(*caps);

	if (ice_is_dvm_ena(&vf->pf->hw))
		ice_vc_set_dvm_caps(vf, caps);
	else
		ice_vc_set_svm_caps(vf, caps);

	/* store negotiated caps to prevent invalid VF messages */
	memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));

out:
	/* on the error paths caps is NULL and len is 0, so no payload is
	 * attached to the reply
	 */
	err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				    v_ret, (u8 *)caps, len);
	kfree(caps);
	return err;
}
1682
1683 /**
1684 * ice_vc_validate_vlan_tpid - validate VLAN TPID
1685 * @filtering_caps: negotiated/supported VLAN filtering capabilities
1686 * @tpid: VLAN TPID used for validation
1687 *
1688 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
1689 * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
1690 */
ice_vc_validate_vlan_tpid(u16 filtering_caps,u16 tpid)1691 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
1692 {
1693 enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
1694
1695 switch (tpid) {
1696 case ETH_P_8021Q:
1697 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
1698 break;
1699 case ETH_P_8021AD:
1700 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
1701 break;
1702 case ETH_P_QINQ1:
1703 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
1704 break;
1705 }
1706
1707 if (!(filtering_caps & vlan_ethertype))
1708 return false;
1709
1710 return true;
1711 }
1712
1713 /**
1714 * ice_vc_is_valid_vlan - validate the virtchnl_vlan
1715 * @vc_vlan: virtchnl_vlan to validate
1716 *
1717 * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return
1718 * false. Otherwise return true.
1719 */
ice_vc_is_valid_vlan(struct virtchnl_vlan * vc_vlan)1720 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
1721 {
1722 if (!vc_vlan->tci || !vc_vlan->tpid)
1723 return false;
1724
1725 return true;
1726 }
1727
/**
 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
 * @vfc: negotiated/supported VLAN filtering capabilities
 * @vfl: VLAN filter list from VF to validate
 *
 * Validate all of the filters in the VLAN filter list from the VF. If any of
 * the checks fail then return false. Otherwise return true.
 */
static bool
ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
				 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	u16 i;

	/* an empty list is malformed */
	if (!vfl->num_elements)
		return false;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_supported_caps *filtering_support =
			&vfc->filtering_support;
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *outer = &vlan_fltr->outer;
		struct virtchnl_vlan *inner = &vlan_fltr->inner;

		/* reject a tag the VF did not negotiate filtering for */
		if ((ice_vc_is_valid_vlan(outer) &&
		     filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
			return false;

		/* TCI masks require the FILTER_MASK capability */
		if ((outer->tci_mask &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
		    (inner->tci_mask &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
			return false;

		/* priority bits in the TCI require the PRIO capability */
		if (((outer->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
		    ((inner->tci & VLAN_PRIO_MASK) &&
		     !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
			return false;

		/* the TPID must be one the VF negotiated for that tag */
		if ((ice_vc_is_valid_vlan(outer) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->outer,
						outer->tpid)) ||
		    (ice_vc_is_valid_vlan(inner) &&
		     !ice_vc_validate_vlan_tpid(filtering_support->inner,
						inner->tpid)))
			return false;
	}

	return true;
}
1781
1782 /**
1783 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
1784 * @vc_vlan: struct virtchnl_vlan to transform
1785 */
ice_vc_to_vlan(struct virtchnl_vlan * vc_vlan)1786 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
1787 {
1788 struct ice_vlan vlan = { 0 };
1789
1790 vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
1791 vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
1792 vlan.tpid = vc_vlan->tpid;
1793
1794 return vlan;
1795 }
1796
/**
 * ice_vc_vlan_action - action to perform on the virthcnl_vlan
 * @vsi: VF's VSI used to perform the action
 * @vlan_action: function to perform the action with (i.e. add/del)
 * @vlan: VLAN filter to perform the action with
 *
 * Return: the result of @vlan_action (0 on success, negative error otherwise).
 */
static int
ice_vc_vlan_action(struct ice_vsi *vsi,
		   int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
		   struct ice_vlan *vlan)
{
	return vlan_action(vsi, vlan);
}
1816
/**
 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
 * @vf: VF used to delete the VLAN(s)
 * @vsi: VF's VSI used to delete the VLAN(s)
 * @vfl: virthchnl filter list used to delete the filters
 *
 * Each entry may carry an outer and/or an inner VLAN; each valid tag is
 * deleted via the corresponding VSI VLAN ops. Stops and returns the error on
 * the first failure.
 */
static int
ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			if (vlan_promisc)
				ice_vf_dis_vlan_promisc(vsi, &vlan);

			/* Disable VLAN filtering when only VLAN 0 is left */
			if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
				err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.del_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw)) {
				if (vlan_promisc)
					ice_vf_dis_vlan_promisc(vsi, &vlan);

				/* Disable VLAN filtering when only VLAN 0 is left */
				if (!ice_vsi_has_non_zero_vlans(vsi)) {
					err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
					if (err)
						return err;
				}
			}
		}
	}

	return 0;
}
1885
1886 /**
1887 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
1888 * @vf: VF the message was received from
1889 * @msg: message received from the VF
1890 */
ice_vc_remove_vlan_v2_msg(struct ice_vf * vf,u8 * msg)1891 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
1892 {
1893 struct virtchnl_vlan_filter_list_v2 *vfl =
1894 (struct virtchnl_vlan_filter_list_v2 *)msg;
1895 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1896 struct ice_vsi *vsi;
1897
1898 if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
1899 vfl)) {
1900 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1901 goto out;
1902 }
1903
1904 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
1905 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1906 goto out;
1907 }
1908
1909 vsi = ice_get_vf_vsi(vf);
1910 if (!vsi) {
1911 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1912 goto out;
1913 }
1914
1915 if (ice_vc_del_vlans(vf, vsi, vfl))
1916 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1917
1918 out:
1919 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
1920 0);
1921 }
1922
/**
 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
 * @vf: VF used to add the VLAN(s)
 * @vsi: VF's VSI used to add the VLAN(s)
 * @vfl: virthchnl filter list used to add the filters
 *
 * Each entry may carry an outer and/or an inner VLAN; each valid tag is
 * programmed via the corresponding VSI VLAN ops. Stops and returns the error
 * on the first failure.
 */
static int
ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
		 struct virtchnl_vlan_filter_list_v2 *vfl)
{
	bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
	int err;
	u16 i;

	for (i = 0; i < vfl->num_elements; i++) {
		struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
		struct virtchnl_vlan *vc_vlan;

		vc_vlan = &vlan_fltr->outer;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->outer_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			if (vlan_promisc) {
				err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
				if (err)
					return err;
			}

			/* Enable VLAN filtering on first non-zero VLAN */
			if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
				err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
				if (err)
					return err;
			}
		}

		vc_vlan = &vlan_fltr->inner;
		if (ice_vc_is_valid_vlan(vc_vlan)) {
			struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);

			err = ice_vc_vlan_action(vsi,
						 vsi->inner_vlan_ops.add_vlan,
						 &vlan);
			if (err)
				return err;

			/* no support for VLAN promiscuous on inner VLAN unless
			 * we are in Single VLAN Mode (SVM)
			 */
			if (!ice_is_dvm_ena(&vsi->back->hw)) {
				if (vlan_promisc) {
					err = ice_vf_ena_vlan_promisc(vf, vsi,
								      &vlan);
					if (err)
						return err;
				}

				/* Enable VLAN filtering on first non-zero VLAN */
				if (vf->spoofchk && vlan.vid) {
					err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
					if (err)
						return err;
				}
			}
		}
	}

	return 0;
}
1998
1999 /**
2000 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2001 * @vsi: VF VSI used to get number of existing VLAN filters
2002 * @vfc: negotiated/supported VLAN filtering capabilities
2003 * @vfl: VLAN filter list from VF to validate
2004 *
2005 * Validate all of the filters in the VLAN filter list from the VF during the
2006 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2007 * Otherwise return true.
2008 */
2009 static bool
ice_vc_validate_add_vlan_filter_list(struct ice_vsi * vsi,struct virtchnl_vlan_filtering_caps * vfc,struct virtchnl_vlan_filter_list_v2 * vfl)2010 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2011 struct virtchnl_vlan_filtering_caps *vfc,
2012 struct virtchnl_vlan_filter_list_v2 *vfl)
2013 {
2014 u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
2015 vfl->num_elements;
2016
2017 if (num_requested_filters > vfc->max_filters)
2018 return false;
2019
2020 return ice_vc_validate_vlan_filter_list(vfc, vfl);
2021 }
2022
2023 /**
2024 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2025 * @vf: VF the message was received from
2026 * @msg: message received from the VF
2027 */
ice_vc_add_vlan_v2_msg(struct ice_vf * vf,u8 * msg)2028 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2029 {
2030 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2031 struct virtchnl_vlan_filter_list_v2 *vfl =
2032 (struct virtchnl_vlan_filter_list_v2 *)msg;
2033 struct ice_vsi *vsi;
2034
2035 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2036 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2037 goto out;
2038 }
2039
2040 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2041 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2042 goto out;
2043 }
2044
2045 vsi = ice_get_vf_vsi(vf);
2046 if (!vsi) {
2047 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2048 goto out;
2049 }
2050
2051 if (!ice_vc_validate_add_vlan_filter_list(vsi,
2052 &vf->vlan_v2_caps.filtering,
2053 vfl)) {
2054 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2055 goto out;
2056 }
2057
2058 if (ice_vc_add_vlans(vf, vsi, vfl))
2059 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2060
2061 out:
2062 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
2063 0);
2064 }
2065
2066 /**
2067 * ice_vc_valid_vlan_setting - validate VLAN setting
2068 * @negotiated_settings: negotiated VLAN settings during VF init
2069 * @ethertype_setting: ethertype(s) requested for the VLAN setting
2070 */
2071 static bool
ice_vc_valid_vlan_setting(u32 negotiated_settings,u32 ethertype_setting)2072 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
2073 {
2074 if (ethertype_setting && !(negotiated_settings & ethertype_setting))
2075 return false;
2076
2077 /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
2078 * VIRTHCNL_VLAN_ETHERTYPE_AND is not negotiated/supported
2079 */
2080 if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
2081 hweight32(ethertype_setting) > 1)
2082 return false;
2083
2084 /* ability to modify the VLAN setting was not negotiated */
2085 if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
2086 return false;
2087
2088 return true;
2089 }
2090
2091 /**
2092 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
2093 * @caps: negotiated VLAN settings during VF init
2094 * @msg: message to validate
2095 *
2096 * Used to validate any VLAN virtchnl message sent as a
2097 * virtchnl_vlan_setting structure. Validates the message against the
2098 * negotiated/supported caps during VF driver init.
2099 */
2100 static bool
ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps * caps,struct virtchnl_vlan_setting * msg)2101 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
2102 struct virtchnl_vlan_setting *msg)
2103 {
2104 if ((!msg->outer_ethertype_setting &&
2105 !msg->inner_ethertype_setting) ||
2106 (!caps->outer && !caps->inner))
2107 return false;
2108
2109 if (msg->outer_ethertype_setting &&
2110 !ice_vc_valid_vlan_setting(caps->outer,
2111 msg->outer_ethertype_setting))
2112 return false;
2113
2114 if (msg->inner_ethertype_setting &&
2115 !ice_vc_valid_vlan_setting(caps->inner,
2116 msg->inner_ethertype_setting))
2117 return false;
2118
2119 return true;
2120 }
2121
2122 /**
2123 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
2124 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
2125 * @tpid: VLAN TPID to populate
2126 */
ice_vc_get_tpid(u32 ethertype_setting,u16 * tpid)2127 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
2128 {
2129 switch (ethertype_setting) {
2130 case VIRTCHNL_VLAN_ETHERTYPE_8100:
2131 *tpid = ETH_P_8021Q;
2132 break;
2133 case VIRTCHNL_VLAN_ETHERTYPE_88A8:
2134 *tpid = ETH_P_8021AD;
2135 break;
2136 case VIRTCHNL_VLAN_ETHERTYPE_9100:
2137 *tpid = ETH_P_QINQ1;
2138 break;
2139 default:
2140 *tpid = 0;
2141 return -EINVAL;
2142 }
2143
2144 return 0;
2145 }
2146
2147 /**
2148 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
2149 * @vsi: VF's VSI used to enable the VLAN offload
2150 * @ena_offload: function used to enable the VLAN offload
2151 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
2152 */
2153 static int
ice_vc_ena_vlan_offload(struct ice_vsi * vsi,int (* ena_offload)(struct ice_vsi * vsi,u16 tpid),u32 ethertype_setting)2154 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
2155 int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
2156 u32 ethertype_setting)
2157 {
2158 u16 tpid;
2159 int err;
2160
2161 err = ice_vc_get_tpid(ethertype_setting, &tpid);
2162 if (err)
2163 return err;
2164
2165 err = ena_offload(vsi, tpid);
2166 if (err)
2167 return err;
2168
2169 return 0;
2170 }
2171
/**
 * ice_vc_ena_vlan_stripping_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
 *
 * Enables outer and/or inner VLAN stripping as requested, tracking the
 * result in vf->vlan_strip_ena.
 *
 * Return: result of sending the status reply back to the VF.
 */
static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* request must match what was negotiated during VF init */
	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* stripping is not supported while Rx queue CRC stripping is
	 * disabled on this VSI
	 */
	if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (ice_vc_ena_vlan_offload(vsi,
					    vsi->outer_vlan_ops.ena_stripping,
					    ethertype_setting)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support outer stripping so the first tag always ends
			 * up in L2TAG2_2ND and the second/inner tag, if
			 * enabled, is extracted in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);

			vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting &&
	    ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
				    ethertype_setting)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* only mark inner stripping enabled when it was actually requested
	 * (and the enable above succeeded)
	 */
	if (ethertype_setting)
		vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
				     v_ret, NULL, 0);
}
2255
/**
 * ice_vc_dis_vlan_stripping_v2_msg
 * @vf: VF the message was received from
 * @msg: message received from the VF
 *
 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
 *
 * Disables outer and/or inner VLAN stripping as requested, clearing the
 * corresponding bits in vf->vlan_strip_ena.
 *
 * Return: result of sending the status reply back to the VF.
 */
static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_supported_caps *stripping_support;
	struct virtchnl_vlan_setting *strip_msg =
		(struct virtchnl_vlan_setting *)msg;
	u32 ethertype_setting;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* request must match what was negotiated during VF init */
	stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
	if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	ethertype_setting = strip_msg->outer_ethertype_setting;
	if (ethertype_setting) {
		if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto out;
		} else {
			enum ice_l2tsel l2tsel =
				ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;

			/* PF tells the VF that the outer VLAN tag is always
			 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
			 * inner is always extracted to
			 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
			 * support inner stripping while outer stripping is
			 * disabled so that the first and only tag is extracted
			 * in L2TAG1.
			 */
			ice_vsi_update_l2tsel(vsi, l2tsel);

			vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
		}
	}

	ethertype_setting = strip_msg->inner_ethertype_setting;
	if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto out;
	}

	/* only clear the inner flag when inner disable was requested
	 * (and succeeded above)
	 */
	if (ethertype_setting)
		vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;

out:
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
				     v_ret, NULL, 0);
}
2330
2331 /**
2332 * ice_vc_ena_vlan_insertion_v2_msg
2333 * @vf: VF the message was received from
2334 * @msg: message received from the VF
2335 *
2336 * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
2337 */
ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf * vf,u8 * msg)2338 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
2339 {
2340 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2341 struct virtchnl_vlan_supported_caps *insertion_support;
2342 struct virtchnl_vlan_setting *insertion_msg =
2343 (struct virtchnl_vlan_setting *)msg;
2344 u32 ethertype_setting;
2345 struct ice_vsi *vsi;
2346
2347 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2348 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2349 goto out;
2350 }
2351
2352 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
2353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2354 goto out;
2355 }
2356
2357 vsi = ice_get_vf_vsi(vf);
2358 if (!vsi) {
2359 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2360 goto out;
2361 }
2362
2363 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
2364 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
2365 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2366 goto out;
2367 }
2368
2369 ethertype_setting = insertion_msg->outer_ethertype_setting;
2370 if (ethertype_setting &&
2371 ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
2372 ethertype_setting)) {
2373 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2374 goto out;
2375 }
2376
2377 ethertype_setting = insertion_msg->inner_ethertype_setting;
2378 if (ethertype_setting &&
2379 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
2380 ethertype_setting)) {
2381 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2382 goto out;
2383 }
2384
2385 out:
2386 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
2387 v_ret, NULL, 0);
2388 }
2389
2390 /**
2391 * ice_vc_dis_vlan_insertion_v2_msg
2392 * @vf: VF the message was received from
2393 * @msg: message received from the VF
2394 *
2395 * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
2396 */
ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf * vf,u8 * msg)2397 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
2398 {
2399 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2400 struct virtchnl_vlan_supported_caps *insertion_support;
2401 struct virtchnl_vlan_setting *insertion_msg =
2402 (struct virtchnl_vlan_setting *)msg;
2403 u32 ethertype_setting;
2404 struct ice_vsi *vsi;
2405
2406 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2407 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2408 goto out;
2409 }
2410
2411 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
2412 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2413 goto out;
2414 }
2415
2416 vsi = ice_get_vf_vsi(vf);
2417 if (!vsi) {
2418 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2419 goto out;
2420 }
2421
2422 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
2423 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
2424 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2425 goto out;
2426 }
2427
2428 ethertype_setting = insertion_msg->outer_ethertype_setting;
2429 if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
2430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2431 goto out;
2432 }
2433
2434 ethertype_setting = insertion_msg->inner_ethertype_setting;
2435 if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
2436 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2437 goto out;
2438 }
2439
2440 out:
2441 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
2442 v_ret, NULL, 0);
2443 }
2444
ice_vc_get_ptp_cap(struct ice_vf * vf,const struct virtchnl_ptp_caps * msg)2445 static int ice_vc_get_ptp_cap(struct ice_vf *vf,
2446 const struct virtchnl_ptp_caps *msg)
2447 {
2448 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2449 u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
2450 VIRTCHNL_1588_PTP_CAP_READ_PHC;
2451
2452 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
2453 goto err;
2454
2455 v_ret = VIRTCHNL_STATUS_SUCCESS;
2456
2457 if (msg->caps & caps)
2458 vf->ptp_caps = caps;
2459
2460 err:
2461 /* send the response back to the VF */
2462 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
2463 (u8 *)&vf->ptp_caps,
2464 sizeof(struct virtchnl_ptp_caps));
2465 }
2466
ice_vc_get_phc_time(struct ice_vf * vf)2467 static int ice_vc_get_phc_time(struct ice_vf *vf)
2468 {
2469 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2470 struct virtchnl_phc_time *phc_time = NULL;
2471 struct ice_pf *pf = vf->pf;
2472 u32 len = 0;
2473 int ret;
2474
2475 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
2476 goto err;
2477
2478 v_ret = VIRTCHNL_STATUS_SUCCESS;
2479
2480 phc_time = kzalloc_obj(*phc_time);
2481 if (!phc_time) {
2482 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2483 goto err;
2484 }
2485
2486 len = sizeof(*phc_time);
2487
2488 phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
2489
2490 err:
2491 /* send the response back to the VF */
2492 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
2493 (u8 *)phc_time, len);
2494 kfree(phc_time);
2495 return ret;
2496 }
2497
/* Default virtchnl dispatch table, installed on a VF by
 * ice_virtchnl_set_dflt_ops(). One handler per supported opcode; invoked
 * from ice_vc_process_vf_msg().
 */
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_add_mac_addr_msg,
	.del_mac_addr_msg = ice_vc_del_mac_addr_msg,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.config_rss_hfunc = ice_vc_config_rss_hfunc,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.query_rxdid = ice_vc_query_rxdid,
	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
	.get_qos_caps = ice_vc_get_qos_caps,
	.cfg_q_bw = ice_vc_cfg_q_bw,
	.cfg_q_quanta = ice_vc_cfg_q_quanta,
	.get_ptp_cap = ice_vc_get_ptp_cap,
	.get_phc_time = ice_vc_get_phc_time,
	/* If you add a new op here please make sure to add it to
	 * ice_virtchnl_repr_ops as well.
	 */
};
2540
/**
 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
 * @vf: the VF to switch ops
 *
 * Point the VF's dispatch table at ice_virtchnl_dflt_ops so subsequent
 * messages are handled by the default handlers.
 */
void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
{
	vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
}
2549
2550 /**
2551 * ice_vc_repr_add_mac
2552 * @vf: pointer to VF
2553 * @msg: virtchannel message
2554 *
2555 * When port representors are created, we do not add MAC rule
2556 * to firmware, we store it so that PF could report same
2557 * MAC as VF.
2558 */
ice_vc_repr_add_mac(struct ice_vf * vf,u8 * msg)2559 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
2560 {
2561 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2562 struct virtchnl_ether_addr_list *al =
2563 (struct virtchnl_ether_addr_list *)msg;
2564 struct ice_vsi *vsi;
2565 struct ice_pf *pf;
2566 int i;
2567
2568 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2569 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2570 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2571 goto handle_mac_exit;
2572 }
2573
2574 pf = vf->pf;
2575
2576 vsi = ice_get_vf_vsi(vf);
2577 if (!vsi) {
2578 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2579 goto handle_mac_exit;
2580 }
2581
2582 for (i = 0; i < al->num_elements; i++) {
2583 u8 *mac_addr = al->list[i].addr;
2584
2585 if (!is_unicast_ether_addr(mac_addr) ||
2586 ether_addr_equal(mac_addr, vf->hw_lan_addr))
2587 continue;
2588
2589 if (vf->pf_set_mac) {
2590 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
2591 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2592 goto handle_mac_exit;
2593 }
2594
2595 ice_vfhw_mac_add(vf, &al->list[i]);
2596 break;
2597 }
2598
2599 handle_mac_exit:
2600 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2601 v_ret, NULL, 0);
2602 }
2603
2604 /**
2605 * ice_vc_repr_del_mac - response with success for deleting MAC
2606 * @vf: pointer to VF
2607 * @msg: virtchannel message
2608 *
2609 * Respond with success to not break normal VF flow.
2610 * For legacy VF driver try to update cached MAC address.
2611 */
2612 static int
ice_vc_repr_del_mac(struct ice_vf __always_unused * vf,u8 __always_unused * msg)2613 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
2614 {
2615 struct virtchnl_ether_addr_list *al =
2616 (struct virtchnl_ether_addr_list *)msg;
2617
2618 ice_update_legacy_cached_mac(vf, &al->list[0]);
2619
2620 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2621 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
2622 }
2623
2624 static int
ice_vc_repr_cfg_promiscuous_mode(struct ice_vf * vf,u8 __always_unused * msg)2625 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
2626 {
2627 dev_dbg(ice_pf_to_dev(vf->pf),
2628 "Can't config promiscuous mode in switchdev mode for VF %d\n",
2629 vf->vf_id);
2630 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2631 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2632 NULL, 0);
2633 }
2634
/* Virtchnl dispatch table for port-representor VFs, installed by
 * ice_virtchnl_set_repr_ops(). Identical to ice_virtchnl_dflt_ops except
 * for the MAC add/del and promiscuous-mode handlers, which use the
 * representor-specific variants.
 */
static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
	.get_ver_msg = ice_vc_get_ver_msg,
	.get_vf_res_msg = ice_vc_get_vf_res_msg,
	.reset_vf = ice_vc_reset_vf_msg,
	.add_mac_addr_msg = ice_vc_repr_add_mac,
	.del_mac_addr_msg = ice_vc_repr_del_mac,
	.cfg_qs_msg = ice_vc_cfg_qs_msg,
	.ena_qs_msg = ice_vc_ena_qs_msg,
	.dis_qs_msg = ice_vc_dis_qs_msg,
	.request_qs_msg = ice_vc_request_qs_msg,
	.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
	.config_rss_key = ice_vc_config_rss_key,
	.config_rss_lut = ice_vc_config_rss_lut,
	.config_rss_hfunc = ice_vc_config_rss_hfunc,
	.get_stats_msg = ice_vc_get_stats_msg,
	.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
	.add_vlan_msg = ice_vc_add_vlan_msg,
	.remove_vlan_msg = ice_vc_remove_vlan_msg,
	.query_rxdid = ice_vc_query_rxdid,
	.get_rss_hashcfg = ice_vc_get_rss_hashcfg,
	.set_rss_hashcfg = ice_vc_set_rss_hashcfg,
	.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
	.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
	.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
	.add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
	.del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
	.get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
	.add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
	.remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
	.ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
	.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
	.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
	.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
	.get_qos_caps = ice_vc_get_qos_caps,
	.cfg_q_bw = ice_vc_cfg_q_bw,
	.cfg_q_quanta = ice_vc_cfg_q_quanta,
	.get_ptp_cap = ice_vc_get_ptp_cap,
	.get_phc_time = ice_vc_get_phc_time,
};
2674
/**
 * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
 * @vf: the VF to switch ops
 *
 * Point the VF's dispatch table at ice_virtchnl_repr_ops so subsequent
 * messages are handled by the port-representor handlers.
 */
void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
{
	vf->virtchnl_ops = &ice_virtchnl_repr_ops;
}
2683
2684 /**
2685 * ice_is_malicious_vf - check if this vf might be overflowing mailbox
2686 * @vf: the VF to check
2687 * @mbxdata: data about the state of the mailbox
2688 *
2689 * Detect if a given VF might be malicious and attempting to overflow the PF
2690 * mailbox. If so, log a warning message and ignore this event.
2691 */
2692 static bool
ice_is_malicious_vf(struct ice_vf * vf,struct ice_mbx_data * mbxdata)2693 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
2694 {
2695 bool report_malvf = false;
2696 struct device *dev;
2697 struct ice_pf *pf;
2698 int status;
2699
2700 pf = vf->pf;
2701 dev = ice_pf_to_dev(pf);
2702
2703 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
2704 return vf->mbx_info.malicious;
2705
2706 /* check to see if we have a newly malicious VF */
2707 status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
2708 &report_malvf);
2709 if (status)
2710 dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
2711 vf->vf_id, vf->dev_lan_addr, status);
2712
2713 if (report_malvf) {
2714 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2715 u8 zero_addr[ETH_ALEN] = {};
2716
2717 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
2718 vf->dev_lan_addr,
2719 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
2720 }
2721
2722 return vf->mbx_info.malicious;
2723 }
2724
/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 * @mbxdata: information used to detect VF attempting mailbox overflow
 *
 * Called from the common asq/arq handler to process request from VF. When this
 * flow is used for devices with hardware VF to PF message queue overflow
 * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf
 * check is skipped.
 *
 * Validates the message, then dispatches it to the per-opcode handler in the
 * VF's current virtchnl ops table. Runs under vf->cfg_lock and holds a VF
 * reference (ice_get_vf_by_id/ice_put_vf) for the duration.
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
			   struct ice_mbx_data *mbxdata)
{
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	const struct ice_virtchnl_ops *ops;
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);

	/* takes a reference on the VF; released via ice_put_vf() below */
	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf) {
		dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
			vf_id, v_opcode, msglen);
		return;
	}

	mutex_lock(&vf->cfg_lock);

	/* Check if the VF is trying to overflow the mailbox */
	if (mbxdata && ice_is_malicious_vf(vf, mbxdata))
		goto finish;

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
		err = -EPERM;
		goto error_handler;
	}

	ops = vf->virtchnl_ops;

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err) {
		if (err == VIRTCHNL_STATUS_ERR_PARAM)
			err = -EPERM;
		else
			err = -EINVAL;
	}

error_handler:
	if (err) {
		/* reply with ERR_PARAM so the VF knows the request was
		 * rejected, then log the reason on the PF side
		 */
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
				      NULL, 0);
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);
		goto finish;
	}

	/* the opcode must be permitted for this VF (see allowlist) */
	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
		ice_vc_send_msg_to_vf(vf, v_opcode,
				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
				      0);
		goto finish;
	}

	/* dispatch to the per-opcode handler in the VF's ops table */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		err = ops->get_ver_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ops->get_vf_res_msg(vf, msg);
		if (ice_vf_init_vlan_stripping(vf))
			dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
				vf->vf_id);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ops->reset_vf(vf);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ops->add_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ops->del_mac_addr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ops->cfg_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ops->ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ops->dis_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ops->request_qs_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ops->cfg_irq_map_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ops->config_rss_key(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ops->config_rss_lut(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
		err = ops->config_rss_hfunc(vf, msg);
		break;
	case VIRTCHNL_OP_GET_STATS:
		err = ops->get_stats_msg(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		err = ops->cfg_promiscuous_mode_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		err = ops->add_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		err = ops->remove_vlan_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
		err = ops->query_rxdid(vf);
		break;
	case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
		err = ops->get_rss_hashcfg(vf);
		break;
	case VIRTCHNL_OP_SET_RSS_HASHCFG:
		err = ops->set_rss_hashcfg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ops->ena_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ops->dis_vlan_stripping(vf);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		err = ops->add_fdir_fltr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		err = ops->del_fdir_fltr_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
		err = ops->handle_rss_cfg_msg(vf, msg, true);
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG:
		err = ops->handle_rss_cfg_msg(vf, msg, false);
		break;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
		err = ops->get_offload_vlan_v2_caps(vf);
		break;
	case VIRTCHNL_OP_ADD_VLAN_V2:
		err = ops->add_vlan_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_VLAN_V2:
		err = ops->remove_vlan_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		err = ops->ena_vlan_stripping_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		err = ops->dis_vlan_stripping_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		err = ops->ena_vlan_insertion_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		err = ops->dis_vlan_insertion_v2_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_QOS_CAPS:
		err = ops->get_qos_caps(vf);
		break;
	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
		err = ops->cfg_q_bw(vf, msg);
		break;
	case VIRTCHNL_OP_CONFIG_QUANTA:
		err = ops->cfg_q_quanta(vf, msg);
		break;
	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
		err = ops->get_ptp_cap(vf, (const void *)msg);
		break;
	case VIRTCHNL_OP_1588_PTP_GET_TIME:
		err = ops->get_phc_time(vf);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
			vf_id);
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
					    NULL, 0);
		break;
	}
	if (err) {
		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			 vf_id, v_opcode, err);
	}

finish:
	mutex_unlock(&vf->cfg_lock);
	ice_put_vf(vf);
}
2937