1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2022, Intel Corporation. */ 3 4 #include "ice_virtchnl.h" 5 #include "ice_vf_lib_private.h" 6 #include "ice.h" 7 #include "ice_base.h" 8 #include "ice_lib.h" 9 #include "ice_fltr.h" 10 #include "ice_virtchnl_allowlist.h" 11 #include "ice_vf_vsi_vlan_ops.h" 12 #include "ice_vlan.h" 13 #include "ice_flex_pipe.h" 14 #include "ice_dcb_lib.h" 15 16 #define FIELD_SELECTOR(proto_hdr_field) \ 17 BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK) 18 19 struct ice_vc_hdr_match_type { 20 u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */ 21 u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */ 22 }; 23 24 static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { 25 {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE}, 26 {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH}, 27 {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN}, 28 {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN}, 29 {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 | 30 ICE_FLOW_SEG_HDR_IPV_OTHER}, 31 {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 | 32 ICE_FLOW_SEG_HDR_IPV_OTHER}, 33 {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP}, 34 {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP}, 35 {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP}, 36 {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE}, 37 {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP}, 38 {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH}, 39 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, 40 ICE_FLOW_SEG_HDR_GTPU_DWN}, 41 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, 42 ICE_FLOW_SEG_HDR_GTPU_UP}, 43 {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3}, 44 {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP}, 45 {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH}, 46 {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION}, 47 }; 48 49 struct ice_vc_hash_field_match_type { 50 u32 vc_hdr; /* virtchnl headers 51 * (VIRTCHNL_PROTO_HDR_XXX) 52 */ 53 u32 vc_hash_field; /* virtchnl hash fields selector 54 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX)) 55 */ 56 u64 ice_hash_field; /* ice hash fields 57 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX)) 58 */ 59 }; 60 61 static const struct 62 ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { 63 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC), 64 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)}, 65 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), 66 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)}, 67 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) | 68 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST), 69 ICE_FLOW_HASH_ETH}, 70 {VIRTCHNL_PROTO_HDR_ETH, 71 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE), 72 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)}, 73 {VIRTCHNL_PROTO_HDR_S_VLAN, 74 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID), 75 BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)}, 76 {VIRTCHNL_PROTO_HDR_C_VLAN, 77 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID), 78 BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)}, 79 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), 80 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, 81 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), 82 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, 83 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 84 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), 85 ICE_FLOW_HASH_IPV4}, 86 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 87 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 88 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | 89 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 90 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 91 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 92 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | 93 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 94 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | 95 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | 96 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 97 ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 98 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), 99 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, 100 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), 101 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, 102 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), 103 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)}, 104 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | 105 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), 106 ICE_FLOW_HASH_IPV6}, 107 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | 108 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 109 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | 110 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 111 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | 112 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 113 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) | 114 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 115 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | 116 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | 117 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 118 ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 119 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), 120 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, 121 {VIRTCHNL_PROTO_HDR_TCP, 122 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), 123 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, 124 {VIRTCHNL_PROTO_HDR_TCP, 125 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), 126 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)}, 127 {VIRTCHNL_PROTO_HDR_TCP, 128 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | 129 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), 130 ICE_FLOW_HASH_TCP_PORT}, 131 {VIRTCHNL_PROTO_HDR_UDP, 132 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), 133 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, 134 {VIRTCHNL_PROTO_HDR_UDP, 135 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), 136 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)}, 137 {VIRTCHNL_PROTO_HDR_UDP, 138 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | 139 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), 140 ICE_FLOW_HASH_UDP_PORT}, 141 {VIRTCHNL_PROTO_HDR_SCTP, 142 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), 143 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, 144 {VIRTCHNL_PROTO_HDR_SCTP, 145 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), 146 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)}, 147 {VIRTCHNL_PROTO_HDR_SCTP, 148 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | 149 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), 150 ICE_FLOW_HASH_SCTP_PORT}, 151 {VIRTCHNL_PROTO_HDR_PPPOE, 152 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), 153 BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, 154 {VIRTCHNL_PROTO_HDR_GTPU_IP, 155 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID), 156 BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)}, 157 {VIRTCHNL_PROTO_HDR_L2TPV3, 158 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID), 159 BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)}, 160 {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI), 161 BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)}, 162 {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI), 163 BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, 164 {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), 165 BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, 166 }; 167 168 /** 169 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF 170 * @pf: pointer to the PF structure 171 * @v_opcode: operation code 172 * @v_retval: return value 173 * @msg: pointer to the msg buffer 174 * @msglen: msg length 175 */ 176 static void 177 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, 178 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) 179 { 180 struct ice_hw *hw = &pf->hw; 181 struct ice_vf *vf; 182 unsigned int bkt; 183 184 mutex_lock(&pf->vfs.table_lock); 185 ice_for_each_vf(pf, bkt, vf) { 186 /* Not all vfs are enabled so skip the ones that are not */ 187 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && 188 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 189 continue; 190 191 /* Ignore return value on purpose - a given VF may fail, but 192 * we need to keep going and send to all of them 193 */ 194 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, 195 msglen, NULL); 196 } 197 mutex_unlock(&pf->vfs.table_lock); 198 } 199 200 /** 201 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event 202 * @vf: pointer to the VF structure 203 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for 204 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_* 205 * @link_up: whether or not to set the link up/down 206 */ 207 static void 208 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, 209 int ice_link_speed, bool link_up) 210 { 211 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { 212 pfe->event_data.link_event_adv.link_status = link_up; 213 /* Speed in Mbps */ 214 pfe->event_data.link_event_adv.link_speed = 215 ice_conv_link_speed_to_virtchnl(true, ice_link_speed); 216 } else { 217 pfe->event_data.link_event.link_status = link_up; 218 /* Legacy method for virtchnl link speeds */ 219 pfe->event_data.link_event.link_speed = 220 (enum virtchnl_link_speed) 221 ice_conv_link_speed_to_virtchnl(false, ice_link_speed); 222 } 223 } 224 225 /** 226 * ice_vc_notify_vf_link_state - Inform a VF of link status 227 * @vf: pointer to the VF structure 228 * 229 * send a link status message to a single VF 230 */ 231 void ice_vc_notify_vf_link_state(struct ice_vf *vf) 232 { 233 struct virtchnl_pf_event pfe = { 0 }; 234 struct ice_hw *hw = &vf->pf->hw; 235 236 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; 237 pfe.severity = PF_EVENT_SEVERITY_INFO; 238 239 if (ice_is_vf_link_up(vf)) 240 ice_set_pfe_link(vf, &pfe, 241 hw->port_info->phy.link_info.link_speed, true); 242 else 243 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false); 244 245 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 246 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, 247 sizeof(pfe), NULL); 248 } 249 250 /** 251 * ice_vc_notify_link_state - Inform all VFs on a PF of link status 252 * @pf: pointer to the PF structure 253 */ 254 void ice_vc_notify_link_state(struct ice_pf *pf) 255 { 256 struct ice_vf *vf; 257 unsigned int bkt; 258 259 mutex_lock(&pf->vfs.table_lock); 260 ice_for_each_vf(pf, bkt, vf) 261 ice_vc_notify_vf_link_state(vf); 262 mutex_unlock(&pf->vfs.table_lock); 263 } 264 265 /** 266 * ice_vc_notify_reset - Send pending reset message to all VFs 267 * @pf: pointer to the PF structure 268 * 269 * indicate a pending reset to all VFs on a given PF 270 */ 271 void ice_vc_notify_reset(struct ice_pf *pf) 272 { 273 struct virtchnl_pf_event pfe; 274 275 if (!ice_has_vfs(pf)) 276 return; 277 278 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; 279 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; 280 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, 281 (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); 282 } 283 284 /** 285 * ice_vc_send_msg_to_vf - Send message to VF 286 * @vf: pointer to the VF info 287 * @v_opcode: virtual channel opcode 288 * @v_retval: virtual channel return value 289 * @msg: pointer to the msg buffer 290 * @msglen: msg length 291 * 292 * send msg to VF 293 */ 294 int 295 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, 296 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) 297 { 298 struct device *dev; 299 struct ice_pf *pf; 300 int aq_ret; 301 302 pf = vf->pf; 303 dev = ice_pf_to_dev(pf); 304 305 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, 306 msg, msglen, NULL); 307 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { 308 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n", 309 vf->vf_id, aq_ret, 310 ice_aq_str(pf->hw.mailboxq.sq_last_status)); 311 return -EIO; 312 } 313 314 return 0; 315 } 316 317 /** 318 * ice_vc_get_ver_msg 319 * @vf: pointer to the VF info 320 * @msg: pointer to the msg buffer 321 * 322 * called from the VF to request the API version used by the PF 323 */ 324 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) 325 { 326 struct virtchnl_version_info info = { 327 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR 328 }; 329 330 vf->vf_ver = *(struct virtchnl_version_info *)msg; 331 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ 332 if (VF_IS_V10(&vf->vf_ver)) 333 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; 334 335 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, 336 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info, 337 sizeof(struct virtchnl_version_info)); 338 } 339 340 /** 341 * ice_vc_get_max_frame_size - get max frame size allowed for VF 342 * @vf: VF used to determine max frame size 343 * 344 * Max frame size is determined based on the current port's max frame size and 345 * whether a port VLAN is configured on this VF. The VF is not aware whether 346 * it's in a port VLAN so the PF needs to account for this in max frame size 347 * checks and sending the max frame size to the VF. 348 */ 349 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf) 350 { 351 struct ice_port_info *pi = ice_vf_get_port_info(vf); 352 u16 max_frame_size; 353 354 max_frame_size = pi->phy.link_info.max_frame_size; 355 356 if (ice_vf_is_port_vlan_ena(vf)) 357 max_frame_size -= VLAN_HLEN; 358 359 return max_frame_size; 360 } 361 362 /** 363 * ice_vc_get_vlan_caps 364 * @hw: pointer to the hw 365 * @vf: pointer to the VF info 366 * @vsi: pointer to the VSI 367 * @driver_caps: current driver caps 368 * 369 * Return 0 if there is no VLAN caps supported, or VLAN caps value 370 */ 371 static u32 372 ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi, 373 u32 driver_caps) 374 { 375 if (ice_is_eswitch_mode_switchdev(vf->pf)) 376 /* In switchdev setting VLAN from VF isn't supported */ 377 return 0; 378 379 if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { 380 /* VLAN offloads based on current device configuration */ 381 return VIRTCHNL_VF_OFFLOAD_VLAN_V2; 382 } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) { 383 /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD explicitly for 384 * these two conditions, which amounts to guest VLAN filtering 385 * and offloads being based on the inner VLAN or the 386 * inner/single VLAN respectively and don't allow VF to 387 * negotiate VIRTCHNL_VF_OFFLOAD in any other cases 388 */ 389 if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) { 390 return VIRTCHNL_VF_OFFLOAD_VLAN; 391 } else if (!ice_is_dvm_ena(hw) && 392 !ice_vf_is_port_vlan_ena(vf)) { 393 /* configure backward compatible support for VFs that 394 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is 395 * configured in SVM, and no port VLAN is configured 396 */ 397 ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi); 398 return VIRTCHNL_VF_OFFLOAD_VLAN; 399 } else if (ice_is_dvm_ena(hw)) { 400 /* configure software offloaded VLAN support when DVM 401 * is enabled, but no port VLAN is enabled 402 */ 403 ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi); 404 } 405 } 406 407 return 0; 408 } 409 410 /** 411 * ice_vc_get_vf_res_msg 412 * @vf: pointer to the VF info 413 * @msg: pointer to the msg buffer 414 * 415 * called from the VF to request its resources 416 */ 417 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) 418 { 419 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 420 struct virtchnl_vf_resource *vfres = NULL; 421 struct ice_hw *hw = &vf->pf->hw; 422 struct ice_vsi *vsi; 423 int len = 0; 424 int ret; 425 426 if (ice_check_vf_init(vf)) { 427 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 428 goto err; 429 } 430 431 len = virtchnl_struct_size(vfres, vsi_res, 0); 432 433 vfres = kzalloc(len, GFP_KERNEL); 434 if (!vfres) { 435 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 436 len = 0; 437 goto err; 438 } 439 if (VF_IS_V11(&vf->vf_ver)) 440 vf->driver_caps = *(u32 *)msg; 441 else 442 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | 443 VIRTCHNL_VF_OFFLOAD_RSS_REG | 444 VIRTCHNL_VF_OFFLOAD_VLAN; 445 446 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; 447 vsi = ice_get_vf_vsi(vf); 448 if (!vsi) { 449 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 450 goto err; 451 } 452 453 vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi, 454 vf->driver_caps); 455 456 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { 457 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; 458 } else { 459 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) 460 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; 461 else 462 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; 463 } 464 465 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) 466 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC; 467 468 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) 469 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; 470 471 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) 472 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; 473 474 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) 475 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; 476 477 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM) 478 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; 479 480 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) 481 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; 482 483 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) 484 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; 485 486 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) 487 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; 488 489 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) 490 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC; 491 492 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) 493 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; 494 495 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) 496 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF; 497 498 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) 499 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO; 500 501 vfres->num_vsis = 1; 502 /* Tx and Rx queue are equal for VF */ 503 vfres->num_queue_pairs = vsi->num_txq; 504 vfres->max_vectors = vf->num_msix; 505 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; 506 vfres->rss_lut_size = ICE_LUT_VSI_SIZE; 507 vfres->max_mtu = ice_vc_get_max_frame_size(vf); 508 509 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; 510 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; 511 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; 512 ether_addr_copy(vfres->vsi_res[0].default_mac_addr, 513 vf->hw_lan_addr); 514 515 /* match guest capabilities */ 516 vf->driver_caps = vfres->vf_cap_flags; 517 518 ice_vc_set_caps_allowlist(vf); 519 ice_vc_set_working_allowlist(vf); 520 521 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 522 523 err: 524 /* send the response back to the VF */ 525 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, 526 (u8 *)vfres, len); 527 528 kfree(vfres); 529 return ret; 530 } 531 532 /** 533 * ice_vc_reset_vf_msg 534 * @vf: pointer to the VF info 535 * 536 * called from the VF to reset itself, 537 * unlike other virtchnl messages, PF driver 538 * doesn't send the response back to the VF 539 */ 540 static void ice_vc_reset_vf_msg(struct ice_vf *vf) 541 { 542 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) 543 ice_reset_vf(vf, 0); 544 } 545 546 /** 547 * ice_vc_isvalid_vsi_id 548 * @vf: pointer to the VF info 549 * @vsi_id: VF relative VSI ID 550 * 551 * check for the valid VSI ID 552 */ 553 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) 554 { 555 struct ice_pf *pf = vf->pf; 556 struct ice_vsi *vsi; 557 558 vsi = ice_find_vsi(pf, vsi_id); 559 560 return (vsi && (vsi->vf == vf)); 561 } 562 563 /** 564 * ice_vc_isvalid_q_id 565 * @vf: pointer to the VF info 566 * @vsi_id: VSI ID 567 * @qid: VSI relative queue ID 568 * 569 * check for the valid queue ID 570 */ 571 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) 572 { 573 struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); 574 /* allocated Tx and Rx queues should be always equal for VF VSI */ 575 return (vsi && (qid < vsi->alloc_txq)); 576 } 577 578 /** 579 * ice_vc_isvalid_ring_len 580 * @ring_len: length of ring 581 * 582 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE 583 * or zero 584 */ 585 static bool ice_vc_isvalid_ring_len(u16 ring_len) 586 { 587 return ring_len == 0 || 588 (ring_len >= ICE_MIN_NUM_DESC && 589 ring_len <= ICE_MAX_NUM_DESC && 590 !(ring_len % ICE_REQ_DESC_MULTIPLE)); 591 } 592 593 /** 594 * ice_vc_validate_pattern 595 * @vf: pointer to the VF info 596 * @proto: virtchnl protocol headers 597 * 598 * validate the pattern is supported or not. 599 * 600 * Return: true on success, false on error. 601 */ 602 bool 603 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto) 604 { 605 bool is_ipv4 = false; 606 bool is_ipv6 = false; 607 bool is_udp = false; 608 u16 ptype = -1; 609 int i = 0; 610 611 while (i < proto->count && 612 proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) { 613 switch (proto->proto_hdr[i].type) { 614 case VIRTCHNL_PROTO_HDR_ETH: 615 ptype = ICE_PTYPE_MAC_PAY; 616 break; 617 case VIRTCHNL_PROTO_HDR_IPV4: 618 ptype = ICE_PTYPE_IPV4_PAY; 619 is_ipv4 = true; 620 break; 621 case VIRTCHNL_PROTO_HDR_IPV6: 622 ptype = ICE_PTYPE_IPV6_PAY; 623 is_ipv6 = true; 624 break; 625 case VIRTCHNL_PROTO_HDR_UDP: 626 if (is_ipv4) 627 ptype = ICE_PTYPE_IPV4_UDP_PAY; 628 else if (is_ipv6) 629 ptype = ICE_PTYPE_IPV6_UDP_PAY; 630 is_udp = true; 631 break; 632 case VIRTCHNL_PROTO_HDR_TCP: 633 if (is_ipv4) 634 ptype = ICE_PTYPE_IPV4_TCP_PAY; 635 else if (is_ipv6) 636 ptype = ICE_PTYPE_IPV6_TCP_PAY; 637 break; 638 case VIRTCHNL_PROTO_HDR_SCTP: 639 if (is_ipv4) 640 ptype = ICE_PTYPE_IPV4_SCTP_PAY; 641 else if (is_ipv6) 642 ptype = ICE_PTYPE_IPV6_SCTP_PAY; 643 break; 644 case VIRTCHNL_PROTO_HDR_GTPU_IP: 645 case VIRTCHNL_PROTO_HDR_GTPU_EH: 646 if (is_ipv4) 647 ptype = ICE_MAC_IPV4_GTPU; 648 else if (is_ipv6) 649 ptype = ICE_MAC_IPV6_GTPU; 650 goto out; 651 case VIRTCHNL_PROTO_HDR_L2TPV3: 652 if (is_ipv4) 653 ptype = ICE_MAC_IPV4_L2TPV3; 654 else if (is_ipv6) 655 ptype = ICE_MAC_IPV6_L2TPV3; 656 goto out; 657 case VIRTCHNL_PROTO_HDR_ESP: 658 if (is_ipv4) 659 ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP : 660 ICE_MAC_IPV4_ESP; 661 else if (is_ipv6) 662 ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP : 663 ICE_MAC_IPV6_ESP; 664 goto out; 665 case VIRTCHNL_PROTO_HDR_AH: 666 if (is_ipv4) 667 ptype = ICE_MAC_IPV4_AH; 668 else if (is_ipv6) 669 ptype = ICE_MAC_IPV6_AH; 670 goto out; 671 case VIRTCHNL_PROTO_HDR_PFCP: 672 if (is_ipv4) 673 ptype = ICE_MAC_IPV4_PFCP_SESSION; 674 else if (is_ipv6) 675 ptype = ICE_MAC_IPV6_PFCP_SESSION; 676 goto out; 677 default: 678 break; 679 } 680 i++; 681 } 682 683 out: 684 return ice_hw_ptype_ena(&vf->pf->hw, ptype); 685 } 686 687 /** 688 * ice_vc_parse_rss_cfg - parses hash fields and headers from 689 * a specific virtchnl RSS cfg 690 * @hw: pointer to the hardware 691 * @rss_cfg: pointer to the virtchnl RSS cfg 692 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*) 693 * to configure 694 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure 695 * 696 * Return true if all the protocol header and hash fields in the RSS cfg could 697 * be parsed, else return false 698 * 699 * This function parses the virtchnl RSS cfg to be the intended 700 * hash fields and the intended header for RSS configuration 701 */ 702 static bool 703 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg, 704 u32 *addl_hdrs, u64 *hash_flds) 705 { 706 const struct ice_vc_hash_field_match_type *hf_list; 707 const struct ice_vc_hdr_match_type *hdr_list; 708 int i, hf_list_len, hdr_list_len; 709 710 hf_list = ice_vc_hash_field_list; 711 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list); 712 hdr_list = ice_vc_hdr_list; 713 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list); 714 715 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { 716 struct virtchnl_proto_hdr *proto_hdr = 717 &rss_cfg->proto_hdrs.proto_hdr[i]; 718 bool hdr_found = false; 719 int j; 720 721 /* Find matched ice headers according to virtchnl headers. */ 722 for (j = 0; j < hdr_list_len; j++) { 723 struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; 724 725 if (proto_hdr->type == hdr_map.vc_hdr) { 726 *addl_hdrs |= hdr_map.ice_hdr; 727 hdr_found = true; 728 } 729 } 730 731 if (!hdr_found) 732 return false; 733 734 /* Find matched ice hash fields according to 735 * virtchnl hash fields. 736 */ 737 for (j = 0; j < hf_list_len; j++) { 738 struct ice_vc_hash_field_match_type hf_map = hf_list[j]; 739 740 if (proto_hdr->type == hf_map.vc_hdr && 741 proto_hdr->field_selector == hf_map.vc_hash_field) { 742 *hash_flds |= hf_map.ice_hash_field; 743 break; 744 } 745 } 746 } 747 748 return true; 749 } 750 751 /** 752 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced 753 * RSS offloads 754 * @caps: VF driver negotiated capabilities 755 * 756 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set, 757 * else return false 758 */ 759 static bool ice_vf_adv_rss_offload_ena(u32 caps) 760 { 761 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF); 762 } 763 764 /** 765 * ice_vc_handle_rss_cfg 766 * @vf: pointer to the VF info 767 * @msg: pointer to the message buffer 768 * @add: add a RSS config if true, otherwise delete a RSS config 769 * 770 * This function adds/deletes a RSS config 771 */ 772 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) 773 { 774 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG; 775 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg; 776 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 777 struct device *dev = ice_pf_to_dev(vf->pf); 778 struct ice_hw *hw = &vf->pf->hw; 779 struct ice_vsi *vsi; 780 781 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 782 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", 783 vf->vf_id); 784 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 785 goto error_param; 786 } 787 788 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) { 789 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n", 790 vf->vf_id); 791 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 792 goto error_param; 793 } 794 795 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 796 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 797 goto error_param; 798 } 799 800 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS || 801 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC || 802 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) { 803 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n", 804 vf->vf_id); 805 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 806 goto error_param; 807 } 808 809 vsi = ice_get_vf_vsi(vf); 810 if (!vsi) { 811 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 812 goto error_param; 813 } 814 815 if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { 816 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 817 goto error_param; 818 } 819 820 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { 821 struct ice_vsi_ctx *ctx; 822 u8 lut_type, hash_type; 823 int status; 824 825 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; 826 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR : 827 ICE_AQ_VSI_Q_OPT_RSS_TPLZ; 828 829 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 830 if (!ctx) { 831 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 832 goto error_param; 833 } 834 835 ctx->info.q_opt_rss = ((lut_type << 836 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & 837 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | 838 (hash_type & 839 ICE_AQ_VSI_Q_OPT_RSS_HASH_M); 840 841 /* Preserve existing queueing option setting */ 842 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & 843 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); 844 ctx->info.q_opt_tc = vsi->info.q_opt_tc; 845 ctx->info.q_opt_flags = vsi->info.q_opt_rss; 846 847 ctx->info.valid_sections = 848 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); 849 850 status = ice_update_vsi(hw, vsi->idx, ctx, NULL); 851 if (status) { 852 dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", 853 status, ice_aq_str(hw->adminq.sq_last_status)); 854 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 855 } else { 856 vsi->info.q_opt_rss = ctx->info.q_opt_rss; 857 } 858 859 kfree(ctx); 860 } else { 861 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE; 862 u64 hash_flds = ICE_HASH_INVALID; 863 864 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs, 865 &hash_flds)) { 866 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 867 goto error_param; 868 } 869 870 if (add) { 871 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds, 872 addl_hdrs)) { 873 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 874 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", 875 vsi->vsi_num, v_ret); 876 } 877 } else { 878 int status; 879 880 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds, 881 addl_hdrs); 882 /* We just ignore -ENOENT, because if two configurations 883 * share the same profile remove one of them actually 884 * removes both, since the profile is deleted. 885 */ 886 if (status && status != -ENOENT) { 887 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 888 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", 889 vf->vf_id, status); 890 } 891 } 892 } 893 894 error_param: 895 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0); 896 } 897 898 /** 899 * ice_vc_config_rss_key 900 * @vf: pointer to the VF info 901 * @msg: pointer to the msg buffer 902 * 903 * Configure the VF's RSS key 904 */ 905 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) 906 { 907 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 908 struct virtchnl_rss_key *vrk = 909 (struct virtchnl_rss_key *)msg; 910 struct ice_vsi *vsi; 911 912 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 913 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 914 goto error_param; 915 } 916 917 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { 918 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 919 goto error_param; 920 } 921 922 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { 923 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 924 goto error_param; 925 } 926 927 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 928 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 929 goto error_param; 930 } 931 932 vsi = ice_get_vf_vsi(vf); 933 if (!vsi) { 934 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 935 goto error_param; 936 } 937 938 if (ice_set_rss_key(vsi, vrk->key)) 939 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 940 error_param: 941 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret, 942 NULL, 0); 943 } 944 945 /** 946 * ice_vc_config_rss_lut 947 * @vf: pointer to the VF info 948 * @msg: pointer to the msg buffer 949 * 950 * Configure the VF's RSS LUT 951 */ 952 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) 953 { 954 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; 955 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 956 struct ice_vsi *vsi; 957 958 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 959 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 960 goto error_param; 961 } 962 963 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { 964 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 965 goto error_param; 966 } 967 968 if (vrl->lut_entries != ICE_LUT_VSI_SIZE) { 969 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 970 goto error_param; 971 } 972 973 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 974 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 975 goto error_param; 976 } 977 978 vsi = ice_get_vf_vsi(vf); 979 if (!vsi) { 980 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 981 goto error_param; 982 } 983 984 if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE)) 985 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 986 error_param: 987 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret, 988 NULL, 0); 989 } 990 991 /** 992 * ice_vc_cfg_promiscuous_mode_msg 993 * @vf: pointer to the VF info 994 * @msg: pointer to the msg buffer 995 * 996 * called from the VF to configure VF VSIs promiscuous mode 997 */ 998 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg) 999 { 1000 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 1001 bool rm_promisc, alluni = false, allmulti = false; 1002 struct virtchnl_promisc_info *info = 1003 (struct virtchnl_promisc_info *)msg; 1004 struct ice_vsi_vlan_ops *vlan_ops; 1005 int mcast_err = 0, ucast_err = 0; 1006 struct ice_pf *pf = vf->pf; 1007 struct ice_vsi *vsi; 1008 u8 mcast_m, ucast_m; 1009 struct device *dev; 1010 int ret = 0; 1011 1012 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 1013 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1014 goto error_param; 1015 } 1016 1017 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) { 1018 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1019 goto error_param; 1020 } 1021 1022 vsi = ice_get_vf_vsi(vf); 1023 if (!vsi) { 1024 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1025 goto error_param; 1026 } 1027 1028 dev = ice_pf_to_dev(pf); 1029 if (!ice_is_vf_trusted(vf)) { 1030 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", 1031 vf->vf_id); 1032 /* Leave v_ret alone, lie to the VF on purpose. */ 1033 goto error_param; 1034 } 1035 1036 if (info->flags & FLAG_VF_UNICAST_PROMISC) 1037 alluni = true; 1038 1039 if (info->flags & FLAG_VF_MULTICAST_PROMISC) 1040 allmulti = true; 1041 1042 rm_promisc = !allmulti && !alluni; 1043 1044 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); 1045 if (rm_promisc) 1046 ret = vlan_ops->ena_rx_filtering(vsi); 1047 else 1048 ret = vlan_ops->dis_rx_filtering(vsi); 1049 if (ret) { 1050 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n"); 1051 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1052 goto error_param; 1053 } 1054 1055 ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m); 1056 1057 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { 1058 if (alluni) { 1059 /* in this case we're turning on promiscuous mode */ 1060 ret = ice_set_dflt_vsi(vsi); 1061 } else { 1062 /* in this case we're turning off promiscuous mode */ 1063 if (ice_is_dflt_vsi_in_use(vsi->port_info)) 1064 ret = ice_clear_dflt_vsi(vsi); 1065 } 1066 1067 /* in this case we're turning on/off only 1068 * allmulticast 1069 */ 1070 if (allmulti) 1071 mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); 1072 else 1073 mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); 1074 1075 if (ret) { 1076 dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n", 1077 vf->vf_id, ret); 1078 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 1079 goto error_param; 1080 } 1081 } else { 1082 if (alluni) 1083 ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m); 1084 else 1085 ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m); 1086 1087 if (allmulti) 1088 mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m); 1089 else 1090 mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m); 1091 1092 if (ucast_err || mcast_err) 1093 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1094 } 1095 1096 if (!mcast_err) { 1097 if (allmulti && 1098 !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) 1099 dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", 1100 vf->vf_id); 1101 else if (!allmulti && 1102 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, 1103 vf->vf_states)) 1104 dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", 1105 vf->vf_id); 1106 } else { 1107 dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n", 1108 vf->vf_id, mcast_err); 1109 } 1110 1111 if (!ucast_err) { 1112 if (alluni && 1113 !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) 1114 dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", 1115 vf->vf_id); 1116 else if (!alluni && 1117 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, 1118 vf->vf_states)) 1119 dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", 1120 vf->vf_id); 1121 } else { 1122 dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n", 1123 vf->vf_id, ucast_err); 1124 } 1125 1126 error_param: 1127 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 1128 v_ret, NULL, 0); 1129 } 1130 1131 /** 1132 * ice_vc_get_stats_msg 1133 * @vf: pointer to the VF info 1134 * @msg: pointer to the msg buffer 1135 * 1136 * called from the VF to get VSI stats 1137 */ 1138 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) 1139 { 1140 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 1141 struct virtchnl_queue_select *vqs = 1142 (struct virtchnl_queue_select *)msg; 1143 struct ice_eth_stats stats = { 0 }; 1144 struct ice_vsi *vsi; 1145 1146 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 1147 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1148 goto error_param; 1149 } 1150 1151 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1152 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1153 goto error_param; 1154 } 1155 1156 vsi = ice_get_vf_vsi(vf); 1157 if (!vsi) { 1158 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1159 goto error_param; 1160 } 1161 1162 ice_update_eth_stats(vsi); 1163 1164 stats = vsi->eth_stats; 1165 1166 error_param: 1167 /* send the response to the VF */ 1168 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret, 1169 (u8 *)&stats, sizeof(stats)); 1170 } 1171 1172 /** 1173 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL 1174 * @vqs: virtchnl_queue_select structure containing bitmaps to validate 1175 * 1176 * Return true on successful validation, else false 1177 */ 1178 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs) 1179 { 1180 if ((!vqs->rx_queues && !vqs->tx_queues) || 1181 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) || 1182 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF)) 1183 return false; 1184 1185 return true; 1186 } 1187 1188 /** 1189 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL 1190 * @vsi: VSI of the VF to configure 1191 * @q_idx: VF queue index used to determine the queue in the PF's space 1192 */ 1193 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1194 { 1195 struct ice_hw *hw = &vsi->back->hw; 1196 u32 pfq = vsi->txq_map[q_idx]; 1197 u32 reg; 1198 1199 reg = rd32(hw, QINT_TQCTL(pfq)); 1200 1201 /* MSI-X index 0 in the VF's space is always for the OICR, which means 1202 * this is most likely a poll mode VF driver, so don't enable an 1203 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 1204 */ 1205 if (!(reg & QINT_TQCTL_MSIX_INDX_M)) 1206 return; 1207 1208 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M); 1209 } 1210 1211 /** 1212 * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL 1213 * @vsi: VSI of the VF to configure 1214 * @q_idx: VF queue index used to determine the queue in the PF's space 1215 */ 1216 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) 1217 { 1218 struct ice_hw *hw = &vsi->back->hw; 1219 u32 pfq = vsi->rxq_map[q_idx]; 1220 u32 reg; 1221 1222 reg = rd32(hw, QINT_RQCTL(pfq)); 1223 1224 /* MSI-X index 0 in the VF's space is always for the OICR, which means 1225 * this is most likely a poll mode VF driver, so don't enable an 1226 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP 1227 */ 1228 if (!(reg & QINT_RQCTL_MSIX_INDX_M)) 1229 return; 1230 1231 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M); 1232 } 1233 1234 /** 1235 * ice_vc_ena_qs_msg 1236 * @vf: pointer to the VF info 1237 * @msg: pointer to the msg buffer 1238 * 1239 * called from the VF to enable all or specific queue(s) 1240 */ 1241 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) 1242 { 1243 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 1244 struct virtchnl_queue_select *vqs = 1245 (struct virtchnl_queue_select *)msg; 1246 struct ice_vsi *vsi; 1247 unsigned long q_map; 1248 u16 vf_q_id; 1249 1250 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 1251 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1252 goto error_param; 1253 } 1254 1255 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1256 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1257 goto error_param; 1258 } 1259 1260 if (!ice_vc_validate_vqs_bitmaps(vqs)) { 1261 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1262 goto error_param; 1263 } 1264 1265 vsi = ice_get_vf_vsi(vf); 1266 if (!vsi) { 1267 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1268 goto error_param; 1269 } 1270 1271 /* Enable only Rx rings, Tx rings were enabled by the FW when the 1272 * Tx queue group list was configured and the context bits were 1273 * programmed using ice_vsi_cfg_txqs 1274 */ 1275 q_map = vqs->rx_queues; 1276 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 1277 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 1278 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1279 goto error_param; 1280 } 1281 1282 /* Skip queue if enabled */ 1283 if (test_bit(vf_q_id, vf->rxq_ena)) 1284 continue; 1285 1286 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) { 1287 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n", 1288 vf_q_id, vsi->vsi_num); 1289 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1290 goto error_param; 1291 } 1292 1293 ice_vf_ena_rxq_interrupt(vsi, vf_q_id); 1294 set_bit(vf_q_id, vf->rxq_ena); 1295 } 1296 1297 q_map = vqs->tx_queues; 1298 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 1299 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 1300 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1301 goto error_param; 1302 } 1303 1304 /* Skip queue if enabled */ 1305 if (test_bit(vf_q_id, vf->txq_ena)) 1306 continue; 1307 1308 ice_vf_ena_txq_interrupt(vsi, vf_q_id); 1309 set_bit(vf_q_id, vf->txq_ena); 1310 } 1311 1312 /* Set flag to indicate that queues are enabled */ 1313 if (v_ret == VIRTCHNL_STATUS_SUCCESS) 1314 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 1315 1316 error_param: 1317 /* send the response to the VF */ 1318 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret, 1319 NULL, 0); 1320 } 1321 1322 /** 1323 * ice_vf_vsi_dis_single_txq - disable a single Tx queue 1324 * @vf: VF to disable queue for 1325 * @vsi: VSI for the VF 1326 * @q_id: VF relative (0-based) queue ID 1327 * 1328 * Attempt to disable the Tx queue passed in. If the Tx queue was successfully 1329 * disabled then clear q_id bit in the enabled queues bitmap and return 1330 * success. Otherwise return error. 1331 */ 1332 static int 1333 ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id) 1334 { 1335 struct ice_txq_meta txq_meta = { 0 }; 1336 struct ice_tx_ring *ring; 1337 int err; 1338 1339 if (!test_bit(q_id, vf->txq_ena)) 1340 dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", 1341 q_id, vsi->vsi_num); 1342 1343 ring = vsi->tx_rings[q_id]; 1344 if (!ring) 1345 return -EINVAL; 1346 1347 ice_fill_txq_meta(vsi, ring, &txq_meta); 1348 1349 err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta); 1350 if (err) { 1351 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", 1352 q_id, vsi->vsi_num); 1353 return err; 1354 } 1355 1356 /* Clear enabled queues flag */ 1357 clear_bit(q_id, vf->txq_ena); 1358 1359 return 0; 1360 } 1361 1362 /** 1363 * ice_vc_dis_qs_msg 1364 * @vf: pointer to the VF info 1365 * @msg: pointer to the msg buffer 1366 * 1367 * called from the VF to disable all or specific queue(s) 1368 */ 1369 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) 1370 { 1371 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 1372 struct virtchnl_queue_select *vqs = 1373 (struct virtchnl_queue_select *)msg; 1374 struct ice_vsi *vsi; 1375 unsigned long q_map; 1376 u16 vf_q_id; 1377 1378 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && 1379 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) { 1380 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1381 goto error_param; 1382 } 1383 1384 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { 1385 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1386 goto error_param; 1387 } 1388 1389 if (!ice_vc_validate_vqs_bitmaps(vqs)) { 1390 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1391 goto error_param; 1392 } 1393 1394 vsi = ice_get_vf_vsi(vf); 1395 if (!vsi) { 1396 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1397 goto error_param; 1398 } 1399 1400 if (vqs->tx_queues) { 1401 q_map = vqs->tx_queues; 1402 1403 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 1404 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 1405 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1406 goto error_param; 1407 } 1408 1409 if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) { 1410 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1411 goto error_param; 1412 } 1413 } 1414 } 1415 1416 q_map = vqs->rx_queues; 1417 /* speed up Rx queue disable by batching them if possible */ 1418 if (q_map && 1419 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) { 1420 if (ice_vsi_stop_all_rx_rings(vsi)) { 1421 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n", 1422 vsi->vsi_num); 1423 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1424 goto error_param; 1425 } 1426 1427 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); 1428 } else if (q_map) { 1429 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { 1430 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { 1431 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1432 goto error_param; 1433 } 1434 1435 /* Skip queue if not enabled */ 1436 if (!test_bit(vf_q_id, vf->rxq_ena)) 1437 continue; 1438 1439 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id, 1440 true)) { 1441 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n", 1442 vf_q_id, vsi->vsi_num); 1443 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1444 goto error_param; 1445 } 1446 1447 /* Clear enabled queues flag */ 1448 clear_bit(vf_q_id, vf->rxq_ena); 1449 } 1450 } 1451 1452 /* Clear enabled queues flag */ 1453 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf)) 1454 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states); 1455 1456 error_param: 1457 /* send the response to the VF */ 1458 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret, 1459 NULL, 0); 1460 } 1461 1462 /** 1463 * ice_cfg_interrupt 1464 * @vf: pointer to the VF info 1465 * @vsi: the VSI being configured 1466 * @vector_id: vector ID 1467 * @map: vector map for mapping vectors to queues 1468 * @q_vector: structure for interrupt vector 1469 * configure the IRQ to queue map 1470 */ 1471 static int 1472 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, 1473 struct virtchnl_vector_map *map, 1474 struct ice_q_vector *q_vector) 1475 { 1476 u16 vsi_q_id, vsi_q_id_idx; 1477 unsigned long qmap; 1478 1479 q_vector->num_ring_rx = 0; 1480 q_vector->num_ring_tx = 0; 1481 1482 qmap = map->rxq_map; 1483 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 1484 vsi_q_id = vsi_q_id_idx; 1485 1486 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 1487 return VIRTCHNL_STATUS_ERR_PARAM; 1488 1489 q_vector->num_ring_rx++; 1490 q_vector->rx.itr_idx = map->rxitr_idx; 1491 vsi->rx_rings[vsi_q_id]->q_vector = q_vector; 1492 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, 1493 q_vector->rx.itr_idx); 1494 } 1495 1496 qmap = map->txq_map; 1497 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { 1498 vsi_q_id = vsi_q_id_idx; 1499 1500 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) 1501 return VIRTCHNL_STATUS_ERR_PARAM; 1502 1503 q_vector->num_ring_tx++; 1504 q_vector->tx.itr_idx = map->txitr_idx; 1505 vsi->tx_rings[vsi_q_id]->q_vector = q_vector; 1506 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, 1507 q_vector->tx.itr_idx); 1508 } 1509 1510 return VIRTCHNL_STATUS_SUCCESS; 1511 } 1512 1513 /** 1514 * ice_vc_cfg_irq_map_msg 1515 * @vf: pointer to the VF info 1516 * @msg: pointer to the msg buffer 1517 * 1518 * called from the VF to configure the IRQ to queue map 1519 */ 1520 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) 1521 { 1522 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 1523 u16 num_q_vectors_mapped, vsi_id, vector_id; 1524 struct virtchnl_irq_map_info *irqmap_info; 1525 struct virtchnl_vector_map *map; 1526 struct ice_pf *pf = vf->pf; 1527 struct ice_vsi *vsi; 1528 int i; 1529 1530 irqmap_info = (struct virtchnl_irq_map_info *)msg; 1531 num_q_vectors_mapped = irqmap_info->num_vectors; 1532 1533 /* Check to make sure number of VF vectors mapped is not greater than 1534 * number of VF vectors originally allocated, and check that 1535 * there is actually at least a single VF queue vector mapped 1536 */ 1537 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 1538 pf->vfs.num_msix_per < num_q_vectors_mapped || 1539 !num_q_vectors_mapped) { 1540 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1541 goto error_param; 1542 } 1543 1544 vsi = ice_get_vf_vsi(vf); 1545 if (!vsi) { 1546 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1547 goto error_param; 1548 } 1549 1550 for (i = 0; i < num_q_vectors_mapped; i++) { 1551 struct ice_q_vector *q_vector; 1552 1553 map = &irqmap_info->vecmap[i]; 1554 1555 vector_id = map->vector_id; 1556 vsi_id = map->vsi_id; 1557 /* vector_id is always 0-based for each VF, and can never be 1558 * larger than or equal to the max allowed interrupts per VF 1559 */ 1560 if (!(vector_id < pf->vfs.num_msix_per) || 1561 !ice_vc_isvalid_vsi_id(vf, vsi_id) || 1562 (!vector_id && (map->rxq_map || map->txq_map))) { 1563 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1564 goto error_param; 1565 } 1566 1567 /* No need to map VF miscellaneous or rogue vector */ 1568 if (!vector_id) 1569 continue; 1570 1571 /* Subtract non queue vector from vector_id passed by VF 1572 * to get actual number of VSI queue vector array index 1573 */ 1574 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF]; 1575 if (!q_vector) { 1576 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 1577 goto error_param; 1578 } 1579 1580 /* lookout for the invalid queue index */ 1581 v_ret = (enum virtchnl_status_code) 1582 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); 1583 if (v_ret) 1584 goto error_param; 1585 } 1586 1587 error_param: 1588 /* send the response to the VF */ 1589 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, 1590 NULL, 0); 1591 } 1592 1593 /** 1594 * ice_vc_cfg_qs_msg 1595 * @vf: pointer to the VF info 1596 * @msg: pointer to the msg buffer 1597 * 1598 * called from the VF to configure the Rx/Tx queues 1599 */ 1600 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) 1601 { 1602 struct virtchnl_vsi_queue_config_info *qci = 1603 (struct virtchnl_vsi_queue_config_info *)msg; 1604 struct virtchnl_queue_pair_info *qpi; 1605 struct ice_pf *pf = vf->pf; 1606 struct ice_lag *lag; 1607 struct ice_vsi *vsi; 1608 u8 act_prt, pri_prt; 1609 int i = -1, q_idx; 1610 1611 lag = pf->lag; 1612 mutex_lock(&pf->lag_mutex); 1613 act_prt = ICE_LAG_INVALID_PORT; 1614 pri_prt = pf->hw.port_info->lport; 1615 if (lag && lag->bonded && lag->primary) { 1616 act_prt = lag->active_port; 1617 if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT && 1618 lag->upper_netdev) 1619 ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); 1620 else 1621 act_prt = ICE_LAG_INVALID_PORT; 1622 } 1623 1624 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 1625 goto error_param; 1626 1627 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) 1628 goto error_param; 1629 1630 vsi = ice_get_vf_vsi(vf); 1631 if (!vsi) 1632 goto error_param; 1633 1634 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || 1635 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { 1636 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", 1637 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); 1638 goto error_param; 1639 } 1640 1641 for (i = 0; i < qci->num_queue_pairs; i++) { 1642 if (!qci->qpair[i].rxq.crc_disable) 1643 continue; 1644 1645 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) || 1646 vf->vlan_strip_ena) 1647 goto error_param; 1648 } 1649 1650 for (i = 0; i < qci->num_queue_pairs; i++) { 1651 qpi = &qci->qpair[i]; 1652 if (qpi->txq.vsi_id != qci->vsi_id || 1653 qpi->rxq.vsi_id != qci->vsi_id || 1654 qpi->rxq.queue_id != qpi->txq.queue_id || 1655 qpi->txq.headwb_enabled || 1656 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || 1657 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || 1658 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { 1659 goto error_param; 1660 } 1661 1662 q_idx = qpi->rxq.queue_id; 1663 1664 /* make sure selected "q_idx" is in valid range of queues 1665 * for selected "vsi" 1666 */ 1667 if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { 1668 goto error_param; 1669 } 1670 1671 /* copy Tx queue info from VF into VSI */ 1672 if (qpi->txq.ring_len > 0) { 1673 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; 1674 vsi->tx_rings[i]->count = qpi->txq.ring_len; 1675 1676 /* Disable any existing queue first */ 1677 if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) 1678 goto error_param; 1679 1680 /* Configure a queue with the requested settings */ 1681 if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { 1682 dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", 1683 vf->vf_id, i); 1684 goto error_param; 1685 } 1686 } 1687 1688 /* copy Rx queue info from VF into VSI */ 1689 if (qpi->rxq.ring_len > 0) { 1690 u16 max_frame_size = ice_vc_get_max_frame_size(vf); 1691 u32 rxdid; 1692 1693 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; 1694 vsi->rx_rings[i]->count = qpi->rxq.ring_len; 1695 1696 if (qpi->rxq.crc_disable) 1697 vsi->rx_rings[q_idx]->flags |= 1698 ICE_RX_FLAGS_CRC_STRIP_DIS; 1699 else 1700 vsi->rx_rings[q_idx]->flags &= 1701 ~ICE_RX_FLAGS_CRC_STRIP_DIS; 1702 1703 if (qpi->rxq.databuffer_size != 0 && 1704 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || 1705 qpi->rxq.databuffer_size < 1024)) 1706 goto error_param; 1707 vsi->rx_buf_len = qpi->rxq.databuffer_size; 1708 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; 1709 if (qpi->rxq.max_pkt_size > max_frame_size || 1710 qpi->rxq.max_pkt_size < 64) 1711 goto error_param; 1712 1713 vsi->max_frame = qpi->rxq.max_pkt_size; 1714 /* add space for the port VLAN since the VF driver is 1715 * not expected to account for it in the MTU 1716 * calculation 1717 */ 1718 if (ice_vf_is_port_vlan_ena(vf)) 1719 vsi->max_frame += VLAN_HLEN; 1720 1721 if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { 1722 dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", 1723 vf->vf_id, i); 1724 goto error_param; 1725 } 1726 1727 /* If Rx flex desc is supported, select RXDID for Rx 1728 * queues. Otherwise, use legacy 32byte descriptor 1729 * format. Legacy 16byte descriptor is not supported. 1730 * If this RXDID is selected, return error. 1731 */ 1732 if (vf->driver_caps & 1733 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { 1734 rxdid = qpi->rxq.rxdid; 1735 if (!(BIT(rxdid) & pf->supported_rxdids)) 1736 goto error_param; 1737 } else { 1738 rxdid = ICE_RXDID_LEGACY_1; 1739 } 1740 1741 ice_write_qrxflxp_cntxt(&vsi->back->hw, 1742 vsi->rxq_map[q_idx], 1743 rxdid, 0x03, false); 1744 } 1745 } 1746 1747 if (lag && lag->bonded && lag->primary && 1748 act_prt != ICE_LAG_INVALID_PORT) 1749 ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); 1750 mutex_unlock(&pf->lag_mutex); 1751 1752 /* send the response to the VF */ 1753 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1754 VIRTCHNL_STATUS_SUCCESS, NULL, 0); 1755 error_param: 1756 /* disable whatever we can */ 1757 for (; i >= 0; i--) { 1758 if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true)) 1759 dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n", 1760 vf->vf_id, i); 1761 if (ice_vf_vsi_dis_single_txq(vf, vsi, i)) 1762 dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", 1763 vf->vf_id, i); 1764 } 1765 1766 if (lag && lag->bonded && lag->primary && 1767 act_prt != ICE_LAG_INVALID_PORT) 1768 ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); 1769 mutex_unlock(&pf->lag_mutex); 1770 1771 ice_lag_move_new_vf_nodes(vf); 1772 1773 /* send the response to the VF */ 1774 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, 1775 VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); 1776 } 1777 1778 /** 1779 * ice_can_vf_change_mac 1780 * @vf: pointer to the VF info 1781 * 1782 * Return true if the VF is allowed to change its MAC filters, false otherwise 1783 */ 1784 static bool ice_can_vf_change_mac(struct ice_vf *vf) 1785 { 1786 /* If the VF MAC address has been set administratively (via the 1787 * ndo_set_vf_mac command), then deny permission to the VF to 1788 * add/delete unicast MAC addresses, unless the VF is trusted 1789 */ 1790 if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) 1791 return false; 1792 1793 return true; 1794 } 1795 1796 /** 1797 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr 1798 * @vc_ether_addr: used to extract the type 1799 */ 1800 static u8 1801 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr) 1802 { 1803 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK); 1804 } 1805 1806 /** 1807 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF 1808 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 1809 */ 1810 static bool 1811 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr) 1812 { 1813 u8 type = ice_vc_ether_addr_type(vc_ether_addr); 1814 1815 return (type == VIRTCHNL_ETHER_ADDR_LEGACY); 1816 } 1817 1818 /** 1819 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC 1820 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type 1821 * 1822 * This function should only be called when the MAC address in 1823 * virtchnl_ether_addr is a valid unicast MAC 1824 */ 1825 static bool 1826 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr) 1827 { 1828 u8 type = ice_vc_ether_addr_type(vc_ether_addr); 1829 1830 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY); 1831 } 1832 1833 /** 1834 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed 1835 * @vf: VF to update 1836 * @vc_ether_addr: structure from VIRTCHNL with MAC to add 1837 */ 1838 static void 1839 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 1840 { 1841 u8 *mac_addr = vc_ether_addr->addr; 1842 1843 if (!is_valid_ether_addr(mac_addr)) 1844 return; 1845 1846 /* only allow legacy VF drivers to set the device and hardware MAC if it 1847 * is zero and allow new VF drivers to set the hardware MAC if the type 1848 * was correctly specified over VIRTCHNL 1849 */ 1850 if ((ice_is_vc_addr_legacy(vc_ether_addr) && 1851 is_zero_ether_addr(vf->hw_lan_addr)) || 1852 ice_is_vc_addr_primary(vc_ether_addr)) { 1853 ether_addr_copy(vf->dev_lan_addr, mac_addr); 1854 ether_addr_copy(vf->hw_lan_addr, mac_addr); 1855 } 1856 1857 /* hardware and device MACs are already set, but its possible that the 1858 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the 1859 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it 1860 * away for the legacy VF driver case as it will be updated in the 1861 * delete flow for this case 1862 */ 1863 if (ice_is_vc_addr_legacy(vc_ether_addr)) { 1864 ether_addr_copy(vf->legacy_last_added_umac.addr, 1865 mac_addr); 1866 vf->legacy_last_added_umac.time_modified = jiffies; 1867 } 1868 } 1869 1870 /** 1871 * ice_vc_add_mac_addr - attempt to add the MAC address passed in 1872 * @vf: pointer to the VF info 1873 * @vsi: pointer to the VF's VSI 1874 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC 1875 */ 1876 static int 1877 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 1878 struct virtchnl_ether_addr *vc_ether_addr) 1879 { 1880 struct device *dev = ice_pf_to_dev(vf->pf); 1881 u8 *mac_addr = vc_ether_addr->addr; 1882 int ret; 1883 1884 /* device MAC already added */ 1885 if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) 1886 return 0; 1887 1888 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { 1889 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); 1890 return -EPERM; 1891 } 1892 1893 ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 1894 if (ret == -EEXIST) { 1895 dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr, 1896 vf->vf_id); 1897 /* don't return since we might need to update 1898 * the primary MAC in ice_vfhw_mac_add() below 1899 */ 1900 } else if (ret) { 1901 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n", 1902 mac_addr, vf->vf_id, ret); 1903 return ret; 1904 } else { 1905 vf->num_mac++; 1906 } 1907 1908 ice_vfhw_mac_add(vf, vc_ether_addr); 1909 1910 return ret; 1911 } 1912 1913 /** 1914 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired 1915 * @last_added_umac: structure used to check expiration 1916 */ 1917 static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac) 1918 { 1919 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000) 1920 return time_is_before_jiffies(last_added_umac->time_modified + 1921 ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME); 1922 } 1923 1924 /** 1925 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF 1926 * @vf: VF to update 1927 * @vc_ether_addr: structure from VIRTCHNL with MAC to check 1928 * 1929 * only update cached hardware MAC for legacy VF drivers on delete 1930 * because we cannot guarantee order/type of MAC from the VF driver 1931 */ 1932 static void 1933 ice_update_legacy_cached_mac(struct ice_vf *vf, 1934 struct virtchnl_ether_addr *vc_ether_addr) 1935 { 1936 if (!ice_is_vc_addr_legacy(vc_ether_addr) || 1937 ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) 1938 return; 1939 1940 ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr); 1941 ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr); 1942 } 1943 1944 /** 1945 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed 1946 * @vf: VF to update 1947 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete 1948 */ 1949 static void 1950 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) 1951 { 1952 u8 *mac_addr = vc_ether_addr->addr; 1953 1954 if (!is_valid_ether_addr(mac_addr) || 1955 !ether_addr_equal(vf->dev_lan_addr, mac_addr)) 1956 return; 1957 1958 /* allow the device MAC to be repopulated in the add flow and don't 1959 * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant 1960 * to be persistent on VM reboot and across driver unload/load, which 1961 * won't work if we clear the hardware MAC here 1962 */ 1963 eth_zero_addr(vf->dev_lan_addr); 1964 1965 ice_update_legacy_cached_mac(vf, vc_ether_addr); 1966 } 1967 1968 /** 1969 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in 1970 * @vf: pointer to the VF info 1971 * @vsi: pointer to the VF's VSI 1972 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC 1973 */ 1974 static int 1975 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, 1976 struct virtchnl_ether_addr *vc_ether_addr) 1977 { 1978 struct device *dev = ice_pf_to_dev(vf->pf); 1979 u8 *mac_addr = vc_ether_addr->addr; 1980 int status; 1981 1982 if (!ice_can_vf_change_mac(vf) && 1983 ether_addr_equal(vf->dev_lan_addr, mac_addr)) 1984 return 0; 1985 1986 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI); 1987 if (status == -ENOENT) { 1988 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr, 1989 vf->vf_id); 1990 return -ENOENT; 1991 } else if (status) { 1992 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n", 1993 mac_addr, vf->vf_id, status); 1994 return -EIO; 1995 } 1996 1997 ice_vfhw_mac_del(vf, vc_ether_addr); 1998 1999 vf->num_mac--; 2000 2001 return 0; 2002 } 2003 2004 /** 2005 * ice_vc_handle_mac_addr_msg 2006 * @vf: pointer to the VF info 2007 * @msg: pointer to the msg buffer 2008 * @set: true if MAC filters are being set, false otherwise 2009 * 2010 * add guest MAC address filter 2011 */ 2012 static int 2013 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) 2014 { 2015 int (*ice_vc_cfg_mac) 2016 (struct ice_vf *vf, struct ice_vsi *vsi, 2017 struct virtchnl_ether_addr *virtchnl_ether_addr); 2018 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2019 struct virtchnl_ether_addr_list *al = 2020 (struct virtchnl_ether_addr_list *)msg; 2021 struct ice_pf *pf = vf->pf; 2022 enum virtchnl_ops vc_op; 2023 struct ice_vsi *vsi; 2024 int i; 2025 2026 if (set) { 2027 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; 2028 ice_vc_cfg_mac = ice_vc_add_mac_addr; 2029 } else { 2030 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; 2031 ice_vc_cfg_mac = ice_vc_del_mac_addr; 2032 } 2033 2034 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 2035 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 2036 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2037 goto handle_mac_exit; 2038 } 2039 2040 /* If this VF is not privileged, then we can't add more than a 2041 * limited number of addresses. Check to make sure that the 2042 * additions do not push us over the limit. 2043 */ 2044 if (set && !ice_is_vf_trusted(vf) && 2045 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { 2046 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", 2047 vf->vf_id); 2048 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2049 goto handle_mac_exit; 2050 } 2051 2052 vsi = ice_get_vf_vsi(vf); 2053 if (!vsi) { 2054 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2055 goto handle_mac_exit; 2056 } 2057 2058 for (i = 0; i < al->num_elements; i++) { 2059 u8 *mac_addr = al->list[i].addr; 2060 int result; 2061 2062 if (is_broadcast_ether_addr(mac_addr) || 2063 is_zero_ether_addr(mac_addr)) 2064 continue; 2065 2066 result = ice_vc_cfg_mac(vf, vsi, &al->list[i]); 2067 if (result == -EEXIST || result == -ENOENT) { 2068 continue; 2069 } else if (result) { 2070 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; 2071 goto handle_mac_exit; 2072 } 2073 } 2074 2075 handle_mac_exit: 2076 /* send the response to the VF */ 2077 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); 2078 } 2079 2080 /** 2081 * ice_vc_add_mac_addr_msg 2082 * @vf: pointer to the VF info 2083 * @msg: pointer to the msg buffer 2084 * 2085 * add guest MAC address filter 2086 */ 2087 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) 2088 { 2089 return ice_vc_handle_mac_addr_msg(vf, msg, true); 2090 } 2091 2092 /** 2093 * ice_vc_del_mac_addr_msg 2094 * @vf: pointer to the VF info 2095 * @msg: pointer to the msg buffer 2096 * 2097 * remove guest MAC address filter 2098 */ 2099 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) 2100 { 2101 return ice_vc_handle_mac_addr_msg(vf, msg, false); 2102 } 2103 2104 /** 2105 * ice_vc_request_qs_msg 2106 * @vf: pointer to the VF info 2107 * @msg: pointer to the msg buffer 2108 * 2109 * VFs get a default number of queues but can use this message to request a 2110 * different number. If the request is successful, PF will reset the VF and 2111 * return 0. If unsuccessful, PF will send message informing VF of number of 2112 * available queue pairs via virtchnl message response to VF. 2113 */ 2114 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) 2115 { 2116 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2117 struct virtchnl_vf_res_request *vfres = 2118 (struct virtchnl_vf_res_request *)msg; 2119 u16 req_queues = vfres->num_queue_pairs; 2120 struct ice_pf *pf = vf->pf; 2121 u16 max_allowed_vf_queues; 2122 u16 tx_rx_queue_left; 2123 struct device *dev; 2124 u16 cur_queues; 2125 2126 dev = ice_pf_to_dev(pf); 2127 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2128 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2129 goto error_param; 2130 } 2131 2132 cur_queues = vf->num_vf_qs; 2133 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), 2134 ice_get_avail_rxq_count(pf)); 2135 max_allowed_vf_queues = tx_rx_queue_left + cur_queues; 2136 if (!req_queues) { 2137 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", 2138 vf->vf_id); 2139 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) { 2140 dev_err(dev, "VF %d tried to request more than %d queues.\n", 2141 vf->vf_id, ICE_MAX_RSS_QS_PER_VF); 2142 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF; 2143 } else if (req_queues > cur_queues && 2144 req_queues - cur_queues > tx_rx_queue_left) { 2145 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", 2146 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); 2147 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, 2148 ICE_MAX_RSS_QS_PER_VF); 2149 } else { 2150 /* request is successful, then reset VF */ 2151 vf->num_req_qs = req_queues; 2152 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); 2153 dev_info(dev, "VF %d granted request of %u queues.\n", 2154 vf->vf_id, req_queues); 2155 return 0; 2156 } 2157 2158 error_param: 2159 /* send the response to the VF */ 2160 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 2161 v_ret, (u8 *)vfres, sizeof(*vfres)); 2162 } 2163 2164 /** 2165 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads 2166 * @caps: VF driver negotiated capabilities 2167 * 2168 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false 2169 */ 2170 static bool ice_vf_vlan_offload_ena(u32 caps) 2171 { 2172 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN); 2173 } 2174 2175 /** 2176 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed 2177 * @vf: VF used to determine if VLAN promiscuous config is allowed 2178 */ 2179 static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) 2180 { 2181 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || 2182 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) && 2183 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags)) 2184 return true; 2185 2186 return false; 2187 } 2188 2189 /** 2190 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN 2191 * @vsi: VF's VSI used to enable VLAN promiscuous mode 2192 * @vlan: VLAN used to enable VLAN promiscuous 2193 * 2194 * This function should only be called if VLAN promiscuous mode is allowed, 2195 * which can be determined via ice_is_vlan_promisc_allowed(). 2196 */ 2197 static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) 2198 { 2199 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; 2200 int status; 2201 2202 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 2203 vlan->vid); 2204 if (status && status != -EEXIST) 2205 return status; 2206 2207 return 0; 2208 } 2209 2210 /** 2211 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN 2212 * @vsi: VF's VSI used to disable VLAN promiscuous mode for 2213 * @vlan: VLAN used to disable VLAN promiscuous 2214 * 2215 * This function should only be called if VLAN promiscuous mode is allowed, 2216 * which can be determined via ice_is_vlan_promisc_allowed(). 2217 */ 2218 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) 2219 { 2220 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; 2221 int status; 2222 2223 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 2224 vlan->vid); 2225 if (status && status != -ENOENT) 2226 return status; 2227 2228 return 0; 2229 } 2230 2231 /** 2232 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters 2233 * @vf: VF to check against 2234 * @vsi: VF's VSI 2235 * 2236 * If the VF is trusted then the VF is allowed to add as many VLANs as it 2237 * wants to, so return false. 2238 * 2239 * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max 2240 * allowed VLANs for an untrusted VF. Return the result of this comparison. 2241 */ 2242 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi) 2243 { 2244 if (ice_is_vf_trusted(vf)) 2245 return false; 2246 2247 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1 2248 return ((ice_vsi_num_non_zero_vlans(vsi) + 2249 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF); 2250 } 2251 2252 /** 2253 * ice_vc_process_vlan_msg 2254 * @vf: pointer to the VF info 2255 * @msg: pointer to the msg buffer 2256 * @add_v: Add VLAN if true, otherwise delete VLAN 2257 * 2258 * Process virtchnl op to add or remove programmed guest VLAN ID 2259 */ 2260 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) 2261 { 2262 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2263 struct virtchnl_vlan_filter_list *vfl = 2264 (struct virtchnl_vlan_filter_list *)msg; 2265 struct ice_pf *pf = vf->pf; 2266 bool vlan_promisc = false; 2267 struct ice_vsi *vsi; 2268 struct device *dev; 2269 int status = 0; 2270 int i; 2271 2272 dev = ice_pf_to_dev(pf); 2273 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2274 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2275 goto error_param; 2276 } 2277 2278 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 2279 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2280 goto error_param; 2281 } 2282 2283 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { 2284 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2285 goto error_param; 2286 } 2287 2288 for (i = 0; i < vfl->num_elements; i++) { 2289 if (vfl->vlan_id[i] >= VLAN_N_VID) { 2290 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2291 dev_err(dev, "invalid VF VLAN id %d\n", 2292 vfl->vlan_id[i]); 2293 goto error_param; 2294 } 2295 } 2296 2297 vsi = ice_get_vf_vsi(vf); 2298 if (!vsi) { 2299 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2300 goto error_param; 2301 } 2302 2303 if (add_v && ice_vf_has_max_vlans(vf, vsi)) { 2304 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", 2305 vf->vf_id); 2306 /* There is no need to let VF know about being not trusted, 2307 * so we can just return success message here 2308 */ 2309 goto error_param; 2310 } 2311 2312 /* in DVM a VF can add/delete inner VLAN filters when 2313 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM 2314 */ 2315 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) { 2316 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2317 goto error_param; 2318 } 2319 2320 /* in DVM VLAN promiscuous is based on the outer VLAN, which would be 2321 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only 2322 * allow vlan_promisc = true in SVM and if no port VLAN is configured 2323 */ 2324 vlan_promisc = ice_is_vlan_promisc_allowed(vf) && 2325 !ice_is_dvm_ena(&pf->hw) && 2326 !ice_vf_is_port_vlan_ena(vf); 2327 2328 if (add_v) { 2329 for (i = 0; i < vfl->num_elements; i++) { 2330 u16 vid = vfl->vlan_id[i]; 2331 struct ice_vlan vlan; 2332 2333 if (ice_vf_has_max_vlans(vf, vsi)) { 2334 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", 2335 vf->vf_id); 2336 /* There is no need to let VF know about being 2337 * not trusted, so we can just return success 2338 * message here as well. 2339 */ 2340 goto error_param; 2341 } 2342 2343 /* we add VLAN 0 by default for each VF so we can enable 2344 * Tx VLAN anti-spoof without triggering MDD events so 2345 * we don't need to add it again here 2346 */ 2347 if (!vid) 2348 continue; 2349 2350 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0); 2351 status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan); 2352 if (status) { 2353 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2354 goto error_param; 2355 } 2356 2357 /* Enable VLAN filtering on first non-zero VLAN */ 2358 if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) { 2359 if (vf->spoofchk) { 2360 status = vsi->inner_vlan_ops.ena_tx_filtering(vsi); 2361 if (status) { 2362 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2363 dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n", 2364 vid, status); 2365 goto error_param; 2366 } 2367 } 2368 if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) { 2369 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2370 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", 2371 vid, status); 2372 goto error_param; 2373 } 2374 } else if (vlan_promisc) { 2375 status = ice_vf_ena_vlan_promisc(vsi, &vlan); 2376 if (status) { 2377 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2378 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", 2379 vid, status); 2380 } 2381 } 2382 } 2383 } else { 2384 /* In case of non_trusted VF, number of VLAN elements passed 2385 * to PF for removal might be greater than number of VLANs 2386 * filter programmed for that VF - So, use actual number of 2387 * VLANS added earlier with add VLAN opcode. In order to avoid 2388 * removing VLAN that doesn't exist, which result to sending 2389 * erroneous failed message back to the VF 2390 */ 2391 int num_vf_vlan; 2392 2393 num_vf_vlan = vsi->num_vlan; 2394 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) { 2395 u16 vid = vfl->vlan_id[i]; 2396 struct ice_vlan vlan; 2397 2398 /* we add VLAN 0 by default for each VF so we can enable 2399 * Tx VLAN anti-spoof without triggering MDD events so 2400 * we don't want a VIRTCHNL request to remove it 2401 */ 2402 if (!vid) 2403 continue; 2404 2405 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0); 2406 status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan); 2407 if (status) { 2408 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2409 goto error_param; 2410 } 2411 2412 /* Disable VLAN filtering when only VLAN 0 is left */ 2413 if (!ice_vsi_has_non_zero_vlans(vsi)) { 2414 vsi->inner_vlan_ops.dis_tx_filtering(vsi); 2415 vsi->inner_vlan_ops.dis_rx_filtering(vsi); 2416 } 2417 2418 if (vlan_promisc) 2419 ice_vf_dis_vlan_promisc(vsi, &vlan); 2420 } 2421 } 2422 2423 error_param: 2424 /* send the response to the VF */ 2425 if (add_v) 2426 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret, 2427 NULL, 0); 2428 else 2429 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret, 2430 NULL, 0); 2431 } 2432 2433 /** 2434 * ice_vc_add_vlan_msg 2435 * @vf: pointer to the VF info 2436 * @msg: pointer to the msg buffer 2437 * 2438 * Add and program guest VLAN ID 2439 */ 2440 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) 2441 { 2442 return ice_vc_process_vlan_msg(vf, msg, true); 2443 } 2444 2445 /** 2446 * ice_vc_remove_vlan_msg 2447 * @vf: pointer to the VF info 2448 * @msg: pointer to the msg buffer 2449 * 2450 * remove programmed guest VLAN ID 2451 */ 2452 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) 2453 { 2454 return ice_vc_process_vlan_msg(vf, msg, false); 2455 } 2456 2457 /** 2458 * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not 2459 * @vsi: pointer to the VF VSI info 2460 */ 2461 static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi) 2462 { 2463 unsigned int i; 2464 2465 ice_for_each_alloc_rxq(vsi, i) 2466 if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS) 2467 return true; 2468 2469 return false; 2470 } 2471 2472 /** 2473 * ice_vc_ena_vlan_stripping 2474 * @vf: pointer to the VF info 2475 * 2476 * Enable VLAN header stripping for a given VF 2477 */ 2478 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) 2479 { 2480 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2481 struct ice_vsi *vsi; 2482 2483 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2484 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2485 goto error_param; 2486 } 2487 2488 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 2489 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2490 goto error_param; 2491 } 2492 2493 vsi = ice_get_vf_vsi(vf); 2494 if (!vsi) { 2495 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2496 goto error_param; 2497 } 2498 2499 if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) 2500 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2501 else 2502 vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; 2503 2504 error_param: 2505 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, 2506 v_ret, NULL, 0); 2507 } 2508 2509 /** 2510 * ice_vc_dis_vlan_stripping 2511 * @vf: pointer to the VF info 2512 * 2513 * Disable VLAN header stripping for a given VF 2514 */ 2515 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) 2516 { 2517 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2518 struct ice_vsi *vsi; 2519 2520 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2521 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2522 goto error_param; 2523 } 2524 2525 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { 2526 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2527 goto error_param; 2528 } 2529 2530 vsi = ice_get_vf_vsi(vf); 2531 if (!vsi) { 2532 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2533 goto error_param; 2534 } 2535 2536 if (vsi->inner_vlan_ops.dis_stripping(vsi)) 2537 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2538 else 2539 vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; 2540 2541 error_param: 2542 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 2543 v_ret, NULL, 0); 2544 } 2545 2546 /** 2547 * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware 2548 * @vf: pointer to the VF info 2549 */ 2550 static int ice_vc_get_rss_hena(struct ice_vf *vf) 2551 { 2552 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2553 struct virtchnl_rss_hena *vrh = NULL; 2554 int len = 0, ret; 2555 2556 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2557 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2558 goto err; 2559 } 2560 2561 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { 2562 dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n"); 2563 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2564 goto err; 2565 } 2566 2567 len = sizeof(struct virtchnl_rss_hena); 2568 vrh = kzalloc(len, GFP_KERNEL); 2569 if (!vrh) { 2570 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 2571 len = 0; 2572 goto err; 2573 } 2574 2575 vrh->hena = ICE_DEFAULT_RSS_HENA; 2576 err: 2577 /* send the response back to the VF */ 2578 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret, 2579 (u8 *)vrh, len); 2580 kfree(vrh); 2581 return ret; 2582 } 2583 2584 /** 2585 * ice_vc_set_rss_hena - set RSS HENA bits for the VF 2586 * @vf: pointer to the VF info 2587 * @msg: pointer to the msg buffer 2588 */ 2589 static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) 2590 { 2591 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg; 2592 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2593 struct ice_pf *pf = vf->pf; 2594 struct ice_vsi *vsi; 2595 struct device *dev; 2596 int status; 2597 2598 dev = ice_pf_to_dev(pf); 2599 2600 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2601 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2602 goto err; 2603 } 2604 2605 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { 2606 dev_err(dev, "RSS not supported by PF\n"); 2607 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2608 goto err; 2609 } 2610 2611 vsi = ice_get_vf_vsi(vf); 2612 if (!vsi) { 2613 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2614 goto err; 2615 } 2616 2617 /* clear all previously programmed RSS configuration to allow VF drivers 2618 * the ability to customize the RSS configuration and/or completely 2619 * disable RSS 2620 */ 2621 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx); 2622 if (status && !vrh->hena) { 2623 /* only report failure to clear the current RSS configuration if 2624 * that was clearly the VF's intention (i.e. vrh->hena = 0) 2625 */ 2626 v_ret = ice_err_to_virt_err(status); 2627 goto err; 2628 } else if (status) { 2629 /* allow the VF to update the RSS configuration even on failure 2630 * to clear the current RSS confguration in an attempt to keep 2631 * RSS in a working state 2632 */ 2633 dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n", 2634 vf->vf_id); 2635 } 2636 2637 if (vrh->hena) { 2638 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena); 2639 v_ret = ice_err_to_virt_err(status); 2640 } 2641 2642 /* send the response to the VF */ 2643 err: 2644 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret, 2645 NULL, 0); 2646 } 2647 2648 /** 2649 * ice_vc_query_rxdid - query RXDID supported by DDP package 2650 * @vf: pointer to VF info 2651 * 2652 * Called from VF to query a bitmap of supported flexible 2653 * descriptor RXDIDs of a DDP package. 2654 */ 2655 static int ice_vc_query_rxdid(struct ice_vf *vf) 2656 { 2657 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2658 struct virtchnl_supported_rxdids *rxdid = NULL; 2659 struct ice_hw *hw = &vf->pf->hw; 2660 struct ice_pf *pf = vf->pf; 2661 int len = 0; 2662 int ret, i; 2663 u32 regval; 2664 2665 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2666 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2667 goto err; 2668 } 2669 2670 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) { 2671 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2672 goto err; 2673 } 2674 2675 len = sizeof(struct virtchnl_supported_rxdids); 2676 rxdid = kzalloc(len, GFP_KERNEL); 2677 if (!rxdid) { 2678 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 2679 len = 0; 2680 goto err; 2681 } 2682 2683 /* RXDIDs supported by DDP package can be read from the register 2684 * to get the supported RXDID bitmap. But the legacy 32byte RXDID 2685 * is not listed in DDP package, add it in the bitmap manually. 2686 * Legacy 16byte descriptor is not supported. 2687 */ 2688 rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); 2689 2690 for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { 2691 regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); 2692 if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) 2693 & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) 2694 rxdid->supported_rxdids |= BIT(i); 2695 } 2696 2697 pf->supported_rxdids = rxdid->supported_rxdids; 2698 2699 err: 2700 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, 2701 v_ret, (u8 *)rxdid, len); 2702 kfree(rxdid); 2703 return ret; 2704 } 2705 2706 /** 2707 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization 2708 * @vf: VF to enable/disable VLAN stripping for on initialization 2709 * 2710 * Set the default for VLAN stripping based on whether a port VLAN is configured 2711 * and the current VLAN mode of the device. 2712 */ 2713 static int ice_vf_init_vlan_stripping(struct ice_vf *vf) 2714 { 2715 struct ice_vsi *vsi = ice_get_vf_vsi(vf); 2716 2717 vf->vlan_strip_ena = 0; 2718 2719 if (!vsi) 2720 return -EINVAL; 2721 2722 /* don't modify stripping if port VLAN is configured in SVM since the 2723 * port VLAN is based on the inner/single VLAN in SVM 2724 */ 2725 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw)) 2726 return 0; 2727 2728 if (ice_vf_vlan_offload_ena(vf->driver_caps)) { 2729 int err; 2730 2731 err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q); 2732 if (!err) 2733 vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; 2734 return err; 2735 } 2736 2737 return vsi->inner_vlan_ops.dis_stripping(vsi); 2738 } 2739 2740 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf) 2741 { 2742 if (vf->trusted) 2743 return VLAN_N_VID; 2744 else 2745 return ICE_MAX_VLAN_PER_VF; 2746 } 2747 2748 /** 2749 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used 2750 * @vf: VF that being checked for 2751 * 2752 * When the device is in double VLAN mode, check whether or not the outer VLAN 2753 * is allowed. 2754 */ 2755 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf) 2756 { 2757 if (ice_vf_is_port_vlan_ena(vf)) 2758 return true; 2759 2760 return false; 2761 } 2762 2763 /** 2764 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM 2765 * @vf: VF that capabilities are being set for 2766 * @caps: VLAN capabilities to populate 2767 * 2768 * Determine VLAN capabilities support based on whether a port VLAN is 2769 * configured. If a port VLAN is configured then the VF should use the inner 2770 * filtering/offload capabilities since the port VLAN is using the outer VLAN 2771 * capabilies. 2772 */ 2773 static void 2774 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps) 2775 { 2776 struct virtchnl_vlan_supported_caps *supported_caps; 2777 2778 if (ice_vf_outer_vlan_not_allowed(vf)) { 2779 /* until support for inner VLAN filtering is added when a port 2780 * VLAN is configured, only support software offloaded inner 2781 * VLANs when a port VLAN is confgured in DVM 2782 */ 2783 supported_caps = &caps->filtering.filtering_support; 2784 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 2785 2786 supported_caps = &caps->offloads.stripping_support; 2787 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2788 VIRTCHNL_VLAN_TOGGLE | 2789 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2790 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2791 2792 supported_caps = &caps->offloads.insertion_support; 2793 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2794 VIRTCHNL_VLAN_TOGGLE | 2795 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2796 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2797 2798 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 2799 caps->offloads.ethertype_match = 2800 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 2801 } else { 2802 supported_caps = &caps->filtering.filtering_support; 2803 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 2804 supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2805 VIRTCHNL_VLAN_ETHERTYPE_88A8 | 2806 VIRTCHNL_VLAN_ETHERTYPE_9100 | 2807 VIRTCHNL_VLAN_ETHERTYPE_AND; 2808 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2809 VIRTCHNL_VLAN_ETHERTYPE_88A8 | 2810 VIRTCHNL_VLAN_ETHERTYPE_9100; 2811 2812 supported_caps = &caps->offloads.stripping_support; 2813 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE | 2814 VIRTCHNL_VLAN_ETHERTYPE_8100 | 2815 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2816 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE | 2817 VIRTCHNL_VLAN_ETHERTYPE_8100 | 2818 VIRTCHNL_VLAN_ETHERTYPE_88A8 | 2819 VIRTCHNL_VLAN_ETHERTYPE_9100 | 2820 VIRTCHNL_VLAN_ETHERTYPE_XOR | 2821 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2; 2822 2823 supported_caps = &caps->offloads.insertion_support; 2824 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE | 2825 VIRTCHNL_VLAN_ETHERTYPE_8100 | 2826 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2827 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE | 2828 VIRTCHNL_VLAN_ETHERTYPE_8100 | 2829 VIRTCHNL_VLAN_ETHERTYPE_88A8 | 2830 VIRTCHNL_VLAN_ETHERTYPE_9100 | 2831 VIRTCHNL_VLAN_ETHERTYPE_XOR | 2832 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2; 2833 2834 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 2835 2836 caps->offloads.ethertype_match = 2837 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 2838 } 2839 2840 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf); 2841 } 2842 2843 /** 2844 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM 2845 * @vf: VF that capabilities are being set for 2846 * @caps: VLAN capabilities to populate 2847 * 2848 * Determine VLAN capabilities support based on whether a port VLAN is 2849 * configured. If a port VLAN is configured then the VF does not have any VLAN 2850 * filtering or offload capabilities since the port VLAN is using the inner VLAN 2851 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner 2852 * VLAN fitlering and offload capabilities. 2853 */ 2854 static void 2855 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps) 2856 { 2857 struct virtchnl_vlan_supported_caps *supported_caps; 2858 2859 if (ice_vf_is_port_vlan_ena(vf)) { 2860 supported_caps = &caps->filtering.filtering_support; 2861 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 2862 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2863 2864 supported_caps = &caps->offloads.stripping_support; 2865 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 2866 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2867 2868 supported_caps = &caps->offloads.insertion_support; 2869 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED; 2870 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2871 2872 caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED; 2873 caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED; 2874 caps->filtering.max_filters = 0; 2875 } else { 2876 supported_caps = &caps->filtering.filtering_support; 2877 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100; 2878 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2879 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 2880 2881 supported_caps = &caps->offloads.stripping_support; 2882 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2883 VIRTCHNL_VLAN_TOGGLE | 2884 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2885 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2886 2887 supported_caps = &caps->offloads.insertion_support; 2888 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 | 2889 VIRTCHNL_VLAN_TOGGLE | 2890 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1; 2891 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED; 2892 2893 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100; 2894 caps->offloads.ethertype_match = 2895 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION; 2896 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf); 2897 } 2898 } 2899 2900 /** 2901 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities 2902 * @vf: VF to determine VLAN capabilities for 2903 * 2904 * This will only be called if the VF and PF successfully negotiated 2905 * VIRTCHNL_VF_OFFLOAD_VLAN_V2. 2906 * 2907 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN 2908 * is configured or not. 2909 */ 2910 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf) 2911 { 2912 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 2913 struct virtchnl_vlan_caps *caps = NULL; 2914 int err, len = 0; 2915 2916 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 2917 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 2918 goto out; 2919 } 2920 2921 caps = kzalloc(sizeof(*caps), GFP_KERNEL); 2922 if (!caps) { 2923 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; 2924 goto out; 2925 } 2926 len = sizeof(*caps); 2927 2928 if (ice_is_dvm_ena(&vf->pf->hw)) 2929 ice_vc_set_dvm_caps(vf, caps); 2930 else 2931 ice_vc_set_svm_caps(vf, caps); 2932 2933 /* store negotiated caps to prevent invalid VF messages */ 2934 memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps)); 2935 2936 out: 2937 err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 2938 v_ret, (u8 *)caps, len); 2939 kfree(caps); 2940 return err; 2941 } 2942 2943 /** 2944 * ice_vc_validate_vlan_tpid - validate VLAN TPID 2945 * @filtering_caps: negotiated/supported VLAN filtering capabilities 2946 * @tpid: VLAN TPID used for validation 2947 * 2948 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against 2949 * the negotiated/supported filtering caps to see if the VLAN TPID is valid. 2950 */ 2951 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid) 2952 { 2953 enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED; 2954 2955 switch (tpid) { 2956 case ETH_P_8021Q: 2957 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100; 2958 break; 2959 case ETH_P_8021AD: 2960 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8; 2961 break; 2962 case ETH_P_QINQ1: 2963 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100; 2964 break; 2965 } 2966 2967 if (!(filtering_caps & vlan_ethertype)) 2968 return false; 2969 2970 return true; 2971 } 2972 2973 /** 2974 * ice_vc_is_valid_vlan - validate the virtchnl_vlan 2975 * @vc_vlan: virtchnl_vlan to validate 2976 * 2977 * If the VLAN TCI and VLAN TPID are 0, then this filter is invalid, so return 2978 * false. Otherwise return true. 2979 */ 2980 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan) 2981 { 2982 if (!vc_vlan->tci || !vc_vlan->tpid) 2983 return false; 2984 2985 return true; 2986 } 2987 2988 /** 2989 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF 2990 * @vfc: negotiated/supported VLAN filtering capabilities 2991 * @vfl: VLAN filter list from VF to validate 2992 * 2993 * Validate all of the filters in the VLAN filter list from the VF. If any of 2994 * the checks fail then return false. Otherwise return true. 2995 */ 2996 static bool 2997 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc, 2998 struct virtchnl_vlan_filter_list_v2 *vfl) 2999 { 3000 u16 i; 3001 3002 if (!vfl->num_elements) 3003 return false; 3004 3005 for (i = 0; i < vfl->num_elements; i++) { 3006 struct virtchnl_vlan_supported_caps *filtering_support = 3007 &vfc->filtering_support; 3008 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i]; 3009 struct virtchnl_vlan *outer = &vlan_fltr->outer; 3010 struct virtchnl_vlan *inner = &vlan_fltr->inner; 3011 3012 if ((ice_vc_is_valid_vlan(outer) && 3013 filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) || 3014 (ice_vc_is_valid_vlan(inner) && 3015 filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED)) 3016 return false; 3017 3018 if ((outer->tci_mask && 3019 !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) || 3020 (inner->tci_mask && 3021 !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK))) 3022 return false; 3023 3024 if (((outer->tci & VLAN_PRIO_MASK) && 3025 !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) || 3026 ((inner->tci & VLAN_PRIO_MASK) && 3027 !(filtering_support->inner & VIRTCHNL_VLAN_PRIO))) 3028 return false; 3029 3030 if ((ice_vc_is_valid_vlan(outer) && 3031 !ice_vc_validate_vlan_tpid(filtering_support->outer, 3032 outer->tpid)) || 3033 (ice_vc_is_valid_vlan(inner) && 3034 !ice_vc_validate_vlan_tpid(filtering_support->inner, 3035 inner->tpid))) 3036 return false; 3037 } 3038 3039 return true; 3040 } 3041 3042 /** 3043 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan 3044 * @vc_vlan: struct virtchnl_vlan to transform 3045 */ 3046 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan) 3047 { 3048 struct ice_vlan vlan = { 0 }; 3049 3050 vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 3051 vlan.vid = vc_vlan->tci & VLAN_VID_MASK; 3052 vlan.tpid = vc_vlan->tpid; 3053 3054 return vlan; 3055 } 3056 3057 /** 3058 * ice_vc_vlan_action - action to perform on the virthcnl_vlan 3059 * @vsi: VF's VSI used to perform the action 3060 * @vlan_action: function to perform the action with (i.e. add/del) 3061 * @vlan: VLAN filter to perform the action with 3062 */ 3063 static int 3064 ice_vc_vlan_action(struct ice_vsi *vsi, 3065 int (*vlan_action)(struct ice_vsi *, struct ice_vlan *), 3066 struct ice_vlan *vlan) 3067 { 3068 int err; 3069 3070 err = vlan_action(vsi, vlan); 3071 if (err) 3072 return err; 3073 3074 return 0; 3075 } 3076 3077 /** 3078 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list 3079 * @vf: VF used to delete the VLAN(s) 3080 * @vsi: VF's VSI used to delete the VLAN(s) 3081 * @vfl: virthchnl filter list used to delete the filters 3082 */ 3083 static int 3084 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi, 3085 struct virtchnl_vlan_filter_list_v2 *vfl) 3086 { 3087 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf); 3088 int err; 3089 u16 i; 3090 3091 for (i = 0; i < vfl->num_elements; i++) { 3092 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i]; 3093 struct virtchnl_vlan *vc_vlan; 3094 3095 vc_vlan = &vlan_fltr->outer; 3096 if (ice_vc_is_valid_vlan(vc_vlan)) { 3097 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 3098 3099 err = ice_vc_vlan_action(vsi, 3100 vsi->outer_vlan_ops.del_vlan, 3101 &vlan); 3102 if (err) 3103 return err; 3104 3105 if (vlan_promisc) 3106 ice_vf_dis_vlan_promisc(vsi, &vlan); 3107 3108 /* Disable VLAN filtering when only VLAN 0 is left */ 3109 if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) { 3110 err = vsi->outer_vlan_ops.dis_tx_filtering(vsi); 3111 if (err) 3112 return err; 3113 } 3114 } 3115 3116 vc_vlan = &vlan_fltr->inner; 3117 if (ice_vc_is_valid_vlan(vc_vlan)) { 3118 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 3119 3120 err = ice_vc_vlan_action(vsi, 3121 vsi->inner_vlan_ops.del_vlan, 3122 &vlan); 3123 if (err) 3124 return err; 3125 3126 /* no support for VLAN promiscuous on inner VLAN unless 3127 * we are in Single VLAN Mode (SVM) 3128 */ 3129 if (!ice_is_dvm_ena(&vsi->back->hw)) { 3130 if (vlan_promisc) 3131 ice_vf_dis_vlan_promisc(vsi, &vlan); 3132 3133 /* Disable VLAN filtering when only VLAN 0 is left */ 3134 if (!ice_vsi_has_non_zero_vlans(vsi)) { 3135 err = vsi->inner_vlan_ops.dis_tx_filtering(vsi); 3136 if (err) 3137 return err; 3138 } 3139 } 3140 } 3141 } 3142 3143 return 0; 3144 } 3145 3146 /** 3147 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2 3148 * @vf: VF the message was received from 3149 * @msg: message received from the VF 3150 */ 3151 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg) 3152 { 3153 struct virtchnl_vlan_filter_list_v2 *vfl = 3154 (struct virtchnl_vlan_filter_list_v2 *)msg; 3155 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3156 struct ice_vsi *vsi; 3157 3158 if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering, 3159 vfl)) { 3160 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3161 goto out; 3162 } 3163 3164 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) { 3165 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3166 goto out; 3167 } 3168 3169 vsi = ice_get_vf_vsi(vf); 3170 if (!vsi) { 3171 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3172 goto out; 3173 } 3174 3175 if (ice_vc_del_vlans(vf, vsi, vfl)) 3176 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3177 3178 out: 3179 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL, 3180 0); 3181 } 3182 3183 /** 3184 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list 3185 * @vf: VF used to add the VLAN(s) 3186 * @vsi: VF's VSI used to add the VLAN(s) 3187 * @vfl: virthchnl filter list used to add the filters 3188 */ 3189 static int 3190 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi, 3191 struct virtchnl_vlan_filter_list_v2 *vfl) 3192 { 3193 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf); 3194 int err; 3195 u16 i; 3196 3197 for (i = 0; i < vfl->num_elements; i++) { 3198 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i]; 3199 struct virtchnl_vlan *vc_vlan; 3200 3201 vc_vlan = &vlan_fltr->outer; 3202 if (ice_vc_is_valid_vlan(vc_vlan)) { 3203 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 3204 3205 err = ice_vc_vlan_action(vsi, 3206 vsi->outer_vlan_ops.add_vlan, 3207 &vlan); 3208 if (err) 3209 return err; 3210 3211 if (vlan_promisc) { 3212 err = ice_vf_ena_vlan_promisc(vsi, &vlan); 3213 if (err) 3214 return err; 3215 } 3216 3217 /* Enable VLAN filtering on first non-zero VLAN */ 3218 if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) { 3219 err = vsi->outer_vlan_ops.ena_tx_filtering(vsi); 3220 if (err) 3221 return err; 3222 } 3223 } 3224 3225 vc_vlan = &vlan_fltr->inner; 3226 if (ice_vc_is_valid_vlan(vc_vlan)) { 3227 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan); 3228 3229 err = ice_vc_vlan_action(vsi, 3230 vsi->inner_vlan_ops.add_vlan, 3231 &vlan); 3232 if (err) 3233 return err; 3234 3235 /* no support for VLAN promiscuous on inner VLAN unless 3236 * we are in Single VLAN Mode (SVM) 3237 */ 3238 if (!ice_is_dvm_ena(&vsi->back->hw)) { 3239 if (vlan_promisc) { 3240 err = ice_vf_ena_vlan_promisc(vsi, &vlan); 3241 if (err) 3242 return err; 3243 } 3244 3245 /* Enable VLAN filtering on first non-zero VLAN */ 3246 if (vf->spoofchk && vlan.vid) { 3247 err = vsi->inner_vlan_ops.ena_tx_filtering(vsi); 3248 if (err) 3249 return err; 3250 } 3251 } 3252 } 3253 } 3254 3255 return 0; 3256 } 3257 3258 /** 3259 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF 3260 * @vsi: VF VSI used to get number of existing VLAN filters 3261 * @vfc: negotiated/supported VLAN filtering capabilities 3262 * @vfl: VLAN filter list from VF to validate 3263 * 3264 * Validate all of the filters in the VLAN filter list from the VF during the 3265 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false. 3266 * Otherwise return true. 3267 */ 3268 static bool 3269 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi, 3270 struct virtchnl_vlan_filtering_caps *vfc, 3271 struct virtchnl_vlan_filter_list_v2 *vfl) 3272 { 3273 u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) + 3274 vfl->num_elements; 3275 3276 if (num_requested_filters > vfc->max_filters) 3277 return false; 3278 3279 return ice_vc_validate_vlan_filter_list(vfc, vfl); 3280 } 3281 3282 /** 3283 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2 3284 * @vf: VF the message was received from 3285 * @msg: message received from the VF 3286 */ 3287 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg) 3288 { 3289 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3290 struct virtchnl_vlan_filter_list_v2 *vfl = 3291 (struct virtchnl_vlan_filter_list_v2 *)msg; 3292 struct ice_vsi *vsi; 3293 3294 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 3295 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3296 goto out; 3297 } 3298 3299 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) { 3300 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3301 goto out; 3302 } 3303 3304 vsi = ice_get_vf_vsi(vf); 3305 if (!vsi) { 3306 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3307 goto out; 3308 } 3309 3310 if (!ice_vc_validate_add_vlan_filter_list(vsi, 3311 &vf->vlan_v2_caps.filtering, 3312 vfl)) { 3313 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3314 goto out; 3315 } 3316 3317 if (ice_vc_add_vlans(vf, vsi, vfl)) 3318 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3319 3320 out: 3321 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL, 3322 0); 3323 } 3324 3325 /** 3326 * ice_vc_valid_vlan_setting - validate VLAN setting 3327 * @negotiated_settings: negotiated VLAN settings during VF init 3328 * @ethertype_setting: ethertype(s) requested for the VLAN setting 3329 */ 3330 static bool 3331 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting) 3332 { 3333 if (ethertype_setting && !(negotiated_settings & ethertype_setting)) 3334 return false; 3335 3336 /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if 3337 * VIRTHCNL_VLAN_ETHERTYPE_AND is not negotiated/supported 3338 */ 3339 if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) && 3340 hweight32(ethertype_setting) > 1) 3341 return false; 3342 3343 /* ability to modify the VLAN setting was not negotiated */ 3344 if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE)) 3345 return false; 3346 3347 return true; 3348 } 3349 3350 /** 3351 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message 3352 * @caps: negotiated VLAN settings during VF init 3353 * @msg: message to validate 3354 * 3355 * Used to validate any VLAN virtchnl message sent as a 3356 * virtchnl_vlan_setting structure. Validates the message against the 3357 * negotiated/supported caps during VF driver init. 3358 */ 3359 static bool 3360 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps, 3361 struct virtchnl_vlan_setting *msg) 3362 { 3363 if ((!msg->outer_ethertype_setting && 3364 !msg->inner_ethertype_setting) || 3365 (!caps->outer && !caps->inner)) 3366 return false; 3367 3368 if (msg->outer_ethertype_setting && 3369 !ice_vc_valid_vlan_setting(caps->outer, 3370 msg->outer_ethertype_setting)) 3371 return false; 3372 3373 if (msg->inner_ethertype_setting && 3374 !ice_vc_valid_vlan_setting(caps->inner, 3375 msg->inner_ethertype_setting)) 3376 return false; 3377 3378 return true; 3379 } 3380 3381 /** 3382 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID 3383 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID 3384 * @tpid: VLAN TPID to populate 3385 */ 3386 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid) 3387 { 3388 switch (ethertype_setting) { 3389 case VIRTCHNL_VLAN_ETHERTYPE_8100: 3390 *tpid = ETH_P_8021Q; 3391 break; 3392 case VIRTCHNL_VLAN_ETHERTYPE_88A8: 3393 *tpid = ETH_P_8021AD; 3394 break; 3395 case VIRTCHNL_VLAN_ETHERTYPE_9100: 3396 *tpid = ETH_P_QINQ1; 3397 break; 3398 default: 3399 *tpid = 0; 3400 return -EINVAL; 3401 } 3402 3403 return 0; 3404 } 3405 3406 /** 3407 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting 3408 * @vsi: VF's VSI used to enable the VLAN offload 3409 * @ena_offload: function used to enable the VLAN offload 3410 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for 3411 */ 3412 static int 3413 ice_vc_ena_vlan_offload(struct ice_vsi *vsi, 3414 int (*ena_offload)(struct ice_vsi *vsi, u16 tpid), 3415 u32 ethertype_setting) 3416 { 3417 u16 tpid; 3418 int err; 3419 3420 err = ice_vc_get_tpid(ethertype_setting, &tpid); 3421 if (err) 3422 return err; 3423 3424 err = ena_offload(vsi, tpid); 3425 if (err) 3426 return err; 3427 3428 return 0; 3429 } 3430 3431 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3 3432 #define ICE_L2TSEL_BIT_OFFSET 23 3433 enum ice_l2tsel { 3434 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND, 3435 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1, 3436 }; 3437 3438 /** 3439 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI 3440 * @vsi: VSI used to update l2tsel on 3441 * @l2tsel: l2tsel setting requested 3442 * 3443 * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel. 3444 * This will modify which descriptor field the first offloaded VLAN will be 3445 * stripped into. 3446 */ 3447 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel) 3448 { 3449 struct ice_hw *hw = &vsi->back->hw; 3450 u32 l2tsel_bit; 3451 int i; 3452 3453 if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND) 3454 l2tsel_bit = 0; 3455 else 3456 l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET); 3457 3458 for (i = 0; i < vsi->alloc_rxq; i++) { 3459 u16 pfq = vsi->rxq_map[i]; 3460 u32 qrx_context_offset; 3461 u32 regval; 3462 3463 qrx_context_offset = 3464 QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq); 3465 3466 regval = rd32(hw, qrx_context_offset); 3467 regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET); 3468 regval |= l2tsel_bit; 3469 wr32(hw, qrx_context_offset, regval); 3470 } 3471 } 3472 3473 /** 3474 * ice_vc_ena_vlan_stripping_v2_msg 3475 * @vf: VF the message was received from 3476 * @msg: message received from the VF 3477 * 3478 * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 3479 */ 3480 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) 3481 { 3482 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3483 struct virtchnl_vlan_supported_caps *stripping_support; 3484 struct virtchnl_vlan_setting *strip_msg = 3485 (struct virtchnl_vlan_setting *)msg; 3486 u32 ethertype_setting; 3487 struct ice_vsi *vsi; 3488 3489 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 3490 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3491 goto out; 3492 } 3493 3494 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) { 3495 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3496 goto out; 3497 } 3498 3499 vsi = ice_get_vf_vsi(vf); 3500 if (!vsi) { 3501 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3502 goto out; 3503 } 3504 3505 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support; 3506 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) { 3507 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3508 goto out; 3509 } 3510 3511 if (ice_vsi_is_rxq_crc_strip_dis(vsi)) { 3512 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 3513 goto out; 3514 } 3515 3516 ethertype_setting = strip_msg->outer_ethertype_setting; 3517 if (ethertype_setting) { 3518 if (ice_vc_ena_vlan_offload(vsi, 3519 vsi->outer_vlan_ops.ena_stripping, 3520 ethertype_setting)) { 3521 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3522 goto out; 3523 } else { 3524 enum ice_l2tsel l2tsel = 3525 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND; 3526 3527 /* PF tells the VF that the outer VLAN tag is always 3528 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and 3529 * inner is always extracted to 3530 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to 3531 * support outer stripping so the first tag always ends 3532 * up in L2TAG2_2ND and the second/inner tag, if 3533 * enabled, is extracted in L2TAG1. 3534 */ 3535 ice_vsi_update_l2tsel(vsi, l2tsel); 3536 3537 vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA; 3538 } 3539 } 3540 3541 ethertype_setting = strip_msg->inner_ethertype_setting; 3542 if (ethertype_setting && 3543 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping, 3544 ethertype_setting)) { 3545 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3546 goto out; 3547 } 3548 3549 if (ethertype_setting) 3550 vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA; 3551 3552 out: 3553 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, 3554 v_ret, NULL, 0); 3555 } 3556 3557 /** 3558 * ice_vc_dis_vlan_stripping_v2_msg 3559 * @vf: VF the message was received from 3560 * @msg: message received from the VF 3561 * 3562 * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 3563 */ 3564 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg) 3565 { 3566 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3567 struct virtchnl_vlan_supported_caps *stripping_support; 3568 struct virtchnl_vlan_setting *strip_msg = 3569 (struct virtchnl_vlan_setting *)msg; 3570 u32 ethertype_setting; 3571 struct ice_vsi *vsi; 3572 3573 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 3574 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3575 goto out; 3576 } 3577 3578 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) { 3579 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3580 goto out; 3581 } 3582 3583 vsi = ice_get_vf_vsi(vf); 3584 if (!vsi) { 3585 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3586 goto out; 3587 } 3588 3589 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support; 3590 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) { 3591 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3592 goto out; 3593 } 3594 3595 ethertype_setting = strip_msg->outer_ethertype_setting; 3596 if (ethertype_setting) { 3597 if (vsi->outer_vlan_ops.dis_stripping(vsi)) { 3598 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3599 goto out; 3600 } else { 3601 enum ice_l2tsel l2tsel = 3602 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1; 3603 3604 /* PF tells the VF that the outer VLAN tag is always 3605 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and 3606 * inner is always extracted to 3607 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to 3608 * support inner stripping while outer stripping is 3609 * disabled so that the first and only tag is extracted 3610 * in L2TAG1. 3611 */ 3612 ice_vsi_update_l2tsel(vsi, l2tsel); 3613 3614 vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA; 3615 } 3616 } 3617 3618 ethertype_setting = strip_msg->inner_ethertype_setting; 3619 if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) { 3620 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3621 goto out; 3622 } 3623 3624 if (ethertype_setting) 3625 vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA; 3626 3627 out: 3628 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, 3629 v_ret, NULL, 0); 3630 } 3631 3632 /** 3633 * ice_vc_ena_vlan_insertion_v2_msg 3634 * @vf: VF the message was received from 3635 * @msg: message received from the VF 3636 * 3637 * virthcnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 3638 */ 3639 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg) 3640 { 3641 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3642 struct virtchnl_vlan_supported_caps *insertion_support; 3643 struct virtchnl_vlan_setting *insertion_msg = 3644 (struct virtchnl_vlan_setting *)msg; 3645 u32 ethertype_setting; 3646 struct ice_vsi *vsi; 3647 3648 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 3649 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3650 goto out; 3651 } 3652 3653 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) { 3654 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3655 goto out; 3656 } 3657 3658 vsi = ice_get_vf_vsi(vf); 3659 if (!vsi) { 3660 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3661 goto out; 3662 } 3663 3664 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support; 3665 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) { 3666 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3667 goto out; 3668 } 3669 3670 ethertype_setting = insertion_msg->outer_ethertype_setting; 3671 if (ethertype_setting && 3672 ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion, 3673 ethertype_setting)) { 3674 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3675 goto out; 3676 } 3677 3678 ethertype_setting = insertion_msg->inner_ethertype_setting; 3679 if (ethertype_setting && 3680 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion, 3681 ethertype_setting)) { 3682 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3683 goto out; 3684 } 3685 3686 out: 3687 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2, 3688 v_ret, NULL, 0); 3689 } 3690 3691 /** 3692 * ice_vc_dis_vlan_insertion_v2_msg 3693 * @vf: VF the message was received from 3694 * @msg: message received from the VF 3695 * 3696 * virthcnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 3697 */ 3698 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg) 3699 { 3700 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3701 struct virtchnl_vlan_supported_caps *insertion_support; 3702 struct virtchnl_vlan_setting *insertion_msg = 3703 (struct virtchnl_vlan_setting *)msg; 3704 u32 ethertype_setting; 3705 struct ice_vsi *vsi; 3706 3707 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { 3708 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3709 goto out; 3710 } 3711 3712 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) { 3713 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3714 goto out; 3715 } 3716 3717 vsi = ice_get_vf_vsi(vf); 3718 if (!vsi) { 3719 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3720 goto out; 3721 } 3722 3723 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support; 3724 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) { 3725 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3726 goto out; 3727 } 3728 3729 ethertype_setting = insertion_msg->outer_ethertype_setting; 3730 if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) { 3731 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3732 goto out; 3733 } 3734 3735 ethertype_setting = insertion_msg->inner_ethertype_setting; 3736 if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) { 3737 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3738 goto out; 3739 } 3740 3741 out: 3742 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2, 3743 v_ret, NULL, 0); 3744 } 3745 3746 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { 3747 .get_ver_msg = ice_vc_get_ver_msg, 3748 .get_vf_res_msg = ice_vc_get_vf_res_msg, 3749 .reset_vf = ice_vc_reset_vf_msg, 3750 .add_mac_addr_msg = ice_vc_add_mac_addr_msg, 3751 .del_mac_addr_msg = ice_vc_del_mac_addr_msg, 3752 .cfg_qs_msg = ice_vc_cfg_qs_msg, 3753 .ena_qs_msg = ice_vc_ena_qs_msg, 3754 .dis_qs_msg = ice_vc_dis_qs_msg, 3755 .request_qs_msg = ice_vc_request_qs_msg, 3756 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, 3757 .config_rss_key = ice_vc_config_rss_key, 3758 .config_rss_lut = ice_vc_config_rss_lut, 3759 .get_stats_msg = ice_vc_get_stats_msg, 3760 .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg, 3761 .add_vlan_msg = ice_vc_add_vlan_msg, 3762 .remove_vlan_msg = ice_vc_remove_vlan_msg, 3763 .query_rxdid = ice_vc_query_rxdid, 3764 .get_rss_hena = ice_vc_get_rss_hena, 3765 .set_rss_hena_msg = ice_vc_set_rss_hena, 3766 .ena_vlan_stripping = ice_vc_ena_vlan_stripping, 3767 .dis_vlan_stripping = ice_vc_dis_vlan_stripping, 3768 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, 3769 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, 3770 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, 3771 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, 3772 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, 3773 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, 3774 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, 3775 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, 3776 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, 3777 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, 3778 }; 3779 3780 /** 3781 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops 3782 * @vf: the VF to switch ops 3783 */ 3784 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) 3785 { 3786 vf->virtchnl_ops = &ice_virtchnl_dflt_ops; 3787 } 3788 3789 /** 3790 * ice_vc_repr_add_mac 3791 * @vf: pointer to VF 3792 * @msg: virtchannel message 3793 * 3794 * When port representors are created, we do not add MAC rule 3795 * to firmware, we store it so that PF could report same 3796 * MAC as VF. 3797 */ 3798 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) 3799 { 3800 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; 3801 struct virtchnl_ether_addr_list *al = 3802 (struct virtchnl_ether_addr_list *)msg; 3803 struct ice_vsi *vsi; 3804 struct ice_pf *pf; 3805 int i; 3806 3807 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || 3808 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { 3809 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3810 goto handle_mac_exit; 3811 } 3812 3813 pf = vf->pf; 3814 3815 vsi = ice_get_vf_vsi(vf); 3816 if (!vsi) { 3817 v_ret = VIRTCHNL_STATUS_ERR_PARAM; 3818 goto handle_mac_exit; 3819 } 3820 3821 for (i = 0; i < al->num_elements; i++) { 3822 u8 *mac_addr = al->list[i].addr; 3823 3824 if (!is_unicast_ether_addr(mac_addr) || 3825 ether_addr_equal(mac_addr, vf->hw_lan_addr)) 3826 continue; 3827 3828 if (vf->pf_set_mac) { 3829 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); 3830 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; 3831 goto handle_mac_exit; 3832 } 3833 3834 ice_vfhw_mac_add(vf, &al->list[i]); 3835 vf->num_mac++; 3836 break; 3837 } 3838 3839 handle_mac_exit: 3840 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, 3841 v_ret, NULL, 0); 3842 } 3843 3844 /** 3845 * ice_vc_repr_del_mac - response with success for deleting MAC 3846 * @vf: pointer to VF 3847 * @msg: virtchannel message 3848 * 3849 * Respond with success to not break normal VF flow. 3850 * For legacy VF driver try to update cached MAC address. 3851 */ 3852 static int 3853 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg) 3854 { 3855 struct virtchnl_ether_addr_list *al = 3856 (struct virtchnl_ether_addr_list *)msg; 3857 3858 ice_update_legacy_cached_mac(vf, &al->list[0]); 3859 3860 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, 3861 VIRTCHNL_STATUS_SUCCESS, NULL, 0); 3862 } 3863 3864 static int 3865 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg) 3866 { 3867 dev_dbg(ice_pf_to_dev(vf->pf), 3868 "Can't config promiscuous mode in switchdev mode for VF %d\n", 3869 vf->vf_id); 3870 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, 3871 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 3872 NULL, 0); 3873 } 3874 3875 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { 3876 .get_ver_msg = ice_vc_get_ver_msg, 3877 .get_vf_res_msg = ice_vc_get_vf_res_msg, 3878 .reset_vf = ice_vc_reset_vf_msg, 3879 .add_mac_addr_msg = ice_vc_repr_add_mac, 3880 .del_mac_addr_msg = ice_vc_repr_del_mac, 3881 .cfg_qs_msg = ice_vc_cfg_qs_msg, 3882 .ena_qs_msg = ice_vc_ena_qs_msg, 3883 .dis_qs_msg = ice_vc_dis_qs_msg, 3884 .request_qs_msg = ice_vc_request_qs_msg, 3885 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg, 3886 .config_rss_key = ice_vc_config_rss_key, 3887 .config_rss_lut = ice_vc_config_rss_lut, 3888 .get_stats_msg = ice_vc_get_stats_msg, 3889 .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode, 3890 .add_vlan_msg = ice_vc_add_vlan_msg, 3891 .remove_vlan_msg = ice_vc_remove_vlan_msg, 3892 .query_rxdid = ice_vc_query_rxdid, 3893 .get_rss_hena = ice_vc_get_rss_hena, 3894 .set_rss_hena_msg = ice_vc_set_rss_hena, 3895 .ena_vlan_stripping = ice_vc_ena_vlan_stripping, 3896 .dis_vlan_stripping = ice_vc_dis_vlan_stripping, 3897 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg, 3898 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr, 3899 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr, 3900 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps, 3901 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg, 3902 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg, 3903 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg, 3904 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, 3905 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, 3906 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, 3907 }; 3908 3909 /** 3910 * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops 3911 * @vf: the VF to switch ops 3912 */ 3913 void ice_virtchnl_set_repr_ops(struct ice_vf *vf) 3914 { 3915 vf->virtchnl_ops = &ice_virtchnl_repr_ops; 3916 } 3917 3918 /** 3919 * ice_is_malicious_vf - check if this vf might be overflowing mailbox 3920 * @vf: the VF to check 3921 * @mbxdata: data about the state of the mailbox 3922 * 3923 * Detect if a given VF might be malicious and attempting to overflow the PF 3924 * mailbox. If so, log a warning message and ignore this event. 3925 */ 3926 static bool 3927 ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata) 3928 { 3929 bool report_malvf = false; 3930 struct device *dev; 3931 struct ice_pf *pf; 3932 int status; 3933 3934 pf = vf->pf; 3935 dev = ice_pf_to_dev(pf); 3936 3937 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) 3938 return vf->mbx_info.malicious; 3939 3940 /* check to see if we have a newly malicious VF */ 3941 status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info, 3942 &report_malvf); 3943 if (status) 3944 dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n", 3945 vf->vf_id, vf->dev_lan_addr, status); 3946 3947 if (report_malvf) { 3948 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); 3949 u8 zero_addr[ETH_ALEN] = {}; 3950 3951 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n", 3952 vf->dev_lan_addr, 3953 pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr); 3954 } 3955 3956 return vf->mbx_info.malicious; 3957 } 3958 3959 /** 3960 * ice_vc_process_vf_msg - Process request from VF 3961 * @pf: pointer to the PF structure 3962 * @event: pointer to the AQ event 3963 * @mbxdata: information used to detect VF attempting mailbox overflow 3964 * 3965 * called from the common asq/arq handler to 3966 * process request from VF 3967 */ 3968 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, 3969 struct ice_mbx_data *mbxdata) 3970 { 3971 u32 v_opcode = le32_to_cpu(event->desc.cookie_high); 3972 s16 vf_id = le16_to_cpu(event->desc.retval); 3973 const struct ice_virtchnl_ops *ops; 3974 u16 msglen = event->msg_len; 3975 u8 *msg = event->msg_buf; 3976 struct ice_vf *vf = NULL; 3977 struct device *dev; 3978 int err = 0; 3979 3980 dev = ice_pf_to_dev(pf); 3981 3982 vf = ice_get_vf_by_id(pf, vf_id); 3983 if (!vf) { 3984 dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n", 3985 vf_id, v_opcode, msglen); 3986 return; 3987 } 3988 3989 mutex_lock(&vf->cfg_lock); 3990 3991 /* Check if the VF is trying to overflow the mailbox */ 3992 if (ice_is_malicious_vf(vf, mbxdata)) 3993 goto finish; 3994 3995 /* Check if VF is disabled. */ 3996 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { 3997 err = -EPERM; 3998 goto error_handler; 3999 } 4000 4001 ops = vf->virtchnl_ops; 4002 4003 /* Perform basic checks on the msg */ 4004 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); 4005 if (err) { 4006 if (err == VIRTCHNL_STATUS_ERR_PARAM) 4007 err = -EPERM; 4008 else 4009 err = -EINVAL; 4010 } 4011 4012 error_handler: 4013 if (err) { 4014 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, 4015 NULL, 0); 4016 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", 4017 vf_id, v_opcode, msglen, err); 4018 goto finish; 4019 } 4020 4021 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { 4022 ice_vc_send_msg_to_vf(vf, v_opcode, 4023 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 4024 0); 4025 goto finish; 4026 } 4027 4028 switch (v_opcode) { 4029 case VIRTCHNL_OP_VERSION: 4030 err = ops->get_ver_msg(vf, msg); 4031 break; 4032 case VIRTCHNL_OP_GET_VF_RESOURCES: 4033 err = ops->get_vf_res_msg(vf, msg); 4034 if (ice_vf_init_vlan_stripping(vf)) 4035 dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n", 4036 vf->vf_id); 4037 ice_vc_notify_vf_link_state(vf); 4038 break; 4039 case VIRTCHNL_OP_RESET_VF: 4040 ops->reset_vf(vf); 4041 break; 4042 case VIRTCHNL_OP_ADD_ETH_ADDR: 4043 err = ops->add_mac_addr_msg(vf, msg); 4044 break; 4045 case VIRTCHNL_OP_DEL_ETH_ADDR: 4046 err = ops->del_mac_addr_msg(vf, msg); 4047 break; 4048 case VIRTCHNL_OP_CONFIG_VSI_QUEUES: 4049 err = ops->cfg_qs_msg(vf, msg); 4050 break; 4051 case VIRTCHNL_OP_ENABLE_QUEUES: 4052 err = ops->ena_qs_msg(vf, msg); 4053 ice_vc_notify_vf_link_state(vf); 4054 break; 4055 case VIRTCHNL_OP_DISABLE_QUEUES: 4056 err = ops->dis_qs_msg(vf, msg); 4057 break; 4058 case VIRTCHNL_OP_REQUEST_QUEUES: 4059 err = ops->request_qs_msg(vf, msg); 4060 break; 4061 case VIRTCHNL_OP_CONFIG_IRQ_MAP: 4062 err = ops->cfg_irq_map_msg(vf, msg); 4063 break; 4064 case VIRTCHNL_OP_CONFIG_RSS_KEY: 4065 err = ops->config_rss_key(vf, msg); 4066 break; 4067 case VIRTCHNL_OP_CONFIG_RSS_LUT: 4068 err = ops->config_rss_lut(vf, msg); 4069 break; 4070 case VIRTCHNL_OP_GET_STATS: 4071 err = ops->get_stats_msg(vf, msg); 4072 break; 4073 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: 4074 err = ops->cfg_promiscuous_mode_msg(vf, msg); 4075 break; 4076 case VIRTCHNL_OP_ADD_VLAN: 4077 err = ops->add_vlan_msg(vf, msg); 4078 break; 4079 case VIRTCHNL_OP_DEL_VLAN: 4080 err = ops->remove_vlan_msg(vf, msg); 4081 break; 4082 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: 4083 err = ops->query_rxdid(vf); 4084 break; 4085 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: 4086 err = ops->get_rss_hena(vf); 4087 break; 4088 case VIRTCHNL_OP_SET_RSS_HENA: 4089 err = ops->set_rss_hena_msg(vf, msg); 4090 break; 4091 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: 4092 err = ops->ena_vlan_stripping(vf); 4093 break; 4094 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: 4095 err = ops->dis_vlan_stripping(vf); 4096 break; 4097 case VIRTCHNL_OP_ADD_FDIR_FILTER: 4098 err = ops->add_fdir_fltr_msg(vf, msg); 4099 break; 4100 case VIRTCHNL_OP_DEL_FDIR_FILTER: 4101 err = ops->del_fdir_fltr_msg(vf, msg); 4102 break; 4103 case VIRTCHNL_OP_ADD_RSS_CFG: 4104 err = ops->handle_rss_cfg_msg(vf, msg, true); 4105 break; 4106 case VIRTCHNL_OP_DEL_RSS_CFG: 4107 err = ops->handle_rss_cfg_msg(vf, msg, false); 4108 break; 4109 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: 4110 err = ops->get_offload_vlan_v2_caps(vf); 4111 break; 4112 case VIRTCHNL_OP_ADD_VLAN_V2: 4113 err = ops->add_vlan_v2_msg(vf, msg); 4114 break; 4115 case VIRTCHNL_OP_DEL_VLAN_V2: 4116 err = ops->remove_vlan_v2_msg(vf, msg); 4117 break; 4118 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: 4119 err = ops->ena_vlan_stripping_v2_msg(vf, msg); 4120 break; 4121 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: 4122 err = ops->dis_vlan_stripping_v2_msg(vf, msg); 4123 break; 4124 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: 4125 err = ops->ena_vlan_insertion_v2_msg(vf, msg); 4126 break; 4127 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: 4128 err = ops->dis_vlan_insertion_v2_msg(vf, msg); 4129 break; 4130 case VIRTCHNL_OP_UNKNOWN: 4131 default: 4132 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, 4133 vf_id); 4134 err = ice_vc_send_msg_to_vf(vf, v_opcode, 4135 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, 4136 NULL, 0); 4137 break; 4138 } 4139 if (err) { 4140 /* Helper function cares less about error return values here 4141 * as it is busy with pending work. 4142 */ 4143 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", 4144 vf_id, v_opcode, err); 4145 } 4146 4147 finish: 4148 mutex_unlock(&vf->cfg_lock); 4149 ice_put_vf(vf); 4150 } 4151