// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

/* Byte offsets into dummy_eth_header below */
#define ICE_ETH_DA_OFFSET		0
#define ICE_ETH_ETHTYPE_OFFSET		12
#define ICE_ETH_VLAN_TCI_OFFSET		14
#define ICE_MAX_VLAN_ID			0xFFF
#define ICE_IPV6_ETHER_ID		0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 * In case of VLAN filter first two bytes defines ether type (0x8100)
 * and remaining two bytes are placeholder for programming a given VLAN ID
 * In case of Ether type filter it is treated as header without VLAN tag
 * and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN		16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00, /* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Sizes (in bytes) of the variable-length AQ switch rule buffers; each is the
 * fixed header up to the flexible member plus room for its payload.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);

/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* f/w echoes back the continuation cookie and element count */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* When not allocating from the f/w pool, the caller supplies the
	 * VSI number and marks it valid in the descriptor.
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}

/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
static enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle, or NULL if the
 * handle is out of range
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ?
		NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
		if (vsi->rdma_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
			vsi->rdma_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry (frees queue contexts first, then the
 * context itself)
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}

/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;

	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* undo the hardware add so HW and SW stay in sync */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
enum ice_status
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
enum ice_status
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 *
 * Returns 0 on success, negative errno otherwise.
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx;

	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!ctx)
		return -EIO;

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
}

/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if
	    (!sw_buf)
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = cpu_to_le16(1);

	/* Map the lookup type to the kind of VSI list resource to operate on:
	 * replication lists for most lookups, prune lists for VLAN.
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* translate f/w "no such entry" for update/remove into a
	 * distinct error code
	 */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = ICE_ERR_DOES_NOT_EXIST;

	return status;
}

/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */
static enum ice_status
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}

/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get(0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
static enum ice_status
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 buf_size;

	if (*num_recipes != ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);

	return status;
}

/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
static enum ice_status
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_recipe_to_profile - Get recipe to profile association
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID whose recipe association is queried
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Get the recipes associated with the given profile (0x0293)
 */
static enum ice_status
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 */
static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = cpu_to_le16(1);
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and
 * the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	/* Query f/w once per in-use profile and mirror the association into
	 * both direction tables (profile_to_recipe and recipe_to_profile).
	 */
	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	/* result_indx carries a valid bit; strip it before recording */
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
1046 */ 1047 static enum ice_status 1048 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, 1049 bool *refresh_required) 1050 { 1051 DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); 1052 struct ice_aqc_recipe_data_elem *tmp; 1053 u16 num_recps = ICE_MAX_NUM_RECIPES; 1054 struct ice_prot_lkup_ext *lkup_exts; 1055 enum ice_status status; 1056 u8 fv_word_idx = 0; 1057 u16 sub_recps; 1058 1059 bitmap_zero(result_bm, ICE_MAX_FV_WORDS); 1060 1061 /* we need a buffer big enough to accommodate all the recipes */ 1062 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); 1063 if (!tmp) 1064 return ICE_ERR_NO_MEMORY; 1065 1066 tmp[0].recipe_indx = rid; 1067 status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL); 1068 /* non-zero status meaning recipe doesn't exist */ 1069 if (status) 1070 goto err_unroll; 1071 1072 /* Get recipe to profile map so that we can get the fv from lkups that 1073 * we read for a recipe from FW. Since we want to minimize the number of 1074 * times we make this FW call, just make one call and cache the copy 1075 * until a new recipe is added. This operation is only required the 1076 * first time to get the changes from FW. Then to search existing 1077 * entries we don't need to update the cache again until another recipe 1078 * gets added. 1079 */ 1080 if (*refresh_required) { 1081 ice_get_recp_to_prof_map(hw); 1082 *refresh_required = false; 1083 } 1084 1085 /* Start populating all the entries for recps[rid] based on lkups from 1086 * firmware. Note that we are only creating the root recipe in our 1087 * database. 
1088 */ 1089 lkup_exts = &recps[rid].lkup_exts; 1090 1091 for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { 1092 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; 1093 struct ice_recp_grp_entry *rg_entry; 1094 u8 i, prof, idx, prot = 0; 1095 bool is_root; 1096 u16 off = 0; 1097 1098 rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), 1099 GFP_KERNEL); 1100 if (!rg_entry) { 1101 status = ICE_ERR_NO_MEMORY; 1102 goto err_unroll; 1103 } 1104 1105 idx = root_bufs.recipe_indx; 1106 is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; 1107 1108 /* Mark all result indices in this chain */ 1109 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) 1110 set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, 1111 result_bm); 1112 1113 /* get the first profile that is associated with rid */ 1114 prof = find_first_bit(recipe_to_profile[idx], 1115 ICE_MAX_NUM_PROFILES); 1116 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { 1117 u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; 1118 1119 rg_entry->fv_idx[i] = lkup_indx; 1120 rg_entry->fv_mask[i] = 1121 le16_to_cpu(root_bufs.content.mask[i + 1]); 1122 1123 /* If the recipe is a chained recipe then all its 1124 * child recipe's result will have a result index. 1125 * To fill fv_words we should not use those result 1126 * index, we only need the protocol ids and offsets. 1127 * We will skip all the fv_idx which stores result 1128 * index in them. We also need to skip any fv_idx which 1129 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a 1130 * valid offset value. 
1131 */ 1132 if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || 1133 rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || 1134 rg_entry->fv_idx[i] == 0) 1135 continue; 1136 1137 ice_find_prot_off(hw, ICE_BLK_SW, prof, 1138 rg_entry->fv_idx[i], &prot, &off); 1139 lkup_exts->fv_words[fv_word_idx].prot_id = prot; 1140 lkup_exts->fv_words[fv_word_idx].off = off; 1141 lkup_exts->field_mask[fv_word_idx] = 1142 rg_entry->fv_mask[i]; 1143 fv_word_idx++; 1144 } 1145 /* populate rg_list with the data from the child entry of this 1146 * recipe 1147 */ 1148 list_add(&rg_entry->l_entry, &recps[rid].rg_list); 1149 1150 /* Propagate some data to the recipe database */ 1151 recps[idx].is_root = !!is_root; 1152 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; 1153 bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); 1154 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { 1155 recps[idx].chain_idx = root_bufs.content.result_indx & 1156 ~ICE_AQ_RECIPE_RESULT_EN; 1157 set_bit(recps[idx].chain_idx, recps[idx].res_idxs); 1158 } else { 1159 recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; 1160 } 1161 1162 if (!is_root) 1163 continue; 1164 1165 /* Only do the following for root recipes entries */ 1166 memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, 1167 sizeof(recps[idx].r_bitmap)); 1168 recps[idx].root_rid = root_bufs.content.rid & 1169 ~ICE_AQ_RECIPE_ID_IS_ROOT; 1170 recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; 1171 } 1172 1173 /* Complete initialization of the root recipe entry */ 1174 lkup_exts->n_val_words = fv_word_idx; 1175 recps[rid].big_recp = (num_recps > 1); 1176 recps[rid].n_grp_count = (u8)num_recps; 1177 recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, 1178 recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), 1179 GFP_KERNEL); 1180 if (!recps[rid].root_buf) 1181 goto err_unroll; 1182 1183 /* Copy result indexes */ 1184 bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); 1185 
recps[rid].recp_created = true; 1186 1187 err_unroll: 1188 kfree(tmp); 1189 return status; 1190 } 1191 1192 /* ice_init_port_info - Initialize port_info with switch configuration data 1193 * @pi: pointer to port_info 1194 * @vsi_port_num: VSI number or port number 1195 * @type: Type of switch element (port or VSI) 1196 * @swid: switch ID of the switch the element is attached to 1197 * @pf_vf_num: PF or VF number 1198 * @is_vf: true if the element is a VF, false otherwise 1199 */ 1200 static void 1201 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, 1202 u16 swid, u16 pf_vf_num, bool is_vf) 1203 { 1204 switch (type) { 1205 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: 1206 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK); 1207 pi->sw_id = swid; 1208 pi->pf_vf_num = pf_vf_num; 1209 pi->is_vf = is_vf; 1210 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; 1211 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; 1212 break; 1213 default: 1214 ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); 1215 break; 1216 } 1217 } 1218 1219 /* ice_get_initial_sw_cfg - Get initial port and default VSI data 1220 * @hw: pointer to the hardware structure 1221 */ 1222 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) 1223 { 1224 struct ice_aqc_get_sw_cfg_resp_elem *rbuf; 1225 enum ice_status status; 1226 u16 req_desc = 0; 1227 u16 num_elems; 1228 u16 i; 1229 1230 rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN, 1231 GFP_KERNEL); 1232 1233 if (!rbuf) 1234 return ICE_ERR_NO_MEMORY; 1235 1236 /* Multiple calls to ice_aq_get_sw_cfg may be required 1237 * to get all the switch configuration information. 
The need 1238 * for additional calls is indicated by ice_aq_get_sw_cfg 1239 * writing a non-zero value in req_desc 1240 */ 1241 do { 1242 struct ice_aqc_get_sw_cfg_resp_elem *ele; 1243 1244 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, 1245 &req_desc, &num_elems, NULL); 1246 1247 if (status) 1248 break; 1249 1250 for (i = 0, ele = rbuf; i < num_elems; i++, ele++) { 1251 u16 pf_vf_num, swid, vsi_port_num; 1252 bool is_vf = false; 1253 u8 res_type; 1254 1255 vsi_port_num = le16_to_cpu(ele->vsi_port_num) & 1256 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; 1257 1258 pf_vf_num = le16_to_cpu(ele->pf_vf_num) & 1259 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M; 1260 1261 swid = le16_to_cpu(ele->swid); 1262 1263 if (le16_to_cpu(ele->pf_vf_num) & 1264 ICE_AQC_GET_SW_CONF_RESP_IS_VF) 1265 is_vf = true; 1266 1267 res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >> 1268 ICE_AQC_GET_SW_CONF_RESP_TYPE_S); 1269 1270 if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) { 1271 /* FW VSI is not needed. Just continue. */ 1272 continue; 1273 } 1274 1275 ice_init_port_info(hw->port_info, vsi_port_num, 1276 res_type, swid, pf_vf_num, is_vf); 1277 } 1278 } while (req_desc && !status); 1279 1280 devm_kfree(ice_hw_to_dev(hw), rbuf); 1281 return status; 1282 } 1283 1284 /** 1285 * ice_fill_sw_info - Helper function to populate lb_en and lan_en 1286 * @hw: pointer to the hardware structure 1287 * @fi: filter info structure to fill/update 1288 * 1289 * This helper function populates the lb_en and lan_en elements of the provided 1290 * ice_fltr_info struct using the switch's type and characteristics of the 1291 * switch rule being configured. 
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id > ICE_MAX_VLAN_ID means "no VLAN to program" (see below) */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* A remove rule only needs the rule index; no header or action */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	/* Build the action word from the forwarding action type */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* Queue region size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Pick up the lookup-type specific match data (MAC and/or VLAN) */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		/* Overwrite the dummy header's ethertype field */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* Program the VLAN TCI only if a valid (in-range) VLAN ID was set */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 * 1. Large Action
	 * 2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* The lookup rule lives immediately after the large action rule */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or
update existing switch rule 1621 * using the given VSI list ID 1622 */ 1623 static enum ice_status 1624 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1625 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, 1626 enum ice_sw_lkup_type lkup_type) 1627 { 1628 struct ice_aqc_sw_rules_elem *s_rule; 1629 enum ice_status status; 1630 u16 s_rule_size; 1631 u16 rule_type; 1632 int i; 1633 1634 if (!num_vsi) 1635 return ICE_ERR_PARAM; 1636 1637 if (lkup_type == ICE_SW_LKUP_MAC || 1638 lkup_type == ICE_SW_LKUP_MAC_VLAN || 1639 lkup_type == ICE_SW_LKUP_ETHERTYPE || 1640 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || 1641 lkup_type == ICE_SW_LKUP_PROMISC || 1642 lkup_type == ICE_SW_LKUP_PROMISC_VLAN) 1643 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : 1644 ICE_AQC_SW_RULES_T_VSI_LIST_SET; 1645 else if (lkup_type == ICE_SW_LKUP_VLAN) 1646 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : 1647 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; 1648 else 1649 return ICE_ERR_PARAM; 1650 1651 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); 1652 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 1653 if (!s_rule) 1654 return ICE_ERR_NO_MEMORY; 1655 for (i = 0; i < num_vsi; i++) { 1656 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { 1657 status = ICE_ERR_PARAM; 1658 goto exit; 1659 } 1660 /* AQ call requires hw_vsi_id(s) */ 1661 s_rule->pdata.vsi_list.vsi[i] = 1662 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); 1663 } 1664 1665 s_rule->type = cpu_to_le16(rule_type); 1666 s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi); 1667 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); 1668 1669 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); 1670 1671 exit: 1672 devm_kfree(ice_hw_to_dev(hw), s_rule); 1673 return status; 1674 } 1675 1676 /** 1677 * ice_create_vsi_list_rule - Creates and populates a VSI list rule 1678 * @hw: pointer to the HW struct 1679 * @vsi_handle_arr: array of VSI handles to form 
a VSI list 1680 * @num_vsi: number of VSI handles in the array 1681 * @vsi_list_id: stores the ID of the VSI list to be created 1682 * @lkup_type: switch rule filter's lookup type 1683 */ 1684 static enum ice_status 1685 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, 1686 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) 1687 { 1688 enum ice_status status; 1689 1690 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, 1691 ice_aqc_opc_alloc_res); 1692 if (status) 1693 return status; 1694 1695 /* Update the newly created VSI list to include the specified VSIs */ 1696 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, 1697 *vsi_list_id, false, 1698 ice_aqc_opc_add_sw_rules, lkup_type); 1699 } 1700 1701 /** 1702 * ice_create_pkt_fwd_rule 1703 * @hw: pointer to the hardware structure 1704 * @f_entry: entry containing packet forwarding information 1705 * 1706 * Create switch rule with given filter information and add an entry 1707 * to the corresponding filter management list to track this switch rule 1708 * and VSI mapping 1709 */ 1710 static enum ice_status 1711 ice_create_pkt_fwd_rule(struct ice_hw *hw, 1712 struct ice_fltr_list_entry *f_entry) 1713 { 1714 struct ice_fltr_mgmt_list_entry *fm_entry; 1715 struct ice_aqc_sw_rules_elem *s_rule; 1716 enum ice_sw_lkup_type l_type; 1717 struct ice_sw_recipe *recp; 1718 enum ice_status status; 1719 1720 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 1721 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 1722 if (!s_rule) 1723 return ICE_ERR_NO_MEMORY; 1724 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry), 1725 GFP_KERNEL); 1726 if (!fm_entry) { 1727 status = ICE_ERR_NO_MEMORY; 1728 goto ice_create_pkt_fwd_rule_exit; 1729 } 1730 1731 fm_entry->fltr_info = f_entry->fltr_info; 1732 1733 /* Initialize all the fields for the management entry */ 1734 fm_entry->vsi_count = 1; 1735 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; 1736 fm_entry->sw_marker_id = 
ICE_INVAL_SW_MARKER_ID; 1737 fm_entry->counter_index = ICE_INVAL_COUNTER_ID; 1738 1739 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, 1740 ice_aqc_opc_add_sw_rules); 1741 1742 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 1743 ice_aqc_opc_add_sw_rules, NULL); 1744 if (status) { 1745 devm_kfree(ice_hw_to_dev(hw), fm_entry); 1746 goto ice_create_pkt_fwd_rule_exit; 1747 } 1748 1749 f_entry->fltr_info.fltr_rule_id = 1750 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 1751 fm_entry->fltr_info.fltr_rule_id = 1752 le16_to_cpu(s_rule->pdata.lkup_tx_rx.index); 1753 1754 /* The book keeping entries will get removed when base driver 1755 * calls remove filter AQ command 1756 */ 1757 l_type = fm_entry->fltr_info.lkup_type; 1758 recp = &hw->switch_info->recp_list[l_type]; 1759 list_add(&fm_entry->list_entry, &recp->filt_rules); 1760 1761 ice_create_pkt_fwd_rule_exit: 1762 devm_kfree(ice_hw_to_dev(hw), s_rule); 1763 return status; 1764 } 1765 1766 /** 1767 * ice_update_pkt_fwd_rule 1768 * @hw: pointer to the hardware structure 1769 * @f_info: filter information for switch rule 1770 * 1771 * Call AQ command to update a previously created switch rule with a 1772 * VSI list ID 1773 */ 1774 static enum ice_status 1775 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) 1776 { 1777 struct ice_aqc_sw_rules_elem *s_rule; 1778 enum ice_status status; 1779 1780 s_rule = devm_kzalloc(ice_hw_to_dev(hw), 1781 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL); 1782 if (!s_rule) 1783 return ICE_ERR_NO_MEMORY; 1784 1785 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); 1786 1787 s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); 1788 1789 /* Update switch rule with new rule set to forward VSI list */ 1790 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, 1791 ice_aqc_opc_update_sw_rules, NULL); 1792 1793 devm_kfree(ice_hw_to_dev(hw), s_rule); 1794 return status; 1795 } 1796 1797 /** 1798 * 
ice_update_sw_rule_bridge_mode 1799 * @hw: pointer to the HW struct 1800 * 1801 * Updates unicast switch filter rules based on VEB/VEPA mode 1802 */ 1803 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) 1804 { 1805 struct ice_switch_info *sw = hw->switch_info; 1806 struct ice_fltr_mgmt_list_entry *fm_entry; 1807 enum ice_status status = 0; 1808 struct list_head *rule_head; 1809 struct mutex *rule_lock; /* Lock to protect filter rule list */ 1810 1811 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; 1812 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; 1813 1814 mutex_lock(rule_lock); 1815 list_for_each_entry(fm_entry, rule_head, list_entry) { 1816 struct ice_fltr_info *fi = &fm_entry->fltr_info; 1817 u8 *addr = fi->l_data.mac.mac_addr; 1818 1819 /* Update unicast Tx rules to reflect the selected 1820 * VEB/VEPA mode 1821 */ 1822 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) && 1823 (fi->fltr_act == ICE_FWD_TO_VSI || 1824 fi->fltr_act == ICE_FWD_TO_VSI_LIST || 1825 fi->fltr_act == ICE_FWD_TO_Q || 1826 fi->fltr_act == ICE_FWD_TO_QGRP)) { 1827 status = ice_update_pkt_fwd_rule(hw, fi); 1828 if (status) 1829 break; 1830 } 1831 } 1832 1833 mutex_unlock(rule_lock); 1834 1835 return status; 1836 } 1837 1838 /** 1839 * ice_add_update_vsi_list 1840 * @hw: pointer to the hardware structure 1841 * @m_entry: pointer to current filter management list entry 1842 * @cur_fltr: filter information from the book keeping entry 1843 * @new_fltr: filter information with the new VSI to be added 1844 * 1845 * Call AQ command to add or update previously created VSI list with new VSI. 1846 * 1847 * Helper function to do book keeping associated with adding filter information 1848 * The algorithm to do the book keeping is described below : 1849 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) 
 * if only one VSI has been added till now
 *         Allocate a new VSI list and add two VSIs
 *         to this list using switch rule command
 *         Update the previously created switch rule with the
 *         newly created VSI list ID
 * if a VSI list was previously created
 *         Add the new VSI to the previously created VSI list set
 *         using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	/* Queue/queue-group forwarding rules cannot share a filter via a VSI
	 * list; mixing them with VSI forwarding is not implemented either.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return ICE_ERR_NO_MEMORY;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_find_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a given rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
	struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
	struct ice_switch_info *sw = hw->switch_info;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		/* Match on the lookup data and the Rx/Tx flag only; the
		 * forwarding action may differ between entries.
		 */
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->flag == list_itr->fltr_info.flag) {
			ret = list_itr;
			break;
		}
	}
	return ret;
}

/**
 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which VSI lists needs to be searched
 * @vsi_handle: VSI handle to be found in VSI list
 * @vsi_list_id: VSI list ID found containing vsi_handle
 *
 * Helper function to search a VSI list with single entry containing given VSI
 * handle element. This can be extended further to search VSI list with more
 * than 1 vsi_count. Returns pointer to VSI list entry if found.
1993 */ 1994 static struct ice_vsi_list_map_info * 1995 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, 1996 u16 *vsi_list_id) 1997 { 1998 struct ice_vsi_list_map_info *map_info = NULL; 1999 struct ice_switch_info *sw = hw->switch_info; 2000 struct ice_fltr_mgmt_list_entry *list_itr; 2001 struct list_head *list_head; 2002 2003 list_head = &sw->recp_list[recp_id].filt_rules; 2004 list_for_each_entry(list_itr, list_head, list_entry) { 2005 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { 2006 map_info = list_itr->vsi_list_info; 2007 if (test_bit(vsi_handle, map_info->vsi_map)) { 2008 *vsi_list_id = map_info->vsi_list_id; 2009 return map_info; 2010 } 2011 } 2012 } 2013 return NULL; 2014 } 2015 2016 /** 2017 * ice_add_rule_internal - add rule for a given lookup type 2018 * @hw: pointer to the hardware structure 2019 * @recp_id: lookup type (recipe ID) for which rule has to be added 2020 * @f_entry: structure containing MAC forwarding information 2021 * 2022 * Adds or updates the rule lists for a given recipe 2023 */ 2024 static enum ice_status 2025 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, 2026 struct ice_fltr_list_entry *f_entry) 2027 { 2028 struct ice_switch_info *sw = hw->switch_info; 2029 struct ice_fltr_info *new_fltr, *cur_fltr; 2030 struct ice_fltr_mgmt_list_entry *m_entry; 2031 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2032 enum ice_status status = 0; 2033 2034 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) 2035 return ICE_ERR_PARAM; 2036 f_entry->fltr_info.fwd_id.hw_vsi_id = 2037 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); 2038 2039 rule_lock = &sw->recp_list[recp_id].filt_rule_lock; 2040 2041 mutex_lock(rule_lock); 2042 new_fltr = &f_entry->fltr_info; 2043 if (new_fltr->flag & ICE_FLTR_RX) 2044 new_fltr->src = hw->port_info->lport; 2045 else if (new_fltr->flag & ICE_FLTR_TX) 2046 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id; 2047 2048 m_entry = 
ice_find_rule_entry(hw, recp_id, new_fltr); 2049 if (!m_entry) { 2050 mutex_unlock(rule_lock); 2051 return ice_create_pkt_fwd_rule(hw, f_entry); 2052 } 2053 2054 cur_fltr = &m_entry->fltr_info; 2055 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); 2056 mutex_unlock(rule_lock); 2057 2058 return status; 2059 } 2060 2061 /** 2062 * ice_remove_vsi_list_rule 2063 * @hw: pointer to the hardware structure 2064 * @vsi_list_id: VSI list ID generated as part of allocate resource 2065 * @lkup_type: switch rule filter lookup type 2066 * 2067 * The VSI list should be emptied before this function is called to remove the 2068 * VSI list. 2069 */ 2070 static enum ice_status 2071 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, 2072 enum ice_sw_lkup_type lkup_type) 2073 { 2074 struct ice_aqc_sw_rules_elem *s_rule; 2075 enum ice_status status; 2076 u16 s_rule_size; 2077 2078 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); 2079 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 2080 if (!s_rule) 2081 return ICE_ERR_NO_MEMORY; 2082 2083 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); 2084 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); 2085 2086 /* Free the vsi_list resource that we allocated. It is assumed that the 2087 * list is empty at this point. 
	 */
	/* NOTE(review): s_rule is populated above but never passed to an AQ
	 * send here — only the vsi_list resource is freed. Confirm whether the
	 * VSI_LIST_CLEAR rule buffer is intentionally unused.
	 */
	status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
					    ice_aqc_opc_free_res);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Caller must hold the recipe's filt_rule_lock and guarantee that
 * fm_list->vsi_list_info is non-NULL (ice_remove_rule_internal checks this
 * before calling here).
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = 0;
	u16 vsi_list_id;

	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	/* Non-VLAN rules collapse back to a plain FWD_TO_VSI rule once a
	 * single VSI remains in the list.
	 */
	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* VSI list itself is torn down when: non-VLAN and one VSI left
	 * (converted above), or VLAN and no VSI left.
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 */
static enum ice_status
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	bool remove_rule = false;
	u16 vsi_handle;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* single-VSI rule: remove it outright */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = ICE_ERR_NO_MEMORY;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
 */
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_zero_ether_addr(mac_addr))
			continue;

		/* only Tx, VSI-sourced, FWD_TO_VSI MAC rules that forward to
		 * this VSI are considered a match
		 */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
		    f_info->fltr_act != ICE_FWD_TO_VSI ||
		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
			continue;

		if (ether_addr_equal(mac, mac_addr)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);
	return false;
}

/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check VLAN filter for this VSI
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			/* NOTE(review): the membership check only runs when
			 * vsi_count == 1; entries with larger lists fall
			 * through to the vlan_id comparison unchecked.
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate each entry; add multicast/shared-unicast
	 * entries one-by-one, and count exclusive unicast entries for a bulk
	 * AQ add below.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			/* Note: m_list_itr->status is only set on this
			 * per-entry path, not for bulk-added unicast entries.
			 */
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Second pass: serialize one switch rule per exclusive unicast MAC
	 * into the contiguous buffer.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* re-look up to fetch the management entry that
			 * ice_create_pkt_fwd_rule just added
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		/* NOTE(review): this assignment is redundant — fltr_rule_id
		 * was already copied by the struct assignment above.
		 */
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Stops at the first entry that fails; per-entry results are recorded in
 * each entry's status field.
 */
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
 */
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry(em_list_itr, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_add_rule_internal(hw, l_type,
							    em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 */
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return ICE_ERR_PARAM;

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
							       em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every bookkeeping entry on the given rule list (SW state only; no
 * AQ commands are issued here).
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every advanced-rule bookkeeping entry, including the separately
 * allocated lkups array owned by each entry.
 */
static void
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))
		return;

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	enum ice_status status;
	u16 s_rule_size;
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* add needs the full ethernet-header rule; remove only needs the
	 * header-less rule size
	 */
	s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
			    ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal must reference the rule ID recorded when
			 * the default-VSI rule was added
			 */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter
rule entry 2914 * @hw: pointer to the hardware structure 2915 * @recp_id: lookup type for which the specified rule needs to be searched 2916 * @f_info: rule information 2917 * 2918 * Helper function to search for a unicast rule entry - this is to be used 2919 * to remove unicast MAC filter that is not shared with other VSIs on the 2920 * PF switch. 2921 * 2922 * Returns pointer to entry storing the rule if found 2923 */ 2924 static struct ice_fltr_mgmt_list_entry * 2925 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id, 2926 struct ice_fltr_info *f_info) 2927 { 2928 struct ice_switch_info *sw = hw->switch_info; 2929 struct ice_fltr_mgmt_list_entry *list_itr; 2930 struct list_head *list_head; 2931 2932 list_head = &sw->recp_list[recp_id].filt_rules; 2933 list_for_each_entry(list_itr, list_head, list_entry) { 2934 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 2935 sizeof(f_info->l_data)) && 2936 f_info->fwd_id.hw_vsi_id == 2937 list_itr->fltr_info.fwd_id.hw_vsi_id && 2938 f_info->flag == list_itr->fltr_info.flag) 2939 return list_itr; 2940 } 2941 return NULL; 2942 } 2943 2944 /** 2945 * ice_remove_mac - remove a MAC address based filter rule 2946 * @hw: pointer to the hardware structure 2947 * @m_list: list of MAC addresses and forwarding information 2948 * 2949 * This function removes either a MAC filter rule or a specific VSI from a 2950 * VSI list for a multicast MAC address. 2951 * 2952 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by 2953 * ice_add_mac. Caller should be aware that this call will only work if all 2954 * the entries passed into m_list were added previously. It will not attempt to 2955 * do a partial remove of entries that were found. 
 */
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* _safe variant: ice_remove_rule_internal() may unlink the entry */
	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* every entry on m_list must be a MAC lookup filter */
		if (l_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
					ice_get_hw_vsi_num(hw, vsi_handle);
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			mutex_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
						       &list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return ICE_ERR_DOES_NOT_EXIST;
			}
			mutex_unlock(rule_lock);
		}
		/* per-entry status is recorded so the caller can see which
		 * entry failed; the first failure also aborts the walk
		 */
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Returns 0 on success, or the first non-zero per-entry removal status.
 */
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	if (!v_list || !hw)
		return ICE_ERR_PARAM;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return ICE_ERR_PARAM;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 *
 * True when the filter forwards directly to this VSI, or forwards to a
 * VSI list whose bitmap contains this VSI handle.
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 fm_entry->vsi_list_info &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
static enum ice_status
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
{
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values.
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);

	return 0;
}

/**
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 */
static enum ice_status
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	enum ice_status status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head,
							&fm_entry->fltr_info);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	/* exactly one of BCAST/MCAST/UCAST is chosen from the DA ... */
	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	/* ... and the VLAN bit is OR'ed in independently of the DA type */
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 */
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
		   struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* VLAN promiscuous rules live under a different recipe */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* collect matching rules into a private list under the lock, then
	 * issue the actual removals after dropping it
	 */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	/* free the copies made by ice_add_entry_to_vsi_fltr_list() */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set
 * VLAN promiscuous
 */
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	enum ice_status status = 0;
	bool is_tx_fltr;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type.
		 * NOTE(review): if promisc_mask contained only VLAN bits,
		 * pkt_type stays 0 here and the DA remains the all-zeros
		 * address from the memset above - confirm this is intended.
		 */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* snapshot this VSI's VLAN filters under the lock, then apply the
	 * promisc change per VLAN without holding it
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr
 * - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	/* Best-effort removal: the per-type removal statuses are
	 * intentionally discarded (this function returns void).
	 */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 */
enum ice_status
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	/* firmware returns the allocated counter index in the response */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 */
enum ice_status
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		  u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	/* Free resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");

	kfree(buf);
	return status;
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
};

/* Maps each SW protocol type to the protocol ID the hardware understands */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
{
	bool refresh_required = true;
	struct ice_sw_recipe *recp;
	u8 i;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,
						&refresh_required))
				continue;

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)
			continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;
			bool found = true;
			u8 pe, qr;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
				     qr++) {
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
					    cr[qr] == de[pe])
						/* Found the "pe"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "pe"th word was not found
				 * then this recipe is not what we are looking
				 * for. So break out from this loop and try the
				 * next recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}

/**
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
{
	u8 i;

	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
			return true;
		}
	return false;
}

/**
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * Returns the number of words appended to lkup_exts for this rule
 * (0 on unknown protocol type or when lkup_exts is full).
 */
static u8
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
{
	u8 j, word, prot_id, ret_val;

	if (!ice_prot_type_to_id(rule->type, &prot_id))
		return 0;

	word = lkup_exts->n_val_words;

	/* every 16-bit word of the rule mask that is non-zero becomes one
	 * extraction word (protocol ID + byte offset + mask)
	 */
	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
				return 0;
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
			word++;
		}

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;

	return ret_val;
}

/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct list_head *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* open a new group when none exists yet or the
			 * current one is full (ICE_NUM_WORDS_RECIPE words)
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return ICE_ERR_NO_MEMORY;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper
function to fill in the field vector indices for protocol-offset 3794 * pairs. These indexes are then ultimately programmed into a recipe. 3795 */ 3796 static enum ice_status 3797 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, 3798 struct list_head *rg_list) 3799 { 3800 struct ice_sw_fv_list_entry *fv; 3801 struct ice_recp_grp_entry *rg; 3802 struct ice_fv_word *fv_ext; 3803 3804 if (list_empty(fv_list)) 3805 return 0; 3806 3807 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, 3808 list_entry); 3809 fv_ext = fv->fv_ptr->ew; 3810 3811 list_for_each_entry(rg, rg_list, l_entry) { 3812 u8 i; 3813 3814 for (i = 0; i < rg->r_group.n_val_pairs; i++) { 3815 struct ice_fv_word *pr; 3816 bool found = false; 3817 u16 mask; 3818 u8 j; 3819 3820 pr = &rg->r_group.pairs[i]; 3821 mask = rg->r_group.mask[i]; 3822 3823 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) 3824 if (fv_ext[j].prot_id == pr->prot_id && 3825 fv_ext[j].off == pr->off) { 3826 found = true; 3827 3828 /* Store index of field vector */ 3829 rg->fv_idx[i] = j; 3830 rg->fv_mask[i] = mask; 3831 break; 3832 } 3833 3834 /* Protocol/offset could not be found, caller gave an 3835 * invalid pair 3836 */ 3837 if (!found) 3838 return ICE_ERR_PARAM; 3839 } 3840 } 3841 3842 return 0; 3843 } 3844 3845 /** 3846 * ice_find_free_recp_res_idx - find free result indexes for recipe 3847 * @hw: pointer to hardware structure 3848 * @profiles: bitmap of profiles that will be associated with the new recipe 3849 * @free_idx: pointer to variable to receive the free index bitmap 3850 * 3851 * The algorithm used here is: 3852 * 1. When creating a new recipe, create a set P which contains all 3853 * Profiles that will be associated with our new recipe 3854 * 3855 * 2. For each Profile p in set P: 3856 * a. Add all recipes associated with Profile p into set R 3857 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes 3858 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] 3859 * i. 
Or just assume they all have the same possible indexes: 3860 * 44, 45, 46, 47 3861 * i.e., PossibleIndexes = 0x0000F00000000000 3862 * 3863 * 3. For each Recipe r in set R: 3864 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes 3865 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes 3866 * 3867 * FreeIndexes will contain the bits indicating the indexes free for use, 3868 * then the code needs to update the recipe[r].used_result_idx_bits to 3869 * indicate which indexes were selected for use by this recipe. 3870 */ 3871 static u16 3872 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, 3873 unsigned long *free_idx) 3874 { 3875 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS); 3876 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES); 3877 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS); 3878 u16 bit; 3879 3880 bitmap_zero(possible_idx, ICE_MAX_FV_WORDS); 3881 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES); 3882 bitmap_zero(used_idx, ICE_MAX_FV_WORDS); 3883 bitmap_zero(free_idx, ICE_MAX_FV_WORDS); 3884 3885 bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS); 3886 3887 /* For each profile we are going to associate the recipe with, add the 3888 * recipes that are associated with that profile. This will give us 3889 * the set of recipes that our recipe may collide with. Also, determine 3890 * what possible result indexes are usable given this set of profiles. 3891 */ 3892 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) { 3893 bitmap_or(recipes, recipes, profile_to_recipe[bit], 3894 ICE_MAX_NUM_RECIPES); 3895 bitmap_and(possible_idx, possible_idx, 3896 hw->switch_info->prof_res_bm[bit], 3897 ICE_MAX_FV_WORDS); 3898 } 3899 3900 /* For each recipe that our new recipe may collide with, determine 3901 * which indexes have been used. 
3902 */ 3903 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES) 3904 bitmap_or(used_idx, used_idx, 3905 hw->switch_info->recp_list[bit].res_idxs, 3906 ICE_MAX_FV_WORDS); 3907 3908 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); 3909 3910 /* return number of free indexes */ 3911 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS); 3912 } 3913 3914 /** 3915 * ice_add_sw_recipe - function to call AQ calls to create switch recipe 3916 * @hw: pointer to hardware structure 3917 * @rm: recipe management list entry 3918 * @match_tun_mask: tunnel mask that needs to be programmed 3919 * @profiles: bitmap of profiles that will be associated. 3920 */ 3921 static enum ice_status 3922 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, 3923 u16 match_tun_mask, unsigned long *profiles) 3924 { 3925 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); 3926 struct ice_aqc_recipe_data_elem *tmp; 3927 struct ice_aqc_recipe_data_elem *buf; 3928 struct ice_recp_grp_entry *entry; 3929 enum ice_status status; 3930 u16 free_res_idx; 3931 u16 recipe_count; 3932 u8 chain_idx; 3933 u8 recps = 0; 3934 3935 /* When more than one recipe are required, another recipe is needed to 3936 * chain them together. Matching a tunnel metadata ID takes up one of 3937 * the match fields in the chaining recipe reducing the number of 3938 * chained recipes by one. 
3939 */ 3940 /* check number of free result indices */ 3941 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); 3942 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); 3943 3944 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", 3945 free_res_idx, rm->n_grp_count); 3946 3947 if (rm->n_grp_count > 1) { 3948 if (rm->n_grp_count > free_res_idx) 3949 return ICE_ERR_MAX_LIMIT; 3950 3951 rm->n_grp_count++; 3952 } 3953 3954 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) 3955 return ICE_ERR_MAX_LIMIT; 3956 3957 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); 3958 if (!tmp) 3959 return ICE_ERR_NO_MEMORY; 3960 3961 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), 3962 GFP_KERNEL); 3963 if (!buf) { 3964 status = ICE_ERR_NO_MEMORY; 3965 goto err_mem; 3966 } 3967 3968 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); 3969 recipe_count = ICE_MAX_NUM_RECIPES; 3970 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, 3971 NULL); 3972 if (status || recipe_count == 0) 3973 goto err_unroll; 3974 3975 /* Allocate the recipe resources, and configure them according to the 3976 * match fields from protocol headers and extracted field vectors. 3977 */ 3978 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); 3979 list_for_each_entry(entry, &rm->rg_list, l_entry) { 3980 u8 i; 3981 3982 status = ice_alloc_recipe(hw, &entry->rid); 3983 if (status) 3984 goto err_unroll; 3985 3986 /* Clear the result index of the located recipe, as this will be 3987 * updated, if needed, later in the recipe creation process. 3988 */ 3989 tmp[0].content.result_indx = 0; 3990 3991 buf[recps] = tmp[0]; 3992 buf[recps].recipe_indx = (u8)entry->rid; 3993 /* if the recipe is a non-root recipe RID should be programmed 3994 * as 0 for the rules to be applied correctly. 
3995 */ 3996 buf[recps].content.rid = 0; 3997 memset(&buf[recps].content.lkup_indx, 0, 3998 sizeof(buf[recps].content.lkup_indx)); 3999 4000 /* All recipes use look-up index 0 to match switch ID. */ 4001 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 4002 buf[recps].content.mask[0] = 4003 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 4004 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask 4005 * to be 0 4006 */ 4007 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 4008 buf[recps].content.lkup_indx[i] = 0x80; 4009 buf[recps].content.mask[i] = 0; 4010 } 4011 4012 for (i = 0; i < entry->r_group.n_val_pairs; i++) { 4013 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; 4014 buf[recps].content.mask[i + 1] = 4015 cpu_to_le16(entry->fv_mask[i]); 4016 } 4017 4018 if (rm->n_grp_count > 1) { 4019 /* Checks to see if there really is a valid result index 4020 * that can be used. 4021 */ 4022 if (chain_idx >= ICE_MAX_FV_WORDS) { 4023 ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); 4024 status = ICE_ERR_MAX_LIMIT; 4025 goto err_unroll; 4026 } 4027 4028 entry->chain_idx = chain_idx; 4029 buf[recps].content.result_indx = 4030 ICE_AQ_RECIPE_RESULT_EN | 4031 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & 4032 ICE_AQ_RECIPE_RESULT_DATA_M); 4033 clear_bit(chain_idx, result_idx_bm); 4034 chain_idx = find_first_bit(result_idx_bm, 4035 ICE_MAX_FV_WORDS); 4036 } 4037 4038 /* fill recipe dependencies */ 4039 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, 4040 ICE_MAX_NUM_RECIPES); 4041 set_bit(buf[recps].recipe_indx, 4042 (unsigned long *)buf[recps].recipe_bitmap); 4043 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 4044 recps++; 4045 } 4046 4047 if (rm->n_grp_count == 1) { 4048 rm->root_rid = buf[0].recipe_indx; 4049 set_bit(buf[0].recipe_indx, rm->r_bitmap); 4050 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; 4051 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { 4052 memcpy(buf[0].recipe_bitmap, rm->r_bitmap, 4053 
sizeof(buf[0].recipe_bitmap)); 4054 } else { 4055 status = ICE_ERR_BAD_PTR; 4056 goto err_unroll; 4057 } 4058 /* Applicable only for ROOT_RECIPE, set the fwd_priority for 4059 * the recipe which is getting created if specified 4060 * by user. Usually any advanced switch filter, which results 4061 * into new extraction sequence, ended up creating a new recipe 4062 * of type ROOT and usually recipes are associated with profiles 4063 * Switch rule referreing newly created recipe, needs to have 4064 * either/or 'fwd' or 'join' priority, otherwise switch rule 4065 * evaluation will not happen correctly. In other words, if 4066 * switch rule to be evaluated on priority basis, then recipe 4067 * needs to have priority, otherwise it will be evaluated last. 4068 */ 4069 buf[0].content.act_ctrl_fwd_priority = rm->priority; 4070 } else { 4071 struct ice_recp_grp_entry *last_chain_entry; 4072 u16 rid, i; 4073 4074 /* Allocate the last recipe that will chain the outcomes of the 4075 * other recipes together 4076 */ 4077 status = ice_alloc_recipe(hw, &rid); 4078 if (status) 4079 goto err_unroll; 4080 4081 buf[recps].recipe_indx = (u8)rid; 4082 buf[recps].content.rid = (u8)rid; 4083 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 4084 /* the new entry created should also be part of rg_list to 4085 * make sure we have complete recipe 4086 */ 4087 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), 4088 sizeof(*last_chain_entry), 4089 GFP_KERNEL); 4090 if (!last_chain_entry) { 4091 status = ICE_ERR_NO_MEMORY; 4092 goto err_unroll; 4093 } 4094 last_chain_entry->rid = rid; 4095 memset(&buf[recps].content.lkup_indx, 0, 4096 sizeof(buf[recps].content.lkup_indx)); 4097 /* All recipes use look-up index 0 to match switch ID. 
*/ 4098 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 4099 buf[recps].content.mask[0] = 4100 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 4101 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 4102 buf[recps].content.lkup_indx[i] = 4103 ICE_AQ_RECIPE_LKUP_IGNORE; 4104 buf[recps].content.mask[i] = 0; 4105 } 4106 4107 i = 1; 4108 /* update r_bitmap with the recp that is used for chaining */ 4109 set_bit(rid, rm->r_bitmap); 4110 /* this is the recipe that chains all the other recipes so it 4111 * should not have a chaining ID to indicate the same 4112 */ 4113 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; 4114 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4115 last_chain_entry->fv_idx[i] = entry->chain_idx; 4116 buf[recps].content.lkup_indx[i] = entry->chain_idx; 4117 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); 4118 set_bit(entry->rid, rm->r_bitmap); 4119 } 4120 list_add(&last_chain_entry->l_entry, &rm->rg_list); 4121 if (sizeof(buf[recps].recipe_bitmap) >= 4122 sizeof(rm->r_bitmap)) { 4123 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, 4124 sizeof(buf[recps].recipe_bitmap)); 4125 } else { 4126 status = ICE_ERR_BAD_PTR; 4127 goto err_unroll; 4128 } 4129 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 4130 4131 /* To differentiate among different UDP tunnels, a meta data ID 4132 * flag is used. 
4133 */ 4134 if (match_tun_mask) { 4135 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND; 4136 buf[recps].content.mask[i] = 4137 cpu_to_le16(match_tun_mask); 4138 } 4139 4140 recps++; 4141 rm->root_rid = (u8)rid; 4142 } 4143 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 4144 if (status) 4145 goto err_unroll; 4146 4147 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); 4148 ice_release_change_lock(hw); 4149 if (status) 4150 goto err_unroll; 4151 4152 /* Every recipe that just got created add it to the recipe 4153 * book keeping list 4154 */ 4155 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4156 struct ice_switch_info *sw = hw->switch_info; 4157 bool is_root, idx_found = false; 4158 struct ice_sw_recipe *recp; 4159 u16 idx, buf_idx = 0; 4160 4161 /* find buffer index for copying some data */ 4162 for (idx = 0; idx < rm->n_grp_count; idx++) 4163 if (buf[idx].recipe_indx == entry->rid) { 4164 buf_idx = idx; 4165 idx_found = true; 4166 } 4167 4168 if (!idx_found) { 4169 status = ICE_ERR_OUT_OF_RANGE; 4170 goto err_unroll; 4171 } 4172 4173 recp = &sw->recp_list[entry->rid]; 4174 is_root = (rm->root_rid == entry->rid); 4175 recp->is_root = is_root; 4176 4177 recp->root_rid = entry->rid; 4178 recp->big_recp = (is_root && rm->n_grp_count > 1); 4179 4180 memcpy(&recp->ext_words, entry->r_group.pairs, 4181 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); 4182 4183 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, 4184 sizeof(recp->r_bitmap)); 4185 4186 /* Copy non-result fv index values and masks to recipe. This 4187 * call will also update the result recipe bitmask. 
		 */
		ice_collect_result_idx(&buf[buf_idx], recp);

		/* for non-root recipes, also copy to the root, this allows
		 * easier matching of a complete chained recipe
		 */
		if (!is_root)
			ice_collect_result_idx(&buf[buf_idx],
					       &sw->recp_list[rm->root_rid]);

		/* Cache the grouping info in the software recipe entry so the
		 * recipe can later be matched without re-querying firmware.
		 */
		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->recp_created = true;
	}
	/* ownership of the AQ buffer transfers to rm on success */
	rm->root_buf = buf;
	kfree(tmp);
	return status;

err_unroll:
err_mem:
	kfree(tmp);
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_create_recipe_group - creates recipe group
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @lkup_exts: lookup elements
 *
 * Packs the lookup words in @lkup_exts into recipe groups (best fit) on
 * rm->rg_list and, on success, copies the extraction words and field masks
 * into @rm for later programming.
 *
 * Return: 0 on success, error code from ice_create_first_fit_recp_def
 * otherwise.
 */
static enum ice_status
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
			struct ice_prot_lkup_ext *lkup_exts)
{
	enum ice_status status;
	u8 recp_count = 0;

	rm->n_grp_count = 0;

	/* Create recipes for words that are marked not done by packing them
	 * as best fit.
	 */
	status = ice_create_first_fit_recp_def(hw, lkup_exts,
					       &rm->rg_list, &recp_count);
	if (!status) {
		rm->n_grp_count += recp_count;
		rm->n_ext_words = lkup_exts->n_val_words;
		memcpy(&rm->ext_words, lkup_exts->fv_words,
		       sizeof(rm->ext_words));
		memcpy(rm->word_masks, lkup_exts->field_mask,
		       sizeof(rm->word_masks));
	}

	return status;
}

/**
 * ice_get_fv - get field vectors/extraction sequences for spec.
 *	lookup types
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @bm: bitmap of field vectors to consider
 * @fv_list: pointer to a list that holds the returned field vectors
 *
 * Return: 0 on success, ICE_ERR_NO_MEMORY on allocation failure, ICE_ERR_CFG
 * when a lookup type has no protocol ID mapping.
 */
static enum ice_status
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
	   unsigned long *bm, struct list_head *fv_list)
{
	enum ice_status status;
	u8 *prot_ids;
	u16 i;

	prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
	if (!prot_ids)
		return ICE_ERR_NO_MEMORY;

	/* translate each lookup type to its hardware protocol ID; a type
	 * with no mapping cannot be programmed, so bail out
	 */
	for (i = 0; i < lkups_cnt; i++)
		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
			status = ICE_ERR_CFG;
			goto free_mem;
		}

	/* Find field vectors that include all specified protocol types */
	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);

free_mem:
	kfree(prot_ids);
	return status;
}

/**
 * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
 * @hw: pointer to hardware structure
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @bm: pointer to memory for returning the bitmap of field vectors
 *
 * NOTE(review): @rinfo is currently unused here — only non-tunnel profiles
 * (ICE_PROF_NON_TUN) are ever selected by this helper.
 */
static void
ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
			 unsigned long *bm)
{
	bitmap_zero(bm, ICE_MAX_NUM_PROFILES);

	ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
}

/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g.
 *	priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Return: 0 on success (including when an existing matching recipe is
 * reused), otherwise an ICE_ERR_* code. *@rid is only valid when 0 is
 * returned.
 */
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
	DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = 0;
	struct ice_sw_recipe *rm;
	/* no tunnel matching in this path, so the meta-data mask stays 0 */
	u16 match_tun_mask = 0;
	u8 i;

	if (!lkups_cnt)
		return ICE_ERR_PARAM;

	lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
	if (!lkup_exts)
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}

		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}
	}

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (!rm) {
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	/* Get bitmap of field vectors (profiles) that are compatible with the
	 * rule request; only these will be searched in the subsequent call to
	 * ice_get_fv.
	 */
	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);

	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	if (status)
		goto err_unroll;

	/* get bitmap of all profiles the recipe will be associated with */
	bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
		set_bit((u16)fvit->profile_id, profiles);
	}

	/* Look for a recipe which matches our requested fv / mask list */
	*rid = ice_find_recp(hw, lkup_exts);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria;
		 * note status is still 0 here, so the unroll path below only
		 * releases the temporary lists and returns success.
		 */
		goto err_unroll;

	/* Recipe we need does not exist, add a recipe */
	status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	list_for_each_entry(fvit, &rm->fv_list, list_entry) {
		DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
		u16 j;

		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);
		if (status)
			goto err_unroll;

		/* merge the new recipes into the profile's existing set */
		bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
			  ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_unroll;

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap,
						      NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_unroll;

		/* Update profile to recipe bitmap array */
		bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
			    ICE_MAX_NUM_RECIPES);

		/* Update recipe to profile bitmap array */
		for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
	}

	*rid = rm->root_rid;
	memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
	       sizeof(*lkup_exts));
err_unroll:
	/* the unroll path is shared by success and failure: the temporary
	 * recipe-group and field-vector lists are always released
	 */
	list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
		list_del(&r_entry->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r_entry);
	}

	list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
		list_del(&fvit->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fvit);
	}

	if (rm->root_buf)
		devm_kfree(ice_hw_to_dev(hw), rm->root_buf);

	kfree(rm);

err_free_lkup_exts:
	kfree(lkup_exts);

	return status;
}

/**
 * ice_find_dummy_packet - find dummy packet
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: pointer to receive the pointer to the offsets for
 *	the packet
 *
 * Scans the lookups to classify the rule (TCP/UDP, IPv4/IPv6, VLAN) and
 * selects the matching pre-built dummy packet template plus its protocol
 * offset table. The IPv4/TCP template is the fallback when nothing more
 * specific matches.
 */
static void
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
		      const u8 **pkt, u16 *pkt_len,
		      const struct ice_dummy_pkt_offsets *offsets)
{
	bool tcp = false, udp = false, ipv6 = false, vlan = false;
	u16 i;

	for (i = 0; i < lkups_cnt; i++) {
		if (lkups[i].type == ICE_UDP_ILOS)
			udp = true;
		else if (lkups[i].type == ICE_TCP_IL)
			tcp = true;
		else if (lkups[i].type == ICE_IPV6_OFOS)
			ipv6 = true;
		else if (lkups[i].type == ICE_VLAN_OFOS)
			vlan = true;
		else if (lkups[i].type == ICE_ETYPE_OL &&
			 lkups[i].h_u.ethertype.ethtype_id ==
				cpu_to_be16(ICE_IPV6_ETHER_ID) &&
			 lkups[i].m_u.ethertype.ethtype_id ==
					cpu_to_be16(0xFFFF))
			/* a fully-masked ethertype match on 0x86DD also
			 * implies IPv6
			 */
			ipv6 = true;
	}

	if (udp && !ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_packet;
			*pkt_len = sizeof(dummy_vlan_udp_packet);
			*offsets = dummy_vlan_udp_packet_offsets;
			return;
		}
		*pkt = dummy_udp_packet;
		*pkt_len = sizeof(dummy_udp_packet);
		*offsets = dummy_udp_packet_offsets;
		return;
	} else if (udp && ipv6) {
		if (vlan) {
			*pkt = dummy_vlan_udp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
			*offsets = dummy_vlan_udp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_udp_ipv6_packet;
		*pkt_len = sizeof(dummy_udp_ipv6_packet);
		*offsets = dummy_udp_ipv6_packet_offsets;
		return;
	} else if ((tcp && ipv6) || ipv6) {
		/* NOTE(review): "(tcp && ipv6) || ipv6" reduces to "ipv6";
		 * kept as-is since behavior is identical
		 */
		if (vlan) {
			*pkt = dummy_vlan_tcp_ipv6_packet;
			*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
			*offsets = dummy_vlan_tcp_ipv6_packet_offsets;
			return;
		}
		*pkt = dummy_tcp_ipv6_packet;
		*pkt_len = sizeof(dummy_tcp_ipv6_packet);
		*offsets = dummy_tcp_ipv6_packet_offsets;
		return;
	}

	/* default: IPv4 TCP templates (also covers pure L2/VLAN rules) */
	if (vlan) {
		*pkt = dummy_vlan_tcp_packet;
		*pkt_len = sizeof(dummy_vlan_tcp_packet);
		*offsets = dummy_vlan_tcp_packet_offsets;
	} else {
		*pkt = dummy_tcp_packet;
		*pkt_len = sizeof(dummy_tcp_packet);
		*offsets = dummy_tcp_packet_offsets;
	}
}

/**
 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
 *
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @s_rule: stores rule information from the match criteria
 * @dummy_pkt: dummy packet to fill according to filter match criteria
 * @pkt_len: packet length of dummy packet
 * @offsets: offset info for the dummy packet
 */
static enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
			  struct ice_aqc_sw_rules_elem *s_rule,
			  const u8 *dummy_pkt, u16 pkt_len,
			  const struct ice_dummy_pkt_offsets *offsets)
{
	u8 *pkt;
	u16 i;

	/* Start with a packet with a pre-defined/dummy content. Then, fill
	 * in the header values to be looked up or matched.
	 */
	pkt = s_rule->pdata.lkup_tx_rx.hdr;

	memcpy(pkt, dummy_pkt, pkt_len);

	for (i = 0; i < lkups_cnt; i++) {
		enum ice_protocol_type type;
		u16 offset = 0, len = 0, j;
		bool found = false;

		/* find the start of this layer; it should be found since this
		 * was already checked when search for the dummy packet
		 */
		type = lkups[i].type;
		for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
			if (type == offsets[j].type) {
				offset = offsets[j].offset;
				found = true;
				break;
			}
		}
		/* this should never happen in a correct calling sequence */
		if (!found)
			return ICE_ERR_PARAM;

		/* header length determines how many bytes of the lookup's
		 * value/mask pair are merged into the template below
		 */
		switch (lkups[i].type) {
		case ICE_MAC_OFOS:
		case ICE_MAC_IL:
			len = sizeof(struct ice_ether_hdr);
			break;
		case ICE_ETYPE_OL:
			len = sizeof(struct ice_ethtype_hdr);
			break;
		case ICE_VLAN_OFOS:
			len = sizeof(struct ice_vlan_hdr);
			break;
		case ICE_IPV4_OFOS:
		case ICE_IPV4_IL:
			len = sizeof(struct ice_ipv4_hdr);
			break;
		case ICE_IPV6_OFOS:
		case ICE_IPV6_IL:
			len = sizeof(struct ice_ipv6_hdr);
			break;
		case ICE_TCP_IL:
		case ICE_UDP_OF:
		case ICE_UDP_ILOS:
			len = sizeof(struct ice_l4_hdr);
			break;
		case ICE_SCTP_IL:
			len = sizeof(struct ice_sctp_hdr);
			break;
		default:
			return ICE_ERR_PARAM;
		}

		/* the length should be a word multiple */
		if (len % ICE_BYTES_PER_WORD)
			return ICE_ERR_CFG;

		/* We have the offset to the header start, the length, the
		 * caller's header values and mask. Use this information to
		 * copy the data into the dummy packet appropriately based on
		 * the mask. Note that we need to only write the bits as
		 * indicated by the mask to make sure we don't improperly write
		 * over any significant packet data.
		 */
		/* masked read-modify-write, one u16 word at a time: keep
		 * template bits where the mask is clear, take caller bits
		 * where the mask is set
		 */
		for (j = 0; j < len / sizeof(u16); j++)
			if (((u16 *)&lkups[i].m_u)[j])
				((u16 *)(pkt + offset))[j] =
					(((u16 *)(pkt + offset))[j] &
					 ~((u16 *)&lkups[i].m_u)[j]) |
					(((u16 *)&lkups[i].h_u)[j] &
					 ((u16 *)&lkups[i].m_u)[j]);
	}

	s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);

	return 0;
}

/**
 * ice_find_adv_rule_entry - Search a rule entry
 * @hw: pointer to the hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *	   structure per protocol header
 * @lkups_cnt: number of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		/* an existing rule matches only if every lookup element is
		 * byte-identical and the switch action flags agree
		 */
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with
the new VSI to be added 4693 * 4694 * Call AQ command to add or update previously created VSI list with new VSI. 4695 * 4696 * Helper function to do book keeping associated with adding filter information 4697 * The algorithm to do the booking keeping is described below : 4698 * When a VSI needs to subscribe to a given advanced filter 4699 * if only one VSI has been added till now 4700 * Allocate a new VSI list and add two VSIs 4701 * to this list using switch rule command 4702 * Update the previously created switch rule with the 4703 * newly created VSI list ID 4704 * if a VSI list was previously created 4705 * Add the new VSI to the previously created VSI list set 4706 * using the update switch rule command 4707 */ 4708 static enum ice_status 4709 ice_adv_add_update_vsi_list(struct ice_hw *hw, 4710 struct ice_adv_fltr_mgmt_list_entry *m_entry, 4711 struct ice_adv_rule_info *cur_fltr, 4712 struct ice_adv_rule_info *new_fltr) 4713 { 4714 enum ice_status status; 4715 u16 vsi_list_id = 0; 4716 4717 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || 4718 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP || 4719 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET) 4720 return ICE_ERR_NOT_IMPL; 4721 4722 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q || 4723 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) && 4724 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI || 4725 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)) 4726 return ICE_ERR_NOT_IMPL; 4727 4728 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { 4729 /* Only one entry existed in the mapping and it was not already 4730 * a part of a VSI list. So, create a VSI list with the old and 4731 * new VSIs. 
4732 */ 4733 struct ice_fltr_info tmp_fltr; 4734 u16 vsi_handle_arr[2]; 4735 4736 /* A rule already exists with the new VSI being added */ 4737 if (cur_fltr->sw_act.fwd_id.hw_vsi_id == 4738 new_fltr->sw_act.fwd_id.hw_vsi_id) 4739 return ICE_ERR_ALREADY_EXISTS; 4740 4741 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle; 4742 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle; 4743 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, 4744 &vsi_list_id, 4745 ICE_SW_LKUP_LAST); 4746 if (status) 4747 return status; 4748 4749 memset(&tmp_fltr, 0, sizeof(tmp_fltr)); 4750 tmp_fltr.flag = m_entry->rule_info.sw_act.flag; 4751 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; 4752 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; 4753 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; 4754 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST; 4755 4756 /* Update the previous switch rule of "forward to VSI" to 4757 * "fwd to VSI list" 4758 */ 4759 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); 4760 if (status) 4761 return status; 4762 4763 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id; 4764 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST; 4765 m_entry->vsi_list_info = 4766 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, 4767 vsi_list_id); 4768 } else { 4769 u16 vsi_handle = new_fltr->sw_act.vsi_handle; 4770 4771 if (!m_entry->vsi_list_info) 4772 return ICE_ERR_CFG; 4773 4774 /* A rule already exists with the new VSI being added */ 4775 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) 4776 return 0; 4777 4778 /* Update the previously created VSI list set with 4779 * the new VSI ID passed in 4780 */ 4781 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id; 4782 4783 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, 4784 vsi_list_id, false, 4785 ice_aqc_opc_update_sw_rules, 4786 ICE_SW_LKUP_LAST); 4787 /* update VSI list mapping info with new VSI ID */ 4788 if (!status) 4789 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 4790 } 4791 if (!status) 4792 m_entry->vsi_count++; 4793 
return status; 4794 } 4795 4796 /** 4797 * ice_add_adv_rule - helper function to create an advanced switch rule 4798 * @hw: pointer to the hardware structure 4799 * @lkups: information on the words that needs to be looked up. All words 4800 * together makes one recipe 4801 * @lkups_cnt: num of entries in the lkups array 4802 * @rinfo: other information related to the rule that needs to be programmed 4803 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be 4804 * ignored is case of error. 4805 * 4806 * This function can program only 1 rule at a time. The lkups is used to 4807 * describe the all the words that forms the "lookup" portion of the recipe. 4808 * These words can span multiple protocols. Callers to this function need to 4809 * pass in a list of protocol headers with lookup information along and mask 4810 * that determines which words are valid from the given protocol header. 4811 * rinfo describes other information related to this rule such as forwarding 4812 * IDs, priority of this rule, etc. 
 */
enum ice_status
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	enum ice_status status;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	u8 q_rgn;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return ICE_ERR_PARAM;

	/* get # of words we need to match */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		/* count non-zero u16 words in each mask; only masked words
		 * participate in the lookup
		 */
		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return ICE_ERR_PARAM;

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = ICE_ERR_PARAM;
		goto err_ice_add_adv_rule;
	}

	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return ICE_ERR_CFG;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* we have to add VSI to VSI_LIST and increment vsi_count.
		 * Also Update VSI list so that we can change forwarding rule
		 * if the rule already exists, we will check if it exists with
		 * same vsi_id, if not then add it to the VSI list if it already
		 * exists if not then create a VSI list and add the existing VSI
		 * ID and the new VSI ID to the list
		 * We will add that VSI to the list
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;
	/* default LAN+loopback enable unless caller supplied explicit flags */
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = ICE_ERR_CFG;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
	/* success path intentionally falls through: with status == 0 only the
	 * temporary AQ buffer (s_rule) is freed below
	 */
err_ice_add_adv_rule:
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	enum ice_status status = 0;
	u16 hw_vsi_id;

	/* Nothing to replay for this recipe */
	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* Single-VSI (non-VLAN) rules can be replayed directly from
		 * the saved filter info when they target this VSI
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		/* Otherwise only replay VSI-list rules that include this VSI */
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_adv_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Removes @vsi_handle from the VSI list of an advanced rule. When exactly one
 * VSI remains afterwards, the rule is converted from "forward to VSI list"
 * back to "forward to VSI" and the now-unused VSI list is torn down.
 */
static enum ice_status
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status;
	u16 vsi_list_id;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist
	 *
	 * NOTE(review): fm_list->vsi_list_info is dereferenced without a NULL
	 * check; presumably a rule whose fltr_act is ICE_FWD_TO_VSI_LIST
	 * always carries a VSI list — confirm against the rule-creation path.
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* Ask FW to drop this VSI from the list (remove flag == true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		/* Identify the single remaining VSI in the list */
		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "fwd to VSI list" to
		 * "forward to VSI" (the code above sets ICE_FWD_TO_VSI; the
		 * old comment stated this conversion backwards)
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *	   together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
5177 */ 5178 static enum ice_status 5179 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 5180 u16 lkups_cnt, struct ice_adv_rule_info *rinfo) 5181 { 5182 struct ice_adv_fltr_mgmt_list_entry *list_elem; 5183 struct ice_prot_lkup_ext lkup_exts; 5184 enum ice_status status = 0; 5185 bool remove_rule = false; 5186 struct mutex *rule_lock; /* Lock to protect filter rule list */ 5187 u16 i, rid, vsi_handle; 5188 5189 memset(&lkup_exts, 0, sizeof(lkup_exts)); 5190 for (i = 0; i < lkups_cnt; i++) { 5191 u16 count; 5192 5193 if (lkups[i].type >= ICE_PROTOCOL_LAST) 5194 return ICE_ERR_CFG; 5195 5196 count = ice_fill_valid_words(&lkups[i], &lkup_exts); 5197 if (!count) 5198 return ICE_ERR_CFG; 5199 } 5200 5201 rid = ice_find_recp(hw, &lkup_exts); 5202 /* If did not find a recipe that match the existing criteria */ 5203 if (rid == ICE_MAX_NUM_RECIPES) 5204 return ICE_ERR_PARAM; 5205 5206 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock; 5207 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo); 5208 /* the rule is already removed */ 5209 if (!list_elem) 5210 return 0; 5211 mutex_lock(rule_lock); 5212 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) { 5213 remove_rule = true; 5214 } else if (list_elem->vsi_count > 1) { 5215 remove_rule = false; 5216 vsi_handle = rinfo->sw_act.vsi_handle; 5217 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); 5218 } else { 5219 vsi_handle = rinfo->sw_act.vsi_handle; 5220 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem); 5221 if (status) { 5222 mutex_unlock(rule_lock); 5223 return status; 5224 } 5225 if (list_elem->vsi_count == 0) 5226 remove_rule = true; 5227 } 5228 mutex_unlock(rule_lock); 5229 if (remove_rule) { 5230 struct ice_aqc_sw_rules_elem *s_rule; 5231 u16 rule_buf_sz; 5232 5233 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; 5234 s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); 5235 if (!s_rule) 5236 return ICE_ERR_NO_MEMORY; 5237 
s_rule->pdata.lkup_tx_rx.act = 0; 5238 s_rule->pdata.lkup_tx_rx.index = 5239 cpu_to_le16(list_elem->rule_info.fltr_rule_id); 5240 s_rule->pdata.lkup_tx_rx.hdr_len = 0; 5241 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule, 5242 rule_buf_sz, 1, 5243 ice_aqc_opc_remove_sw_rules, NULL); 5244 if (!status || status == ICE_ERR_DOES_NOT_EXIST) { 5245 struct ice_switch_info *sw = hw->switch_info; 5246 5247 mutex_lock(rule_lock); 5248 list_del(&list_elem->list_entry); 5249 devm_kfree(ice_hw_to_dev(hw), list_elem->lkups); 5250 devm_kfree(ice_hw_to_dev(hw), list_elem); 5251 mutex_unlock(rule_lock); 5252 if (list_empty(&sw->recp_list[rid].filt_rules)) 5253 sw->recp_list[rid].adv_rule = false; 5254 } 5255 kfree(s_rule); 5256 } 5257 return status; 5258 } 5259 5260 /** 5261 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID 5262 * @hw: pointer to the hardware structure 5263 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID 5264 * 5265 * This function is used to remove 1 rule at a time. The removal is based on 5266 * the remove_entry parameter. 
This function will remove rule for a given 5267 * vsi_handle with a given rule_id which is passed as parameter in remove_entry 5268 */ 5269 enum ice_status 5270 ice_rem_adv_rule_by_id(struct ice_hw *hw, 5271 struct ice_rule_query_data *remove_entry) 5272 { 5273 struct ice_adv_fltr_mgmt_list_entry *list_itr; 5274 struct list_head *list_head; 5275 struct ice_adv_rule_info rinfo; 5276 struct ice_switch_info *sw; 5277 5278 sw = hw->switch_info; 5279 if (!sw->recp_list[remove_entry->rid].recp_created) 5280 return ICE_ERR_PARAM; 5281 list_head = &sw->recp_list[remove_entry->rid].filt_rules; 5282 list_for_each_entry(list_itr, list_head, list_entry) { 5283 if (list_itr->rule_info.fltr_rule_id == 5284 remove_entry->rule_id) { 5285 rinfo = list_itr->rule_info; 5286 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle; 5287 return ice_rem_adv_rule(hw, list_itr->lkups, 5288 list_itr->lkups_cnt, &rinfo); 5289 } 5290 } 5291 /* either list is empty or unable to find rule */ 5292 return ICE_ERR_DOES_NOT_EXIST; 5293 } 5294 5295 /** 5296 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 5297 * @hw: pointer to the hardware structure 5298 * @vsi_handle: driver VSI handle 5299 * 5300 * Replays filters for requested VSI via vsi_handle. 5301 */ 5302 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 5303 { 5304 struct ice_switch_info *sw = hw->switch_info; 5305 enum ice_status status = 0; 5306 u8 i; 5307 5308 for (i = 0; i < ICE_SW_LKUP_LAST; i++) { 5309 struct list_head *head; 5310 5311 head = &sw->recp_list[i].filt_replay_rules; 5312 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 5313 if (status) 5314 return status; 5315 } 5316 return status; 5317 } 5318 5319 /** 5320 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 5321 * @hw: pointer to the HW struct 5322 * 5323 * Deletes the filter replay rules. 
5324 */ 5325 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 5326 { 5327 struct ice_switch_info *sw = hw->switch_info; 5328 u8 i; 5329 5330 if (!sw) 5331 return; 5332 5333 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5334 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 5335 struct list_head *l_head; 5336 5337 l_head = &sw->recp_list[i].filt_replay_rules; 5338 if (!sw->recp_list[i].adv_rule) 5339 ice_rem_sw_rule_info(hw, l_head); 5340 else 5341 ice_rem_adv_rule_info(hw, l_head); 5342 } 5343 } 5344 } 5345