// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_lib.h"
#include "ice_switch.h"

/* Byte offsets into dummy_eth_header below */
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_IPV6_ETHER_ID 0x86DD

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *	In case of VLAN filter first two bytes defines ether type (0x8100)
 *	and remaining two bytes are placeholder for programming a given VLAN ID
 *	In case of Ether type filter it is treated as header without VLAN tag
 *	and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* One (protocol, byte-offset) pair per header inside a dummy packet; the
 * entry with type ICE_PROTOCOL_LAST terminates the table.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};

/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_TCP_IL,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + TCP */
static const u8 dummy_gre_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 76 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_NVGRE,		34 },
	{ ICE_MAC_IL,		42 },
	{ ICE_IPV4_IL,		56 },
	{ ICE_UDP_ILOS,		76 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + NVGRE + inner MAC + IPv4 + UDP */
static const u8 dummy_gre_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x3E,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x2F, 0x00, 0x00,	/* protocol 0x2F = GRE */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x80, 0x00, 0x65, 0x58,	/* ICE_NVGRE 34 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 42 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x14,	/* ICE_IPV4_IL 56 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 76 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP tunnel (VXLAN/GENEVE/VXLAN-GPE) +
 * inner MAC + IPv4 + TCP dummy packet
 */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_TCP_IL,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};
/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + TCP */
static const u8 dummy_udp_tun_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x5a,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x46, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x40, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 84 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x02, 0x20, 0x00,
	0x00, 0x00, 0x00, 0x00
};

/* offset info for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP packet */
static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_OF,		34 },
	{ ICE_VXLAN,		42 },
	{ ICE_GENEVE,		42 },
	{ ICE_VXLAN_GPE,	42 },
	{ ICE_MAC_IL,		50 },
	{ ICE_IPV4_IL,		64 },
	{ ICE_UDP_ILOS,		84 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP tunnel + inner MAC + IPv4 + UDP */
static const u8 dummy_udp_tun_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x4e,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x12, 0xb5,	/* ICE_UDP_OF 34, dst port 4789 (VXLAN) */
	0x00, 0x3a, 0x00, 0x00,

	0x00, 0x00, 0x65, 0x58,	/* ICE_VXLAN 42 */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_IL 50 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x08, 0x00,

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_IL 64 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 84 */
	0x00, 0x08, 0x00, 0x00,
};

/* offset info for MAC + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_UDP_ILOS,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + UDP */
static const u8 dummy_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 34 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_UDP_ILOS,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:UDP dummy packet */
static const u8 dummy_vlan_udp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x1c,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x11, 0x00, 0x00,	/* protocol 0x11 = UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 38 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV4_OFOS,	14 },
	{ ICE_TCP_IL,		34 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv4 + TCP */
static const u8 dummy_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x08, 0x00,		/* ICE_ETYPE_OL 12 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 14 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 34 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV4_OFOS,	18 },
	{ ICE_TCP_IL,		38 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv4:TCP dummy packet */
static const u8 dummy_vlan_tcp_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x08, 0x00,		/* ICE_ETYPE_OL 16 */

	0x45, 0x00, 0x00, 0x28,	/* ICE_IPV4_OFOS 18 */
	0x00, 0x01, 0x00, 0x00,
	0x00, 0x06, 0x00, 0x00,	/* protocol 0x06 = TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 38 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* offset info for MAC + IPv6 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_TCP_IL,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* Dummy packet for MAC + IPv6 + TCP */
static const u8 dummy_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 54 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + TCP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_tcp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_TCP_IL,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + TCP dummy packet */
static const u8 dummy_vlan_tcp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x14, 0x06, 0x00,	/* Next header is TCP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_TCP_IL 58 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x50, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* IPv6 + UDP */
static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_ETYPE_OL,		12 },
	{ ICE_IPV6_OFOS,	14 },
	{ ICE_UDP_ILOS,		54 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* IPv6 + UDP dummy packet */
static const u8 dummy_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x86, 0xDD,		/* ICE_ETYPE_OL 12 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 14 */
	0x00, 0x10, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 54 */
	0x00, 0x10, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* needed for ESP packets */
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* C-tag (802.1Q): IPv6 + UDP */
static const struct ice_dummy_pkt_offsets
dummy_vlan_udp_ipv6_packet_offsets[] = {
	{ ICE_MAC_OFOS,		0 },
	{ ICE_VLAN_OFOS,	12 },
	{ ICE_ETYPE_OL,		16 },
	{ ICE_IPV6_OFOS,	18 },
	{ ICE_UDP_ILOS,		58 },
	{ ICE_PROTOCOL_LAST,	0 },
};

/* C-tag (802.1Q), IPv6 + UDP dummy packet */
static const u8 dummy_vlan_udp_ipv6_packet[] = {
	0x00, 0x00, 0x00, 0x00,	/* ICE_MAC_OFOS 0 */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x81, 0x00, 0x00, 0x00,	/* ICE_VLAN_OFOS 12 */

	0x86, 0xDD,		/* ICE_ETYPE_OL 16 */

	0x60, 0x00, 0x00, 0x00,	/* ICE_IPV6_OFOS 18 */
	0x00, 0x08, 0x11, 0x00,	/* Next header UDP */
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,

	0x00, 0x00, 0x00, 0x00,	/* ICE_UDP_ILOS 58 */
	0x00, 0x08, 0x00, 0x00,

	0x00, 0x00,	/* 2 bytes for 4 byte alignment */
};

/* Sizes (in bytes) of the variable-length AQ switch-rule buffers:
 * header plus a dummy Ethernet header, no header, n large-action entries,
 * or n VSI-list entries respectively.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
	 (DUMMY_ETH_HDR_LEN * \
	  sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))

/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* this is a profile to recipe association bitmap */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 *
 * Return: 0 on success, -ENOMEM if the table cannot be allocated.
 */
int ice_init_def_sw_recp(struct ice_hw *hw)
{
	struct ice_sw_recipe *recps;
	u8 i;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	if (!recps)
		return -ENOMEM;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* default recipes are identified by their index */
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	}

	hw->switch_info->recp_list = recps;

	return 0;
}

/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status) {
		/* report back f/w's continuation cookie and element count */
		*req_desc = le16_to_cpu(cmd->element);
		*num_elems = le16_to_cpu(cmd->num_elems);
	}

	return status;
}

/**
 * ice_aq_add_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210).
 * On success, the HW-assigned VSI number and the used/free VSI counters are
 * written back into @vsi_ctx.
 */
static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* only pass a VSI number when the caller picked one explicitly */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
		vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
	}

	return status;
}
/**
 * ice_aq_free_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213).
 * On success the used/free VSI counters in @vsi_ctx are refreshed.
 */
static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_aq_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211).
 * On success the used/free VSI counters in @vsi_ctx are refreshed.
 */
static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);

	if (!status) {
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}

	return status;
}

/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
{
	return hw->vsi_ctx[vsi_handle]->vsi_num;
}

/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle, or NULL if the handle
 * is out of range
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
}

/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}

/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Frees the per-TC LAN and RDMA queue context arrays of the VSI, if any.
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;
	u8 i;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return;
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
			vsi->lan_q_ctx[i] = NULL;
		}
		if (vsi->rdma_q_ctx[i]) {
			devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
			vsi->rdma_q_ctx[i] = NULL;
		}
	}
}

/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry; frees the queue contexts and the context
 * struct itself, then NULLs the handle slot
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (vsi) {
		ice_clear_vsi_q_ctx(hw, vsi_handle);
		devm_kfree(ice_hw_to_dev(hw), vsi);
		hw->vsi_ctx[vsi_handle] = NULL;
	}
}

/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
}
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
{
	struct ice_vsi_ctx *tmp_vsi_ctx;
	int status;

	if (vsi_handle >= ICE_MAX_VSI)
		return -EINVAL;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	if (status)
		return status;
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!tmp_vsi_ctx) {
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		if (!tmp_vsi_ctx) {
			/* roll back the HW-side add on allocation failure */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return -ENOMEM;
		}
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
	} else {
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
	}

	return 0;
}

/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	if (!status)
		ice_clear_vsi_ctx(hw, vsi_handle);
	return status;
}

/**
 * ice_update_vsi
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
{
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
}

/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 *
 * Sets or clears ICE_AQ_VSI_Q_OPT_PE_FLTR_EN in the VSI's queue-option flags
 * and pushes the updated context to hardware.
 */
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
	struct ice_vsi_ctx *ctx;

	ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!ctx)
		return -EIO;

	if (enable)
		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	else
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}
/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;
	sw_buf->num_elems = cpu_to_le16(1);

	/* replication lists serve most lookup types; VLAN uses pruning */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
		sw_buf->res_type =
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
	} else {
		status = -EINVAL;
		goto ice_aq_alloc_free_vsi_list_exit;
	}

	/* freeing needs the caller's list ID; allocation returns one */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
	if (status)
		goto ice_aq_alloc_free_vsi_list_exit;

	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
	}

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
	return status;
}

/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* map FW "entry not found" onto -ENOENT for update/remove */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
		status = -ENOENT;

	return status;
}

/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x0290)
 */
static int
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;
	u16 buf_size;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
}
1071 */ 1072 static int 1073 ice_aq_get_recipe(struct ice_hw *hw, 1074 struct ice_aqc_recipe_data_elem *s_recipe_list, 1075 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd) 1076 { 1077 struct ice_aqc_add_get_recipe *cmd; 1078 struct ice_aq_desc desc; 1079 u16 buf_size; 1080 int status; 1081 1082 if (*num_recipes != ICE_MAX_NUM_RECIPES) 1083 return -EINVAL; 1084 1085 cmd = &desc.params.add_get_recipe; 1086 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe); 1087 1088 cmd->return_index = cpu_to_le16(recipe_root); 1089 cmd->num_sub_recipes = 0; 1090 1091 buf_size = *num_recipes * sizeof(*s_recipe_list); 1092 1093 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd); 1094 *num_recipes = le16_to_cpu(cmd->num_sub_recipes); 1095 1096 return status; 1097 } 1098 1099 /** 1100 * ice_aq_map_recipe_to_profile - Map recipe to packet profile 1101 * @hw: pointer to the HW struct 1102 * @profile_id: package profile ID to associate the recipe with 1103 * @r_bitmap: Recipe bitmap filled in and need to be returned as response 1104 * @cd: pointer to command details structure or NULL 1105 * Recipe to profile association (0x0291) 1106 */ 1107 static int 1108 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap, 1109 struct ice_sq_cd *cd) 1110 { 1111 struct ice_aqc_recipe_to_profile *cmd; 1112 struct ice_aq_desc desc; 1113 1114 cmd = &desc.params.recipe_to_profile; 1115 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile); 1116 cmd->profile_id = cpu_to_le16(profile_id); 1117 /* Set the recipe ID bit in the bitmask to let the device know which 1118 * profile we are associating the recipe to 1119 */ 1120 memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc)); 1121 1122 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1123 } 1124 1125 /** 1126 * ice_aq_get_recipe_to_profile - Map recipe to packet profile 1127 * @hw: pointer to the HW struct 1128 * @profile_id: package profile ID to associate the recipe with 1129 * 
 @r_bitmap: recipe bitmap filled in by firmware and returned to the caller
 * @cd: pointer to command details structure or NULL
 *
 * Get the recipes associated with the given profile (0x0293).
 */
static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
{
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* only copy the association bitmap out on success */
	if (!status)
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));

	return status;
}

/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 *
 * Allocates one shared recipe resource from firmware and, on success,
 * stores the assigned recipe ID in @rid.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the AQ error.
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);
	if (!sw_buf)
		return -ENOMEM;

	sw_buf->num_elems = cpu_to_le16(1);
	/* recipe resources are allocated as shared */
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
					ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
	kfree(sw_buf);

	return status;
}

/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
	u16 i;

	/* query every profile up to the highest one in use; fill both the
	 * profile_to_recipe and the inverse recipe_to_profile matrices
	 */
	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
		u16 j;

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* a profile FW can't report on is simply skipped */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
			continue;
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
	}
}

/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the recipe entry has a valid result index (result enable bit set),
 * record that index in the recipe's res_idxs bitmap.
 */
static void
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
{
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
			recp->res_idxs);
}

/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the AQ error
 * when the recipe cannot be read back.
 */
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
{
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	u8 fv_word_idx = 0;
	u16 sub_recps;
	int status;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	if (status)
		goto err_unroll;

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;
	}

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;
		bool is_root;
		u16 off = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
					GFP_KERNEL);
		if (!rg_entry) {
			status = -ENOMEM;
			goto err_unroll;
		}

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
				result_bm);

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		/* lkup_indx[0] is skipped: index 0 of the recipe content is
		 * not a lookup word (entries start at i + 1 below)
		 */
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
				continue;

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];
			fv_word_idx++;
		}
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
					       ~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
		} else {
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
		}

		if (!is_root)
			continue;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
				      ~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
	}

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	/* keep a device-managed copy of the raw FW recipe entries */
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
					   GFP_KERNEL);
	if (!recps[rid].root_buf) {
		status = -ENOMEM;
		goto err_unroll;
	}

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;

err_unroll:
	kfree(tmp);
	return status;
}

/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
static void
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
{
	switch (type) {
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->sw_id = swid;
		pi->pf_vf_num = pf_vf_num;
		pi->is_vf = is_vf;
		/* no default Tx/Rx VSI until one is explicitly configured */
		pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
		pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
		break;
	default:
		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
		break;
	}
}

/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the first AQ
 * error encountered while fetching the switch configuration.
 */
int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
	u16 req_desc = 0;
	u16 num_elems;
	int status;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	if (!rbuf)
		return -ENOMEM;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 res_type;

			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			/* element type lives in the high bits of
			 * vsi_port_num
			 */
			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */
				continue;
			}

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), rbuf);
	return status;
}

/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	fi->lb_en = false;
	fi->lan_en = false;
	/* lb_en/lan_en only apply to Tx-flagged forwarding rules */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 *     promiscuous, ethertype-MAC, promiscuous-VLAN
		 *     and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			/* NOTE(review): the MAC_VLAN case below reads
			 * l_data.mac.mac_addr; this presumably aliases
			 * l_data.mac_vlan.mac_addr via the l_data union —
			 * confirm against struct ice_fltr_info.
			 */
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			fi->lan_en = true;
		}
	}
}

/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* init above the valid range so "no VLAN" is detectable below */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	void *daddr = NULL;
	u16 eth_hdr_sz;
	u8 *eth_hdr;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	if (opc == ice_aqc_opc_remove_sw_rules) {
		/* removal only needs the rule index; no action or header */
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	eth_hdr_sz = sizeof(dummy_eth_header);
	eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
	ice_fill_sw_info(hw, f_info);

	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue group size is encoded as log2 of the group size */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unsupported action: leave the rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		fallthrough;
	case ICE_SW_LKUP_ETHERTYPE:
		/* program the ethertype into bytes 12-13 of the dummy hdr */
		off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		fallthrough;
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);

	/* only program a VLAN TCI when one of the cases above set a valid
	 * vlan_id (<= ICE_MAX_VLAN_ID)
	 */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}

/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3.
	 *    GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	u16 lg_act_size;
	u16 rules_size;
	int status;
	u32 act;
	u16 id;

	/* markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return -EINVAL;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return -ENOMEM;

	/* the lookup rule lives immediately after the large action */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
				      m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record the marker association on the management entry */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}

/**
 * ice_create_vsi_list_map
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 *
 * Helper function to create a new entry of VSI list ID to VSI mapping
 * using the given VSI list ID
 *
 * Return: the new map entry, or NULL on allocation failure.
 */
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			u16 vsi_list_id)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_map;
	int i;

	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
	if (!v_map)
		return NULL;

	v_map->vsi_list_id = vsi_list_id;
	v_map->ref_cnt = 1;
	for (i = 0; i < num_vsi; i++)
		set_bit(vsi_handle_arr[i], v_map->vsi_map);

	list_add(&v_map->list_entry, &sw->vsi_list_map_head);
	return v_map;
}

/**
 * ice_update_vsi_list_rule
 * @hw: pointer to the hardware structure
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in the array
 * @vsi_list_id: VSI list ID generated as part of allocate resource
 * @remove: Boolean value to indicate if this is a remove action
 * @opc: switch rules population command type - pass in the command opcode
 * @lkup_type: lookup type of the filter
 *
 * Call AQ command to add a new switch rule or update existing switch rule
 * using the given VSI list ID
 */
static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
			 enum ice_sw_lkup_type lkup_type)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	u16 s_rule_size;
	u16 rule_type;
	int status;
	int i;

	if (!num_vsi)
		return -EINVAL;

	/* VLAN lookups use prune lists; every other supported lookup type
	 * uses a plain VSI list set/clear
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_VSI_LIST_SET;
	else if (lkup_type == ICE_SW_LKUP_VLAN)
		rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
			ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
	else
		return -EINVAL;

	s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	for (i = 0; i < num_vsi; i++) {
		if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
			status = -EINVAL;
			goto exit;
		}
		/* AQ call requires hw_vsi_id(s) */
		s_rule->pdata.vsi_list.vsi[i] =
			cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
	}

	s_rule->type = cpu_to_le16(rule_type);
	s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
	s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);

exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
 * @hw: pointer to the HW struct
 * @vsi_handle_arr: array of VSI handles to form a VSI list
 * @num_vsi: number of VSI handles in
 the array
 * @vsi_list_id: stores the ID of the VSI list to be created
 * @lkup_type: switch rule filter's lookup type
 *
 * Return: 0 on success, otherwise the error from list allocation or the
 * subsequent list-update AQ call.
 */
static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
			 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
	int status;

	status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
					    ice_aqc_opc_alloc_res);
	if (status)
		return status;

	/* Update the newly created VSI list to include the specified VSIs */
	return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
					*vsi_list_id, false,
					ice_aqc_opc_add_sw_rules, lkup_type);
}

/**
 * ice_create_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_entry: entry containing packet forwarding information
 *
 * Create switch rule with given filter information and add an entry
 * to the corresponding filter management list to track this switch rule
 * and VSI mapping
 */
static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
			struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_aqc_sw_rules_elem *s_rule;
	enum ice_sw_lkup_type l_type;
	struct ice_sw_recipe *recp;
	int status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
				GFP_KERNEL);
	if (!fm_entry) {
		status = -ENOMEM;
		goto ice_create_pkt_fwd_rule_exit;
	}

	fm_entry->fltr_info = f_entry->fltr_info;

	/* Initialize all the fields for the management entry */
	fm_entry->vsi_count = 1;
	fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
	fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
	fm_entry->counter_index = ICE_INVAL_COUNTER_ID;

	ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
			 ice_aqc_opc_add_sw_rules);

	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_add_sw_rules, NULL);
	if (status) {
		/* rule was not programmed; drop the management entry too */
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
		goto ice_create_pkt_fwd_rule_exit;
	}

	/* FW assigned the rule ID; record it in both the caller's entry and
	 * the management entry
	 */
	f_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	fm_entry->fltr_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

	/* The book keeping entries will get removed when base driver
	 * calls remove filter AQ command
	 */
	l_type = fm_entry->fltr_info.lkup_type;
	recp = &hw->switch_info->recp_list[l_type];
	list_add(&fm_entry->list_entry, &recp->filt_rules);

ice_create_pkt_fwd_rule_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_pkt_fwd_rule
 * @hw: pointer to the hardware structure
 * @f_info: filter information for switch rule
 *
 * Call AQ command to update a previously created switch rule with a
 * VSI list ID
 */
static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	int status;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw),
			      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);

	/* target the already-programmed rule by its FW-assigned ID */
	s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);

	/* Update switch rule with new rule set to forward VSI list */
	status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_update_sw_rule_bridge_mode
 * @hw: pointer to the HW struct
 *
 * Updates unicast switch filter rules based on VEB/VEPA mode
 */
int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	mutex_lock(rule_lock);
	list_for_each_entry(fm_entry, rule_head, list_entry) {
		struct ice_fltr_info *fi = &fm_entry->fltr_info;
		u8 *addr = fi->l_data.mac.mac_addr;

		/* Update unicast Tx rules to reflect the selected
		 * VEB/VEPA mode
		 */
		if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
		    (fi->fltr_act == ICE_FWD_TO_VSI ||
		     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
		     fi->fltr_act == ICE_FWD_TO_Q ||
		     fi->fltr_act == ICE_FWD_TO_QGRP)) {
			status = ice_update_pkt_fwd_rule(hw, fi);
			if (status)
				break;
		}
	}

	mutex_unlock(rule_lock);

	return status;
}

/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status = 0;

	/* queue-targeted rules cannot be shared across VSIs */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return -EOPNOTSUPP;

	/* and a queue action cannot be mixed into a VSI-forwarding rule */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* reflect the conversion in the book keeping entry */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		if (!m_entry->vsi_list_info)
			return -ENOMEM;

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
new_fltr->lkup_type); 2134 /* update VSI list mapping info with new VSI ID */ 2135 if (!status) 2136 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); 2137 } 2138 if (!status) 2139 m_entry->vsi_count++; 2140 return status; 2141 } 2142 2143 /** 2144 * ice_find_rule_entry - Search a rule entry 2145 * @hw: pointer to the hardware structure 2146 * @recp_id: lookup type for which the specified rule needs to be searched 2147 * @f_info: rule information 2148 * 2149 * Helper function to search for a given rule entry 2150 * Returns pointer to entry storing the rule if found 2151 */ 2152 static struct ice_fltr_mgmt_list_entry * 2153 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) 2154 { 2155 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; 2156 struct ice_switch_info *sw = hw->switch_info; 2157 struct list_head *list_head; 2158 2159 list_head = &sw->recp_list[recp_id].filt_rules; 2160 list_for_each_entry(list_itr, list_head, list_entry) { 2161 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, 2162 sizeof(f_info->l_data)) && 2163 f_info->flag == list_itr->fltr_info.flag) { 2164 ret = list_itr; 2165 break; 2166 } 2167 } 2168 return ret; 2169 } 2170 2171 /** 2172 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 2173 * @hw: pointer to the hardware structure 2174 * @recp_id: lookup type for which VSI lists needs to be searched 2175 * @vsi_handle: VSI handle to be found in VSI list 2176 * @vsi_list_id: VSI list ID found containing vsi_handle 2177 * 2178 * Helper function to search a VSI list with single entry containing given VSI 2179 * handle element. This can be extended further to search VSI list with more 2180 * than 1 vsi_count. Returns pointer to VSI list entry if found. 
2181 */ 2182 static struct ice_vsi_list_map_info * 2183 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, 2184 u16 *vsi_list_id) 2185 { 2186 struct ice_vsi_list_map_info *map_info = NULL; 2187 struct ice_switch_info *sw = hw->switch_info; 2188 struct ice_fltr_mgmt_list_entry *list_itr; 2189 struct list_head *list_head; 2190 2191 list_head = &sw->recp_list[recp_id].filt_rules; 2192 list_for_each_entry(list_itr, list_head, list_entry) { 2193 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { 2194 map_info = list_itr->vsi_list_info; 2195 if (test_bit(vsi_handle, map_info->vsi_map)) { 2196 *vsi_list_id = map_info->vsi_list_id; 2197 return map_info; 2198 } 2199 } 2200 } 2201 return NULL; 2202 } 2203 2204 /** 2205 * ice_add_rule_internal - add rule for a given lookup type 2206 * @hw: pointer to the hardware structure 2207 * @recp_id: lookup type (recipe ID) for which rule has to be added 2208 * @f_entry: structure containing MAC forwarding information 2209 * 2210 * Adds or updates the rule lists for a given recipe 2211 */ 2212 static int 2213 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, 2214 struct ice_fltr_list_entry *f_entry) 2215 { 2216 struct ice_switch_info *sw = hw->switch_info; 2217 struct ice_fltr_info *new_fltr, *cur_fltr; 2218 struct ice_fltr_mgmt_list_entry *m_entry; 2219 struct mutex *rule_lock; /* Lock to protect filter rule list */ 2220 int status = 0; 2221 2222 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) 2223 return -EINVAL; 2224 f_entry->fltr_info.fwd_id.hw_vsi_id = 2225 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); 2226 2227 rule_lock = &sw->recp_list[recp_id].filt_rule_lock; 2228 2229 mutex_lock(rule_lock); 2230 new_fltr = &f_entry->fltr_info; 2231 if (new_fltr->flag & ICE_FLTR_RX) 2232 new_fltr->src = hw->port_info->lport; 2233 else if (new_fltr->flag & ICE_FLTR_TX) 2234 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id; 2235 2236 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr); 2237 if 
(!m_entry) { 2238 mutex_unlock(rule_lock); 2239 return ice_create_pkt_fwd_rule(hw, f_entry); 2240 } 2241 2242 cur_fltr = &m_entry->fltr_info; 2243 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); 2244 mutex_unlock(rule_lock); 2245 2246 return status; 2247 } 2248 2249 /** 2250 * ice_remove_vsi_list_rule 2251 * @hw: pointer to the hardware structure 2252 * @vsi_list_id: VSI list ID generated as part of allocate resource 2253 * @lkup_type: switch rule filter lookup type 2254 * 2255 * The VSI list should be emptied before this function is called to remove the 2256 * VSI list. 2257 */ 2258 static int 2259 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, 2260 enum ice_sw_lkup_type lkup_type) 2261 { 2262 struct ice_aqc_sw_rules_elem *s_rule; 2263 u16 s_rule_size; 2264 int status; 2265 2266 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); 2267 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); 2268 if (!s_rule) 2269 return -ENOMEM; 2270 2271 s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); 2272 s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); 2273 2274 /* Free the vsi_list resource that we allocated. It is assumed that the 2275 * list is empty at this point. 
2276 */ 2277 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, 2278 ice_aqc_opc_free_res); 2279 2280 devm_kfree(ice_hw_to_dev(hw), s_rule); 2281 return status; 2282 } 2283 2284 /** 2285 * ice_rem_update_vsi_list 2286 * @hw: pointer to the hardware structure 2287 * @vsi_handle: VSI handle of the VSI to remove 2288 * @fm_list: filter management entry for which the VSI list management needs to 2289 * be done 2290 */ 2291 static int 2292 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, 2293 struct ice_fltr_mgmt_list_entry *fm_list) 2294 { 2295 enum ice_sw_lkup_type lkup_type; 2296 u16 vsi_list_id; 2297 int status = 0; 2298 2299 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || 2300 fm_list->vsi_count == 0) 2301 return -EINVAL; 2302 2303 /* A rule with the VSI being removed does not exist */ 2304 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map)) 2305 return -ENOENT; 2306 2307 lkup_type = fm_list->fltr_info.lkup_type; 2308 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; 2309 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, 2310 ice_aqc_opc_update_sw_rules, 2311 lkup_type); 2312 if (status) 2313 return status; 2314 2315 fm_list->vsi_count--; 2316 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); 2317 2318 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { 2319 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; 2320 struct ice_vsi_list_map_info *vsi_list_info = 2321 fm_list->vsi_list_info; 2322 u16 rem_vsi_handle; 2323 2324 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map, 2325 ICE_MAX_VSI); 2326 if (!ice_is_vsi_valid(hw, rem_vsi_handle)) 2327 return -EIO; 2328 2329 /* Make sure VSI list is empty before removing it below */ 2330 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, 2331 vsi_list_id, true, 2332 ice_aqc_opc_update_sw_rules, 2333 lkup_type); 2334 if (status) 2335 return status; 2336 2337 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; 2338 
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	/* The VSI list is no longer referenced: non-VLAN rules were converted
	 * to FWD_TO_VSI above when one VSI remained; VLAN rules keep the list
	 * until it is completely empty.
	 */
	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_remove_rule_internal - Remove a filter rule of a given type
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @f_entry: rule entry containing filter information
 *
 * Return: 0 on success, -ENOENT if the rule was never added, or another
 * negative error code on failure.
 */
static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
			 struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_elem;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	bool remove_rule = false;
	u16 vsi_handle;
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);

	rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
	mutex_lock(rule_lock);
	list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
	if (!list_elem) {
		status = -ENOENT;
		goto exit;
	}

	if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* direct-forwarding rule: just delete it */
		remove_rule = true;
	} else if (!list_elem->vsi_list_info) {
		status = -ENOENT;
		goto exit;
	} else if (list_elem->vsi_list_info->ref_cnt > 1) {
		/* a ref_cnt > 1 indicates that the vsi_list is being
		 * shared by multiple rules. Decrement the ref_cnt and
		 * remove this rule, but do not modify the list, as it
		 * is in-use by other rules.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = true;
	} else {
		/* a ref_cnt of 1 indicates the vsi_list is only used
		 * by one rule. However, the original removal request is only
		 * for a single VSI. Update the vsi_list first, and only
		 * remove the rule if there are no further VSIs in this list.
		 */
		vsi_handle = f_entry->fltr_info.vsi_handle;
		status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status)
			goto exit;
		/* if VSI count goes to zero after updating the VSI list */
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}

	if (remove_rule) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule) {
			status = -ENOMEM;
			goto exit;
		}

		ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);

		/* Remove a book keeping from the list */
		devm_kfree(ice_hw_to_dev(hw), s_rule);

		if (status)
			goto exit;

		list_del(&list_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_elem);
	}
exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @mac: MAC address to be checked (for MAC filter)
 * @vsi_handle: check MAC filter for this VSI
 *
 * Return: true if a Tx FWD_TO_VSI MAC rule for @mac and @vsi_handle exists.
 */
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_zero_ether_addr(mac_addr))
			continue;

		/* only consider Tx, VSI-sourced, MAC lookup rules that
		 * forward directly to this VSI
		 */
		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_MAC ||
		    f_info->fltr_act != ICE_FWD_TO_VSI ||
		    hw_vsi_id != f_info->fwd_id.hw_vsi_id)
			continue;

		if (ether_addr_equal(mac, mac_addr)) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);
	return false;
}

/**
 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
 * @hw: pointer to the hardware structure
 * @vlan_id: VLAN ID
 * @vsi_handle: check VLAN filter for this VSI
 *
 * Return: true if a matching VLAN rule covering @vsi_handle exists.
 */
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
{
	struct ice_fltr_mgmt_list_entry *entry;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /*
Lock to protect filter rule list */
	u16 hw_vsi_id;

	if (vlan_id > ICE_MAX_VLAN_ID)
		return false;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return false;

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	sw = hw->switch_info;
	rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	if (!rule_head)
		return false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	list_for_each_entry(entry, rule_head, list_entry) {
		struct ice_fltr_info *f_info = &entry->fltr_info;
		u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
		struct ice_vsi_list_map_info *map_info;

		if (entry_vlan_id > ICE_MAX_VLAN_ID)
			continue;

		if (f_info->flag != ICE_FLTR_TX ||
		    f_info->src_id != ICE_SRC_ID_VSI ||
		    f_info->lkup_type != ICE_SW_LKUP_VLAN)
			continue;

		/* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
		if (f_info->fltr_act != ICE_FWD_TO_VSI &&
		    f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
			continue;

		if (f_info->fltr_act == ICE_FWD_TO_VSI) {
			if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
				continue;
		} else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			/* If filter_action is FWD_TO_VSI_LIST, make sure
			 * that VSI being checked is part of VSI list
			 */
			if (entry->vsi_count == 1 &&
			    entry->vsi_list_info) {
				map_info = entry->vsi_list_info;
				if (!test_bit(vsi_handle, map_info->vsi_map))
					continue;
			}
		}

		if (vlan_id == entry_vlan_id) {
			mutex_unlock(rule_lock);
			return true;
		}
	}
	mutex_unlock(rule_lock);

	return false;
}

/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct list_head *rule_head;
	u16 total_elem_left, s_rule_size;
	struct ice_switch_info *sw;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 num_unicast = 0;
	int status = 0;
	u8 elem_sent;

	if (!m_list || !hw)
		return -EINVAL;

	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate every entry; add multicast (and shared
	 * unicast) entries one by one, and count exclusive unicast entries
	 * for a bulk firmware update below.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return -EINVAL;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    is_zero_ether_addr(add))
			return -EINVAL;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			mutex_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -EEXIST;
			}
			mutex_unlock(rule_lock);
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	mutex_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = 0;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule) {
		status = -ENOMEM;
		goto ice_add_mac_exit;
	}

	/* Second pass: serialize each exclusive-unicast rule into the bulk
	 * buffer back to back.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* cap each command at what fits in one AQ buffer */
		elem_sent = min_t(u8, total_elem_left,
				  (ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(mac_addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = -ENOMEM;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			list_add(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	mutex_unlock(rule_lock);
	if (s_rule)
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return -EINVAL;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return -EINVAL;

	if (new_fltr->src_id !=
ICE_SRC_ID_VSI)
		return -EINVAL;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* look up the book keeping entry the rule creation
			 * just added so it can be tied to its VSI list
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = -ENOENT;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = -EIO;
			goto exit;
		}

		cur_handle =
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
				       ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = -EEXIST;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	mutex_unlock(rule_lock);
	return status;
}

/**
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Return: 0 on success; on failure, the failing entry's status code
 * (processing stops at the first failure).
 */
int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr;

	if (!v_list || !hw)
		return -EINVAL;

	list_for_each_entry(v_list_itr, v_list, list_entry) {
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
			return -EINVAL;
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
 */
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)
		return -EINVAL;

	list_for_each_entry(em_list_itr, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return -EINVAL;

		em_list_itr->status = ice_add_rule_internal(hw, l_type,
							    em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 *
 * Return: 0 on success; on failure, the failing entry's status code
 * (processing stops at the first failure).
 */
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)
		return -EINVAL;

	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)
			return -EINVAL;

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
							       em_list_itr);
		if (em_list_itr->status)
			return em_list_itr->status;
	}
	return 0;
}

/**
 * ice_rem_sw_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every book keeping entry on @rule_head, emptying the list.
 */
static void
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
		}
	}
}

/**
 * ice_rem_adv_rule_info
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Frees every advanced-rule entry on @rule_head (including each entry's
 * lookup array), emptying the list.
 */
static void
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))
		return;

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
	}
}

/**
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 */
int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
	struct ice_aqc_sw_rules_elem *s_rule;
	struct ice_fltr_info f_info;
	enum ice_adminq_opc opcode;
	u16 s_rule_size;
	u16 hw_vsi_id;
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	/* adding a rule carries a full dummy ethernet header; removal only
	 * needs the headerless rule element
	 */
	s_rule_size = set ?
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
		      ICE_SW_RULE_RX_TX_NO_HDR_SIZE;

	s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;

	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
		if (!set)
			/* removal must reference the rule ID recorded when
			 * the default-VSI rule was added
			 */
			f_info.fltr_rule_id =
				hw->port_info->dflt_rx_vsi_rule_id;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
		if (!set)
			f_info.fltr_rule_id =
				hw->port_info->dflt_tx_vsi_rule_id;
	}

	if (set)
		opcode = ice_aqc_opc_add_sw_rules;
	else
		opcode = ice_aqc_opc_remove_sw_rules;

	ice_fill_sw_rule(hw, &f_info, s_rule, opcode);

	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
	if (status || !(f_info.flag & ICE_FLTR_TX_RX))
		goto out;
	/* mirror the new default-VSI state into the port info book keeping */
	if (set) {
		u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);

		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_tx_vsi_rule_id = index;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
			hw->port_info->dflt_rx_vsi_rule_id = index;
		}
	} else {
		if (f_info.flag & ICE_FLTR_TX) {
			hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
		} else if (f_info.flag & ICE_FLTR_RX) {
			hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
			hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}

/**
 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
 * @hw: pointer to the hardware structure
 * @recp_id: lookup type for which the specified rule needs to be searched
 * @f_info: rule information
 *
 * Helper function to search for a unicast rule entry - this is to be used
 * to remove unicast MAC filter that is not shared with other VSIs on the
 * PF switch.
 *
 * Returns pointer to entry storing the rule if found
 *
 * Unlike ice_find_rule_entry(), this also matches on the forwarding HW VSI
 * ID, so the same MAC on different VSIs is treated as distinct rules.
 */
static struct ice_fltr_mgmt_list_entry *
ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
			  struct ice_fltr_info *f_info)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;

	list_head = &sw->recp_list[recp_id].filt_rules;
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
			    sizeof(f_info->l_data)) &&
		    f_info->fwd_id.hw_vsi_id ==
		    list_itr->fltr_info.fwd_id.hw_vsi_id &&
		    f_info->flag == list_itr->fltr_info.flag)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
 * be aware that this call will only work if all the entries passed into m_list
 * were added previously. It will not attempt to do a partial remove of entries
 * that were found.
 */
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	if (!m_list)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
		u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;

		/* only MAC lookup entries are accepted on this list */
		if (l_type != ICE_SW_LKUP_MAC)
			return -EINVAL;

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;

		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't remove the unicast address that belongs to
			 * another VSI on the switch, since it is not being
			 * shared...
			 */
			mutex_lock(rule_lock);
			if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
						       &list_itr->fltr_info)) {
				mutex_unlock(rule_lock);
				return -ENOENT;
			}
			mutex_unlock(rule_lock);
		}
		/* per-entry status is stored so the caller can inspect it */
		list_itr->status = ice_remove_rule_internal(hw,
							    ICE_SW_LKUP_MAC,
							    list_itr);
		if (list_itr->status)
			return list_itr->status;
	}
	return 0;
}

/**
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 */
int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	if (!v_list || !hw)
		return -EINVAL;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)
			return -EINVAL;
		v_list_itr->status = ice_remove_rule_internal(hw,
							      ICE_SW_LKUP_VLAN,
							      v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 *
 * True when the filter forwards directly to @vsi_handle, or forwards to a
 * VSI list whose bitmap contains @vsi_handle.
 */
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
{
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 fm_entry->vsi_list_info &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
}

/**
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 *
@fi: pointer to fltr_info of filter entry to copy & add 3234 * 3235 * Helper function, used when creating a list of filters to remove from 3236 * a specific VSI. The entry added to vsi_list_head is a COPY of the 3237 * original filter entry, with the exception of fltr_info.fltr_act and 3238 * fltr_info.fwd_id fields. These are set such that later logic can 3239 * extract which VSI to remove the fltr from, and pass on that information. 3240 */ 3241 static int 3242 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, 3243 struct list_head *vsi_list_head, 3244 struct ice_fltr_info *fi) 3245 { 3246 struct ice_fltr_list_entry *tmp; 3247 3248 /* this memory is freed up in the caller function 3249 * once filters for this VSI are removed 3250 */ 3251 tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); 3252 if (!tmp) 3253 return -ENOMEM; 3254 3255 tmp->fltr_info = *fi; 3256 3257 /* Overwrite these fields to indicate which VSI to remove filter from, 3258 * so find and remove logic can extract the information from the 3259 * list entries. Note that original entries will still have proper 3260 * values. 3261 */ 3262 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; 3263 tmp->fltr_info.vsi_handle = vsi_handle; 3264 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3265 3266 list_add(&tmp->list_entry, vsi_list_head); 3267 3268 return 0; 3269 } 3270 3271 /** 3272 * ice_add_to_vsi_fltr_list - Add VSI filters to the list 3273 * @hw: pointer to the hardware structure 3274 * @vsi_handle: VSI handle to remove filters from 3275 * @lkup_list_head: pointer to the list that has certain lookup type filters 3276 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle 3277 * 3278 * Locates all filters in lkup_list_head that are used by the given VSI, 3279 * and adds COPIES of those entries to vsi_list_head (intended to be used 3280 * to remove the listed filters). 
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 */
static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
{
	struct ice_fltr_mgmt_list_entry *fm_entry;
	int status = 0;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							vsi_list_head,
							&fm_entry->fltr_info);
		if (status)
			return status;
	}
	return status;
}

/**
 * ice_determine_promisc_mask
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter info.
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)
		is_tx_fltr = true;

	/* classify by the filter's destination address class, then add the
	 * VLAN bit independently when a VLAN ID is present
	 */
	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
	if (vid)
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
}

/**
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 */
static int
ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
	}
	return 0;
}

/**
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 */
int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
		      u16 vid)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status = 0;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* VLAN promisc uses a different recipe than plain promisc */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	else
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	/* collect matching rules under the lock, remove them after release */
	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
			continue;
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)
			continue;

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)
			continue;

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&remove_list_head,
							fltr_info);
		if (status) {
			mutex_unlock(rule_lock);
			goto free_fltr_list;
		}
	}
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}

	return status;
}

/**
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 */
int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;
	bool is_tx_fltr;
	int status = 0;
	u16 hw_vsi_id;
	int pkt_type;
	u8 recipe_id;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
	} else {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;
	}

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {
		u8 *mac_addr;

		pkt_type = 0;
		is_tx_fltr = false;

		/* consume exactly one direction/type bit per iteration */
		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
			is_tx_fltr = true;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;
			is_tx_fltr = true;
		}

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;
			is_tx_fltr = true;
		}

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1;	/* Set multicast bit */
		}

		/* Need to reset this to zero for all iterations */
		new_fltr.flag = 0;
		if (is_tx_fltr) {
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
		} else {
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;
		}

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
		if (status)
			goto set_promisc_exit;
	}

set_promisc_exit:
	return status;
}

/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */
	u16 vlan_id;
	int status;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	mutex_unlock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* apply (or clear) the requested promisc mode per associated VLAN */
	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
	}
	return status;
}

/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	int status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	/* snapshot the filters used by this VSI, then remove outside lock */
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	mutex_unlock(rule_lock);
	if (status)
		goto free_fltr_list;

	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
	default:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
		break;
	}

free_fltr_list:
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
	}
}

/**
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
{
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
}

/**
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 */
int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		   u16 *counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto exit;

	/* FW returns the allocated counter index in the response element */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);

exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 */
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
		  u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	/* Free resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	/* identify which counter to release */
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");

	kfree(buf);
	return status;
}

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_ETYPE_OL,		{ 0 } },
	{ ICE_VLAN_OFOS,	{ 2, 0 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_OF,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12, 14 } },
	{ ICE_GENEVE,		{ 8, 10, 12, 14 } },
	{ ICE_NVGRE,		{ 0, 2, 4, 6 } },
};

/* Maps each SW protocol type to the HW protocol ID used in field vectors.
 * Note: tunnel types (VXLAN/GENEVE) share the outer-UDP HW ID.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
};

/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
3797 */ 3798 static u16 3799 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, 3800 enum ice_sw_tunnel_type tun_type) 3801 { 3802 bool refresh_required = true; 3803 struct ice_sw_recipe *recp; 3804 u8 i; 3805 3806 /* Walk through existing recipes to find a match */ 3807 recp = hw->switch_info->recp_list; 3808 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 3809 /* If recipe was not created for this ID, in SW bookkeeping, 3810 * check if FW has an entry for this recipe. If the FW has an 3811 * entry update it in our SW bookkeeping and continue with the 3812 * matching. 3813 */ 3814 if (!recp[i].recp_created) 3815 if (ice_get_recp_frm_fw(hw, 3816 hw->switch_info->recp_list, i, 3817 &refresh_required)) 3818 continue; 3819 3820 /* Skip inverse action recipes */ 3821 if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & 3822 ICE_AQ_RECIPE_ACT_INV_ACT) 3823 continue; 3824 3825 /* if number of words we are looking for match */ 3826 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { 3827 struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; 3828 struct ice_fv_word *be = lkup_exts->fv_words; 3829 u16 *cr = recp[i].lkup_exts.field_mask; 3830 u16 *de = lkup_exts->field_mask; 3831 bool found = true; 3832 u8 pe, qr; 3833 3834 /* ar, cr, and qr are related to the recipe words, while 3835 * be, de, and pe are related to the lookup words 3836 */ 3837 for (pe = 0; pe < lkup_exts->n_val_words; pe++) { 3838 for (qr = 0; qr < recp[i].lkup_exts.n_val_words; 3839 qr++) { 3840 if (ar[qr].off == be[pe].off && 3841 ar[qr].prot_id == be[pe].prot_id && 3842 cr[qr] == de[pe]) 3843 /* Found the "pe"th word in the 3844 * given recipe 3845 */ 3846 break; 3847 } 3848 /* After walking through all the words in the 3849 * "i"th recipe if "p"th word was not found then 3850 * this recipe is not what we are looking for. 
3851 * So break out from this loop and try the next 3852 * recipe 3853 */ 3854 if (qr >= recp[i].lkup_exts.n_val_words) { 3855 found = false; 3856 break; 3857 } 3858 } 3859 /* If for "i"th recipe the found was never set to false 3860 * then it means we found our match 3861 * Also tun type of recipe needs to be checked 3862 */ 3863 if (found && recp[i].tun_type == tun_type) 3864 return i; /* Return the recipe ID */ 3865 } 3866 } 3867 return ICE_MAX_NUM_RECIPES; 3868 } 3869 3870 /** 3871 * ice_prot_type_to_id - get protocol ID from protocol type 3872 * @type: protocol type 3873 * @id: pointer to variable that will receive the ID 3874 * 3875 * Returns true if found, false otherwise 3876 */ 3877 static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id) 3878 { 3879 u8 i; 3880 3881 for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++) 3882 if (ice_prot_id_tbl[i].type == type) { 3883 *id = ice_prot_id_tbl[i].protocol_id; 3884 return true; 3885 } 3886 return false; 3887 } 3888 3889 /** 3890 * ice_fill_valid_words - count valid words 3891 * @rule: advanced rule with lookup information 3892 * @lkup_exts: byte offset extractions of the words that are valid 3893 * 3894 * calculate valid words in a lookup rule using mask value 3895 */ 3896 static u8 3897 ice_fill_valid_words(struct ice_adv_lkup_elem *rule, 3898 struct ice_prot_lkup_ext *lkup_exts) 3899 { 3900 u8 j, word, prot_id, ret_val; 3901 3902 if (!ice_prot_type_to_id(rule->type, &prot_id)) 3903 return 0; 3904 3905 word = lkup_exts->n_val_words; 3906 3907 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++) 3908 if (((u16 *)&rule->m_u)[j] && 3909 rule->type < ARRAY_SIZE(ice_prot_ext)) { 3910 /* No more space to accommodate */ 3911 if (word >= ICE_MAX_CHAIN_WORDS) 3912 return 0; 3913 lkup_exts->fv_words[word].off = 3914 ice_prot_ext[rule->type].offs[j]; 3915 lkup_exts->fv_words[word].prot_id = 3916 ice_prot_id_tbl[rule->type].protocol_id; 3917 lkup_exts->field_mask[word] = 3918 be16_to_cpu(((__force __be16 
*)&rule->m_u)[j]); 3919 word++; 3920 } 3921 3922 ret_val = word - lkup_exts->n_val_words; 3923 lkup_exts->n_val_words = word; 3924 3925 return ret_val; 3926 } 3927 3928 /** 3929 * ice_create_first_fit_recp_def - Create a recipe grouping 3930 * @hw: pointer to the hardware structure 3931 * @lkup_exts: an array of protocol header extractions 3932 * @rg_list: pointer to a list that stores new recipe groups 3933 * @recp_cnt: pointer to a variable that stores returned number of recipe groups 3934 * 3935 * Using first fit algorithm, take all the words that are still not done 3936 * and start grouping them in 4-word groups. Each group makes up one 3937 * recipe. 3938 */ 3939 static int 3940 ice_create_first_fit_recp_def(struct ice_hw *hw, 3941 struct ice_prot_lkup_ext *lkup_exts, 3942 struct list_head *rg_list, 3943 u8 *recp_cnt) 3944 { 3945 struct ice_pref_recipe_group *grp = NULL; 3946 u8 j; 3947 3948 *recp_cnt = 0; 3949 3950 /* Walk through every word in the rule to check if it is not done. If so 3951 * then this word needs to be part of a new recipe. 
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!test_bit(j, lkup_exts->done)) {
			/* open a new group when none exists yet or the
			 * current one already holds ICE_NUM_WORDS_RECIPE pairs
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				entry = devm_kzalloc(ice_hw_to_dev(hw),
						     sizeof(*entry),
						     GFP_KERNEL);
				if (!entry)
					return -ENOMEM;
				list_add(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
			grp->n_val_pairs++;
		}

	return 0;
}

/**
 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
 * @hw: pointer to the hardware structure
 * @fv_list: field vector with the extraction sequence information
 * @rg_list: recipe groupings with protocol-offset pairs
 *
 * Helper function to fill in the field vector indices for protocol-offset
 * pairs. These indexes are then ultimately programmed into a recipe.
3988 */ 3989 static int 3990 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, 3991 struct list_head *rg_list) 3992 { 3993 struct ice_sw_fv_list_entry *fv; 3994 struct ice_recp_grp_entry *rg; 3995 struct ice_fv_word *fv_ext; 3996 3997 if (list_empty(fv_list)) 3998 return 0; 3999 4000 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, 4001 list_entry); 4002 fv_ext = fv->fv_ptr->ew; 4003 4004 list_for_each_entry(rg, rg_list, l_entry) { 4005 u8 i; 4006 4007 for (i = 0; i < rg->r_group.n_val_pairs; i++) { 4008 struct ice_fv_word *pr; 4009 bool found = false; 4010 u16 mask; 4011 u8 j; 4012 4013 pr = &rg->r_group.pairs[i]; 4014 mask = rg->r_group.mask[i]; 4015 4016 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) 4017 if (fv_ext[j].prot_id == pr->prot_id && 4018 fv_ext[j].off == pr->off) { 4019 found = true; 4020 4021 /* Store index of field vector */ 4022 rg->fv_idx[i] = j; 4023 rg->fv_mask[i] = mask; 4024 break; 4025 } 4026 4027 /* Protocol/offset could not be found, caller gave an 4028 * invalid pair 4029 */ 4030 if (!found) 4031 return -EINVAL; 4032 } 4033 } 4034 4035 return 0; 4036 } 4037 4038 /** 4039 * ice_find_free_recp_res_idx - find free result indexes for recipe 4040 * @hw: pointer to hardware structure 4041 * @profiles: bitmap of profiles that will be associated with the new recipe 4042 * @free_idx: pointer to variable to receive the free index bitmap 4043 * 4044 * The algorithm used here is: 4045 * 1. When creating a new recipe, create a set P which contains all 4046 * Profiles that will be associated with our new recipe 4047 * 4048 * 2. For each Profile p in set P: 4049 * a. Add all recipes associated with Profile p into set R 4050 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes 4051 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF] 4052 * i. Or just assume they all have the same possible indexes: 4053 * 44, 45, 46, 47 4054 * i.e., PossibleIndexes = 0x0000F00000000000 4055 * 4056 * 3. 
 * For each Recipe r in set R:
 *    a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
 *    b. FreeIndexes = UsedIndexes ^ PossibleIndexes
 *
 * FreeIndexes will contain the bits indicating the indexes free for use,
 * then the code needs to update the recipe[r].used_result_idx_bits to
 * indicate which indexes were selected for use by this recipe.
 */
static u16
ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
			   unsigned long *free_idx)
{
	DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
	DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
	DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
	u16 bit;

	bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
	bitmap_zero(used_idx, ICE_MAX_FV_WORDS);

	/* start from "all indexes possible"; profiles narrow this down */
	bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);

	/* For each profile we are going to associate the recipe with, add the
	 * recipes that are associated with that profile. This will give us
	 * the set of recipes that our recipe may collide with. Also, determine
	 * what possible result indexes are usable given this set of profiles.
	 */
	for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
		bitmap_or(recipes, recipes, profile_to_recipe[bit],
			  ICE_MAX_NUM_RECIPES);
		bitmap_and(possible_idx, possible_idx,
			   hw->switch_info->prof_res_bm[bit],
			   ICE_MAX_FV_WORDS);
	}

	/* For each recipe that our new recipe may collide with, determine
	 * which indexes have been used.
	 */
	for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
		bitmap_or(used_idx, used_idx,
			  hw->switch_info->recp_list[bit].res_idxs,
			  ICE_MAX_FV_WORDS);

	bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);

	/* return number of free indexes */
	return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
}

/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @profiles: bitmap of profiles that will be associated.
 */
static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  unsigned long *profiles)
{
	DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	u16 free_res_idx;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;
	int status;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
4129 */ 4130 /* check number of free result indices */ 4131 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); 4132 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); 4133 4134 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", 4135 free_res_idx, rm->n_grp_count); 4136 4137 if (rm->n_grp_count > 1) { 4138 if (rm->n_grp_count > free_res_idx) 4139 return -ENOSPC; 4140 4141 rm->n_grp_count++; 4142 } 4143 4144 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) 4145 return -ENOSPC; 4146 4147 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); 4148 if (!tmp) 4149 return -ENOMEM; 4150 4151 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), 4152 GFP_KERNEL); 4153 if (!buf) { 4154 status = -ENOMEM; 4155 goto err_mem; 4156 } 4157 4158 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); 4159 recipe_count = ICE_MAX_NUM_RECIPES; 4160 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, 4161 NULL); 4162 if (status || recipe_count == 0) 4163 goto err_unroll; 4164 4165 /* Allocate the recipe resources, and configure them according to the 4166 * match fields from protocol headers and extracted field vectors. 4167 */ 4168 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); 4169 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4170 u8 i; 4171 4172 status = ice_alloc_recipe(hw, &entry->rid); 4173 if (status) 4174 goto err_unroll; 4175 4176 /* Clear the result index of the located recipe, as this will be 4177 * updated, if needed, later in the recipe creation process. 4178 */ 4179 tmp[0].content.result_indx = 0; 4180 4181 buf[recps] = tmp[0]; 4182 buf[recps].recipe_indx = (u8)entry->rid; 4183 /* if the recipe is a non-root recipe RID should be programmed 4184 * as 0 for the rules to be applied correctly. 4185 */ 4186 buf[recps].content.rid = 0; 4187 memset(&buf[recps].content.lkup_indx, 0, 4188 sizeof(buf[recps].content.lkup_indx)); 4189 4190 /* All recipes use look-up index 0 to match switch ID. 
*/ 4191 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 4192 buf[recps].content.mask[0] = 4193 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 4194 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask 4195 * to be 0 4196 */ 4197 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 4198 buf[recps].content.lkup_indx[i] = 0x80; 4199 buf[recps].content.mask[i] = 0; 4200 } 4201 4202 for (i = 0; i < entry->r_group.n_val_pairs; i++) { 4203 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; 4204 buf[recps].content.mask[i + 1] = 4205 cpu_to_le16(entry->fv_mask[i]); 4206 } 4207 4208 if (rm->n_grp_count > 1) { 4209 /* Checks to see if there really is a valid result index 4210 * that can be used. 4211 */ 4212 if (chain_idx >= ICE_MAX_FV_WORDS) { 4213 ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); 4214 status = -ENOSPC; 4215 goto err_unroll; 4216 } 4217 4218 entry->chain_idx = chain_idx; 4219 buf[recps].content.result_indx = 4220 ICE_AQ_RECIPE_RESULT_EN | 4221 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & 4222 ICE_AQ_RECIPE_RESULT_DATA_M); 4223 clear_bit(chain_idx, result_idx_bm); 4224 chain_idx = find_first_bit(result_idx_bm, 4225 ICE_MAX_FV_WORDS); 4226 } 4227 4228 /* fill recipe dependencies */ 4229 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, 4230 ICE_MAX_NUM_RECIPES); 4231 set_bit(buf[recps].recipe_indx, 4232 (unsigned long *)buf[recps].recipe_bitmap); 4233 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 4234 recps++; 4235 } 4236 4237 if (rm->n_grp_count == 1) { 4238 rm->root_rid = buf[0].recipe_indx; 4239 set_bit(buf[0].recipe_indx, rm->r_bitmap); 4240 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; 4241 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { 4242 memcpy(buf[0].recipe_bitmap, rm->r_bitmap, 4243 sizeof(buf[0].recipe_bitmap)); 4244 } else { 4245 status = -EINVAL; 4246 goto err_unroll; 4247 } 4248 /* Applicable only for ROOT_RECIPE, set the fwd_priority for 4249 * the recipe which is getting created if 
specified 4250 * by user. Usually any advanced switch filter, which results 4251 * into new extraction sequence, ended up creating a new recipe 4252 * of type ROOT and usually recipes are associated with profiles 4253 * Switch rule referreing newly created recipe, needs to have 4254 * either/or 'fwd' or 'join' priority, otherwise switch rule 4255 * evaluation will not happen correctly. In other words, if 4256 * switch rule to be evaluated on priority basis, then recipe 4257 * needs to have priority, otherwise it will be evaluated last. 4258 */ 4259 buf[0].content.act_ctrl_fwd_priority = rm->priority; 4260 } else { 4261 struct ice_recp_grp_entry *last_chain_entry; 4262 u16 rid, i; 4263 4264 /* Allocate the last recipe that will chain the outcomes of the 4265 * other recipes together 4266 */ 4267 status = ice_alloc_recipe(hw, &rid); 4268 if (status) 4269 goto err_unroll; 4270 4271 buf[recps].recipe_indx = (u8)rid; 4272 buf[recps].content.rid = (u8)rid; 4273 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; 4274 /* the new entry created should also be part of rg_list to 4275 * make sure we have complete recipe 4276 */ 4277 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), 4278 sizeof(*last_chain_entry), 4279 GFP_KERNEL); 4280 if (!last_chain_entry) { 4281 status = -ENOMEM; 4282 goto err_unroll; 4283 } 4284 last_chain_entry->rid = rid; 4285 memset(&buf[recps].content.lkup_indx, 0, 4286 sizeof(buf[recps].content.lkup_indx)); 4287 /* All recipes use look-up index 0 to match switch ID. 
*/ 4288 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; 4289 buf[recps].content.mask[0] = 4290 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); 4291 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { 4292 buf[recps].content.lkup_indx[i] = 4293 ICE_AQ_RECIPE_LKUP_IGNORE; 4294 buf[recps].content.mask[i] = 0; 4295 } 4296 4297 i = 1; 4298 /* update r_bitmap with the recp that is used for chaining */ 4299 set_bit(rid, rm->r_bitmap); 4300 /* this is the recipe that chains all the other recipes so it 4301 * should not have a chaining ID to indicate the same 4302 */ 4303 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; 4304 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4305 last_chain_entry->fv_idx[i] = entry->chain_idx; 4306 buf[recps].content.lkup_indx[i] = entry->chain_idx; 4307 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF); 4308 set_bit(entry->rid, rm->r_bitmap); 4309 } 4310 list_add(&last_chain_entry->l_entry, &rm->rg_list); 4311 if (sizeof(buf[recps].recipe_bitmap) >= 4312 sizeof(rm->r_bitmap)) { 4313 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, 4314 sizeof(buf[recps].recipe_bitmap)); 4315 } else { 4316 status = -EINVAL; 4317 goto err_unroll; 4318 } 4319 buf[recps].content.act_ctrl_fwd_priority = rm->priority; 4320 4321 recps++; 4322 rm->root_rid = (u8)rid; 4323 } 4324 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 4325 if (status) 4326 goto err_unroll; 4327 4328 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); 4329 ice_release_change_lock(hw); 4330 if (status) 4331 goto err_unroll; 4332 4333 /* Every recipe that just got created add it to the recipe 4334 * book keeping list 4335 */ 4336 list_for_each_entry(entry, &rm->rg_list, l_entry) { 4337 struct ice_switch_info *sw = hw->switch_info; 4338 bool is_root, idx_found = false; 4339 struct ice_sw_recipe *recp; 4340 u16 idx, buf_idx = 0; 4341 4342 /* find buffer index for copying some data */ 4343 for (idx = 0; idx < rm->n_grp_count; idx++) 4344 if (buf[idx].recipe_indx == entry->rid) { 4345 
buf_idx = idx; 4346 idx_found = true; 4347 } 4348 4349 if (!idx_found) { 4350 status = -EIO; 4351 goto err_unroll; 4352 } 4353 4354 recp = &sw->recp_list[entry->rid]; 4355 is_root = (rm->root_rid == entry->rid); 4356 recp->is_root = is_root; 4357 4358 recp->root_rid = entry->rid; 4359 recp->big_recp = (is_root && rm->n_grp_count > 1); 4360 4361 memcpy(&recp->ext_words, entry->r_group.pairs, 4362 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); 4363 4364 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, 4365 sizeof(recp->r_bitmap)); 4366 4367 /* Copy non-result fv index values and masks to recipe. This 4368 * call will also update the result recipe bitmask. 4369 */ 4370 ice_collect_result_idx(&buf[buf_idx], recp); 4371 4372 /* for non-root recipes, also copy to the root, this allows 4373 * easier matching of a complete chained recipe 4374 */ 4375 if (!is_root) 4376 ice_collect_result_idx(&buf[buf_idx], 4377 &sw->recp_list[rm->root_rid]); 4378 4379 recp->n_ext_words = entry->r_group.n_val_pairs; 4380 recp->chain_idx = entry->chain_idx; 4381 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; 4382 recp->n_grp_count = rm->n_grp_count; 4383 recp->tun_type = rm->tun_type; 4384 recp->recp_created = true; 4385 } 4386 rm->root_buf = buf; 4387 kfree(tmp); 4388 return status; 4389 4390 err_unroll: 4391 err_mem: 4392 kfree(tmp); 4393 devm_kfree(ice_hw_to_dev(hw), buf); 4394 return status; 4395 } 4396 4397 /** 4398 * ice_create_recipe_group - creates recipe group 4399 * @hw: pointer to hardware structure 4400 * @rm: recipe management list entry 4401 * @lkup_exts: lookup elements 4402 */ 4403 static int 4404 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, 4405 struct ice_prot_lkup_ext *lkup_exts) 4406 { 4407 u8 recp_count = 0; 4408 int status; 4409 4410 rm->n_grp_count = 0; 4411 4412 /* Create recipes for words that are marked not done by packing them 4413 * as best fit. 
4414 */ 4415 status = ice_create_first_fit_recp_def(hw, lkup_exts, 4416 &rm->rg_list, &recp_count); 4417 if (!status) { 4418 rm->n_grp_count += recp_count; 4419 rm->n_ext_words = lkup_exts->n_val_words; 4420 memcpy(&rm->ext_words, lkup_exts->fv_words, 4421 sizeof(rm->ext_words)); 4422 memcpy(rm->word_masks, lkup_exts->field_mask, 4423 sizeof(rm->word_masks)); 4424 } 4425 4426 return status; 4427 } 4428 4429 /** 4430 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types 4431 * @hw: pointer to hardware structure 4432 * @lkups: lookup elements or match criteria for the advanced recipe, one 4433 * structure per protocol header 4434 * @lkups_cnt: number of protocols 4435 * @bm: bitmap of field vectors to consider 4436 * @fv_list: pointer to a list that holds the returned field vectors 4437 */ 4438 static int 4439 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 4440 unsigned long *bm, struct list_head *fv_list) 4441 { 4442 u8 *prot_ids; 4443 int status; 4444 u16 i; 4445 4446 prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL); 4447 if (!prot_ids) 4448 return -ENOMEM; 4449 4450 for (i = 0; i < lkups_cnt; i++) 4451 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) { 4452 status = -EIO; 4453 goto free_mem; 4454 } 4455 4456 /* Find field vectors that include all specified protocol types */ 4457 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list); 4458 4459 free_mem: 4460 kfree(prot_ids); 4461 return status; 4462 } 4463 4464 /** 4465 * ice_tun_type_match_word - determine if tun type needs a match mask 4466 * @tun_type: tunnel type 4467 * @mask: mask to be used for the tunnel 4468 */ 4469 static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) 4470 { 4471 switch (tun_type) { 4472 case ICE_SW_TUN_GENEVE: 4473 case ICE_SW_TUN_VXLAN: 4474 case ICE_SW_TUN_NVGRE: 4475 *mask = ICE_TUN_FLAG_MASK; 4476 return true; 4477 4478 default: 4479 *mask = 0; 4480 return false; 4481 } 4482 
} 4483 4484 /** 4485 * ice_add_special_words - Add words that are not protocols, such as metadata 4486 * @rinfo: other information regarding the rule e.g. priority and action info 4487 * @lkup_exts: lookup word structure 4488 */ 4489 static int 4490 ice_add_special_words(struct ice_adv_rule_info *rinfo, 4491 struct ice_prot_lkup_ext *lkup_exts) 4492 { 4493 u16 mask; 4494 4495 /* If this is a tunneled packet, then add recipe index to match the 4496 * tunnel bit in the packet metadata flags. 4497 */ 4498 if (ice_tun_type_match_word(rinfo->tun_type, &mask)) { 4499 if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) { 4500 u8 word = lkup_exts->n_val_words++; 4501 4502 lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW; 4503 lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF; 4504 lkup_exts->field_mask[word] = mask; 4505 } else { 4506 return -ENOSPC; 4507 } 4508 } 4509 4510 return 0; 4511 } 4512 4513 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule 4514 * @hw: pointer to hardware structure 4515 * @rinfo: other information regarding the rule e.g. 
priority and action info 4516 * @bm: pointer to memory for returning the bitmap of field vectors 4517 */ 4518 static void 4519 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, 4520 unsigned long *bm) 4521 { 4522 enum ice_prof_type prof_type; 4523 4524 bitmap_zero(bm, ICE_MAX_NUM_PROFILES); 4525 4526 switch (rinfo->tun_type) { 4527 case ICE_NON_TUN: 4528 prof_type = ICE_PROF_NON_TUN; 4529 break; 4530 case ICE_ALL_TUNNELS: 4531 prof_type = ICE_PROF_TUN_ALL; 4532 break; 4533 case ICE_SW_TUN_GENEVE: 4534 case ICE_SW_TUN_VXLAN: 4535 prof_type = ICE_PROF_TUN_UDP; 4536 break; 4537 case ICE_SW_TUN_NVGRE: 4538 prof_type = ICE_PROF_TUN_GRE; 4539 break; 4540 default: 4541 prof_type = ICE_PROF_ALL; 4542 break; 4543 } 4544 4545 ice_get_sw_fv_bitmap(hw, prof_type, bm); 4546 } 4547 4548 /** 4549 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default 4550 * @hw: pointer to hardware structure 4551 * @lkups: lookup elements or match criteria for the advanced recipe, one 4552 * structure per protocol header 4553 * @lkups_cnt: number of protocols 4554 * @rinfo: other information regarding the rule e.g. 
priority and action info 4555 * @rid: return the recipe ID of the recipe created 4556 */ 4557 static int 4558 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, 4559 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid) 4560 { 4561 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); 4562 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); 4563 struct ice_prot_lkup_ext *lkup_exts; 4564 struct ice_recp_grp_entry *r_entry; 4565 struct ice_sw_fv_list_entry *fvit; 4566 struct ice_recp_grp_entry *r_tmp; 4567 struct ice_sw_fv_list_entry *tmp; 4568 struct ice_sw_recipe *rm; 4569 int status = 0; 4570 u8 i; 4571 4572 if (!lkups_cnt) 4573 return -EINVAL; 4574 4575 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL); 4576 if (!lkup_exts) 4577 return -ENOMEM; 4578 4579 /* Determine the number of words to be matched and if it exceeds a 4580 * recipe's restrictions 4581 */ 4582 for (i = 0; i < lkups_cnt; i++) { 4583 u16 count; 4584 4585 if (lkups[i].type >= ICE_PROTOCOL_LAST) { 4586 status = -EIO; 4587 goto err_free_lkup_exts; 4588 } 4589 4590 count = ice_fill_valid_words(&lkups[i], lkup_exts); 4591 if (!count) { 4592 status = -EIO; 4593 goto err_free_lkup_exts; 4594 } 4595 } 4596 4597 rm = kzalloc(sizeof(*rm), GFP_KERNEL); 4598 if (!rm) { 4599 status = -ENOMEM; 4600 goto err_free_lkup_exts; 4601 } 4602 4603 /* Get field vectors that contain fields extracted from all the protocol 4604 * headers being programmed. 4605 */ 4606 INIT_LIST_HEAD(&rm->fv_list); 4607 INIT_LIST_HEAD(&rm->rg_list); 4608 4609 /* Get bitmap of field vectors (profiles) that are compatible with the 4610 * rule request; only these will be searched in the subsequent call to 4611 * ice_get_fv. 
4612 */ 4613 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); 4614 4615 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); 4616 if (status) 4617 goto err_unroll; 4618 4619 /* Create any special protocol/offset pairs, such as looking at tunnel 4620 * bits by extracting metadata 4621 */ 4622 status = ice_add_special_words(rinfo, lkup_exts); 4623 if (status) 4624 goto err_free_lkup_exts; 4625 4626 /* Group match words into recipes using preferred recipe grouping 4627 * criteria. 4628 */ 4629 status = ice_create_recipe_group(hw, rm, lkup_exts); 4630 if (status) 4631 goto err_unroll; 4632 4633 /* set the recipe priority if specified */ 4634 rm->priority = (u8)rinfo->priority; 4635 4636 /* Find offsets from the field vector. Pick the first one for all the 4637 * recipes. 4638 */ 4639 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); 4640 if (status) 4641 goto err_unroll; 4642 4643 /* get bitmap of all profiles the recipe will be associated with */ 4644 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES); 4645 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 4646 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id); 4647 set_bit((u16)fvit->profile_id, profiles); 4648 } 4649 4650 /* Look for a recipe which matches our requested fv / mask list */ 4651 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type); 4652 if (*rid < ICE_MAX_NUM_RECIPES) 4653 /* Success if found a recipe that match the existing criteria */ 4654 goto err_unroll; 4655 4656 rm->tun_type = rinfo->tun_type; 4657 /* Recipe we need does not exist, add a recipe */ 4658 status = ice_add_sw_recipe(hw, rm, profiles); 4659 if (status) 4660 goto err_unroll; 4661 4662 /* Associate all the recipes created with all the profiles in the 4663 * common field vector. 
4664 */ 4665 list_for_each_entry(fvit, &rm->fv_list, list_entry) { 4666 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); 4667 u16 j; 4668 4669 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, 4670 (u8 *)r_bitmap, NULL); 4671 if (status) 4672 goto err_unroll; 4673 4674 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, 4675 ICE_MAX_NUM_RECIPES); 4676 status = ice_acquire_change_lock(hw, ICE_RES_WRITE); 4677 if (status) 4678 goto err_unroll; 4679 4680 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, 4681 (u8 *)r_bitmap, 4682 NULL); 4683 ice_release_change_lock(hw); 4684 4685 if (status) 4686 goto err_unroll; 4687 4688 /* Update profile to recipe bitmap array */ 4689 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, 4690 ICE_MAX_NUM_RECIPES); 4691 4692 /* Update recipe to profile bitmap array */ 4693 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES) 4694 set_bit((u16)fvit->profile_id, recipe_to_profile[j]); 4695 } 4696 4697 *rid = rm->root_rid; 4698 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, 4699 sizeof(*lkup_exts)); 4700 err_unroll: 4701 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { 4702 list_del(&r_entry->l_entry); 4703 devm_kfree(ice_hw_to_dev(hw), r_entry); 4704 } 4705 4706 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { 4707 list_del(&fvit->list_entry); 4708 devm_kfree(ice_hw_to_dev(hw), fvit); 4709 } 4710 4711 if (rm->root_buf) 4712 devm_kfree(ice_hw_to_dev(hw), rm->root_buf); 4713 4714 kfree(rm); 4715 4716 err_free_lkup_exts: 4717 kfree(lkup_exts); 4718 4719 return status; 4720 } 4721 4722 /** 4723 * ice_find_dummy_packet - find dummy packet 4724 * 4725 * @lkups: lookup elements or match criteria for the advanced recipe, one 4726 * structure per protocol header 4727 * @lkups_cnt: number of protocols 4728 * @tun_type: tunnel type 4729 * @pkt: dummy packet to fill according to filter match criteria 4730 * @pkt_len: packet length of dummy packet 4731 * @offsets: pointer to receive 
the pointer to the offsets for the packet 4732 */ 4733 static void 4734 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 4735 enum ice_sw_tunnel_type tun_type, 4736 const u8 **pkt, u16 *pkt_len, 4737 const struct ice_dummy_pkt_offsets **offsets) 4738 { 4739 bool tcp = false, udp = false, ipv6 = false, vlan = false; 4740 u16 i; 4741 4742 for (i = 0; i < lkups_cnt; i++) { 4743 if (lkups[i].type == ICE_UDP_ILOS) 4744 udp = true; 4745 else if (lkups[i].type == ICE_TCP_IL) 4746 tcp = true; 4747 else if (lkups[i].type == ICE_IPV6_OFOS) 4748 ipv6 = true; 4749 else if (lkups[i].type == ICE_VLAN_OFOS) 4750 vlan = true; 4751 else if (lkups[i].type == ICE_ETYPE_OL && 4752 lkups[i].h_u.ethertype.ethtype_id == 4753 cpu_to_be16(ICE_IPV6_ETHER_ID) && 4754 lkups[i].m_u.ethertype.ethtype_id == 4755 cpu_to_be16(0xFFFF)) 4756 ipv6 = true; 4757 } 4758 4759 if (tun_type == ICE_SW_TUN_NVGRE) { 4760 if (tcp) { 4761 *pkt = dummy_gre_tcp_packet; 4762 *pkt_len = sizeof(dummy_gre_tcp_packet); 4763 *offsets = dummy_gre_tcp_packet_offsets; 4764 return; 4765 } 4766 4767 *pkt = dummy_gre_udp_packet; 4768 *pkt_len = sizeof(dummy_gre_udp_packet); 4769 *offsets = dummy_gre_udp_packet_offsets; 4770 return; 4771 } 4772 4773 if (tun_type == ICE_SW_TUN_VXLAN || 4774 tun_type == ICE_SW_TUN_GENEVE) { 4775 if (tcp) { 4776 *pkt = dummy_udp_tun_tcp_packet; 4777 *pkt_len = sizeof(dummy_udp_tun_tcp_packet); 4778 *offsets = dummy_udp_tun_tcp_packet_offsets; 4779 return; 4780 } 4781 4782 *pkt = dummy_udp_tun_udp_packet; 4783 *pkt_len = sizeof(dummy_udp_tun_udp_packet); 4784 *offsets = dummy_udp_tun_udp_packet_offsets; 4785 return; 4786 } 4787 4788 if (udp && !ipv6) { 4789 if (vlan) { 4790 *pkt = dummy_vlan_udp_packet; 4791 *pkt_len = sizeof(dummy_vlan_udp_packet); 4792 *offsets = dummy_vlan_udp_packet_offsets; 4793 return; 4794 } 4795 *pkt = dummy_udp_packet; 4796 *pkt_len = sizeof(dummy_udp_packet); 4797 *offsets = dummy_udp_packet_offsets; 4798 return; 4799 } else if (udp && ipv6) { 4800 
if (vlan) { 4801 *pkt = dummy_vlan_udp_ipv6_packet; 4802 *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); 4803 *offsets = dummy_vlan_udp_ipv6_packet_offsets; 4804 return; 4805 } 4806 *pkt = dummy_udp_ipv6_packet; 4807 *pkt_len = sizeof(dummy_udp_ipv6_packet); 4808 *offsets = dummy_udp_ipv6_packet_offsets; 4809 return; 4810 } else if ((tcp && ipv6) || ipv6) { 4811 if (vlan) { 4812 *pkt = dummy_vlan_tcp_ipv6_packet; 4813 *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); 4814 *offsets = dummy_vlan_tcp_ipv6_packet_offsets; 4815 return; 4816 } 4817 *pkt = dummy_tcp_ipv6_packet; 4818 *pkt_len = sizeof(dummy_tcp_ipv6_packet); 4819 *offsets = dummy_tcp_ipv6_packet_offsets; 4820 return; 4821 } 4822 4823 if (vlan) { 4824 *pkt = dummy_vlan_tcp_packet; 4825 *pkt_len = sizeof(dummy_vlan_tcp_packet); 4826 *offsets = dummy_vlan_tcp_packet_offsets; 4827 } else { 4828 *pkt = dummy_tcp_packet; 4829 *pkt_len = sizeof(dummy_tcp_packet); 4830 *offsets = dummy_tcp_packet_offsets; 4831 } 4832 } 4833 4834 /** 4835 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria 4836 * 4837 * @lkups: lookup elements or match criteria for the advanced recipe, one 4838 * structure per protocol header 4839 * @lkups_cnt: number of protocols 4840 * @s_rule: stores rule information from the match criteria 4841 * @dummy_pkt: dummy packet to fill according to filter match criteria 4842 * @pkt_len: packet length of dummy packet 4843 * @offsets: offset info for the dummy packet 4844 */ 4845 static int 4846 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, 4847 struct ice_aqc_sw_rules_elem *s_rule, 4848 const u8 *dummy_pkt, u16 pkt_len, 4849 const struct ice_dummy_pkt_offsets *offsets) 4850 { 4851 u8 *pkt; 4852 u16 i; 4853 4854 /* Start with a packet with a pre-defined/dummy content. Then, fill 4855 * in the header values to be looked up or matched. 
4856 */ 4857 pkt = s_rule->pdata.lkup_tx_rx.hdr; 4858 4859 memcpy(pkt, dummy_pkt, pkt_len); 4860 4861 for (i = 0; i < lkups_cnt; i++) { 4862 enum ice_protocol_type type; 4863 u16 offset = 0, len = 0, j; 4864 bool found = false; 4865 4866 /* find the start of this layer; it should be found since this 4867 * was already checked when search for the dummy packet 4868 */ 4869 type = lkups[i].type; 4870 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) { 4871 if (type == offsets[j].type) { 4872 offset = offsets[j].offset; 4873 found = true; 4874 break; 4875 } 4876 } 4877 /* this should never happen in a correct calling sequence */ 4878 if (!found) 4879 return -EINVAL; 4880 4881 switch (lkups[i].type) { 4882 case ICE_MAC_OFOS: 4883 case ICE_MAC_IL: 4884 len = sizeof(struct ice_ether_hdr); 4885 break; 4886 case ICE_ETYPE_OL: 4887 len = sizeof(struct ice_ethtype_hdr); 4888 break; 4889 case ICE_VLAN_OFOS: 4890 len = sizeof(struct ice_vlan_hdr); 4891 break; 4892 case ICE_IPV4_OFOS: 4893 case ICE_IPV4_IL: 4894 len = sizeof(struct ice_ipv4_hdr); 4895 break; 4896 case ICE_IPV6_OFOS: 4897 case ICE_IPV6_IL: 4898 len = sizeof(struct ice_ipv6_hdr); 4899 break; 4900 case ICE_TCP_IL: 4901 case ICE_UDP_OF: 4902 case ICE_UDP_ILOS: 4903 len = sizeof(struct ice_l4_hdr); 4904 break; 4905 case ICE_SCTP_IL: 4906 len = sizeof(struct ice_sctp_hdr); 4907 break; 4908 case ICE_NVGRE: 4909 len = sizeof(struct ice_nvgre_hdr); 4910 break; 4911 case ICE_VXLAN: 4912 case ICE_GENEVE: 4913 len = sizeof(struct ice_udp_tnl_hdr); 4914 break; 4915 default: 4916 return -EINVAL; 4917 } 4918 4919 /* the length should be a word multiple */ 4920 if (len % ICE_BYTES_PER_WORD) 4921 return -EIO; 4922 4923 /* We have the offset to the header start, the length, the 4924 * caller's header values and mask. Use this information to 4925 * copy the data into the dummy packet appropriately based on 4926 * the mask. 
Note that we need to only write the bits as 4927 * indicated by the mask to make sure we don't improperly write 4928 * over any significant packet data. 4929 */ 4930 for (j = 0; j < len / sizeof(u16); j++) 4931 if (((u16 *)&lkups[i].m_u)[j]) 4932 ((u16 *)(pkt + offset))[j] = 4933 (((u16 *)(pkt + offset))[j] & 4934 ~((u16 *)&lkups[i].m_u)[j]) | 4935 (((u16 *)&lkups[i].h_u)[j] & 4936 ((u16 *)&lkups[i].m_u)[j]); 4937 } 4938 4939 s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); 4940 4941 return 0; 4942 } 4943 4944 /** 4945 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port 4946 * @hw: pointer to the hardware structure 4947 * @tun_type: tunnel type 4948 * @pkt: dummy packet to fill in 4949 * @offsets: offset info for the dummy packet 4950 */ 4951 static int 4952 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type, 4953 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets) 4954 { 4955 u16 open_port, i; 4956 4957 switch (tun_type) { 4958 case ICE_SW_TUN_VXLAN: 4959 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN)) 4960 return -EIO; 4961 break; 4962 case ICE_SW_TUN_GENEVE: 4963 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE)) 4964 return -EIO; 4965 break; 4966 default: 4967 /* Nothing needs to be done for this tunnel type */ 4968 return 0; 4969 } 4970 4971 /* Find the outer UDP protocol header and insert the port number */ 4972 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) { 4973 if (offsets[i].type == ICE_UDP_OF) { 4974 struct ice_l4_hdr *hdr; 4975 u16 offset; 4976 4977 offset = offsets[i].offset; 4978 hdr = (struct ice_l4_hdr *)&pkt[offset]; 4979 hdr->dst_port = cpu_to_be16(open_port); 4980 4981 return 0; 4982 } 4983 } 4984 4985 return -EIO; 4986 } 4987 4988 /** 4989 * ice_find_adv_rule_entry - Search a rule entry 4990 * @hw: pointer to the hardware structure 4991 * @lkups: lookup elements or match criteria for the advanced recipe, one 4992 * structure per protocol header 4993 * @lkups_cnt: number 
 * of protocols
 * @recp_id: recipe ID for which we are finding the rule
 * @rinfo: other information regarding the rule e.g. priority and action info
 *
 * Helper function to search for a given advance rule entry
 * Returns pointer to entry storing the rule if found
 */
static struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
			u16 lkups_cnt, u16 recp_id,
			struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct ice_switch_info *sw = hw->switch_info;
	int i;

	list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
			    list_entry) {
		bool lkups_matched = true;

		if (lkups_cnt != list_itr->lkups_cnt)
			continue;
		/* element-wise byte compare; order of lkups must match too */
		for (i = 0; i < list_itr->lkups_cnt; i++)
			if (memcmp(&list_itr->lkups[i], &lkups[i],
				   sizeof(*lkups))) {
				lkups_matched = false;
				break;
			}
		/* a rule matches only when lookups, flags and tunnel type
		 * all agree
		 */
		if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
		    rinfo->tun_type == list_itr->rule_info.tun_type &&
		    lkups_matched)
			return list_itr;
	}
	return NULL;
}

/**
 * ice_adv_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current adv filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given advanced filter
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list ID
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
			    struct ice_adv_fltr_mgmt_list_entry *m_entry,
			    struct ice_adv_rule_info *cur_fltr,
			    struct ice_adv_rule_info *new_fltr)
{
	u16 vsi_list_id = 0;
	int status;

	/* queue/queue-group/drop actions cannot be shared via a VSI list */
	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
		return -EOPNOTSUPP;

	/* mixing a queue action with an existing VSI action is unsupported */
	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
		return -EOPNOTSUPP;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
		    new_fltr->sw_act.fwd_id.hw_vsi_id)
			return -EEXIST;

		vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
		vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  ICE_SW_LKUP_LAST);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;

		/* Update the previous switch rule of "forward to VSI" to
		 * "fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* NOTE(review): a NULL return here is not propagated as an
		 * error; vsi_count is still incremented below
		 */
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
	} else {
		u16 vsi_handle = new_fltr->sw_act.vsi_handle;

		if (!m_entry->vsi_list_info)
			return -EIO;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false,
						  ice_aqc_opc_update_sw_rules,
						  ICE_SW_LKUP_LAST);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}

/**
 * ice_add_adv_rule - helper function to create an advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: other information related to the rule that needs to be programmed
 * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 *               ignored in case of error.
 *
 * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
 * These words can span multiple protocols. Callers to this function need to
 * pass in a list of protocol headers with lookup information along and mask
 * that determines which words are valid from the given protocol header.
 * rinfo describes other information related to this rule such as forwarding
 * IDs, priority of this rule, etc.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
	const struct ice_dummy_pkt_offsets *pkt_offsets;
	struct ice_aqc_sw_rules_elem *s_rule = NULL;
	struct list_head *rule_head;
	struct ice_switch_info *sw;
	const u8 *pkt = NULL;
	u16 word_cnt;
	u32 act = 0;
	int status;
	u8 q_rgn;

	/* Initialize profile to result index bitmap (one-time lazy init) */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);
	}

	if (!lkups_cnt)
		return -EINVAL;

	/* get # of words we need to match: count every non-zero 16-bit word
	 * across all lookup masks
	 */
	word_cnt = 0;
	for (i = 0; i < lkups_cnt; i++) {
		u16 j, *ptr;

		ptr = (u16 *)&lkups[i].m_u;
		for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
			if (ptr[j] != 0)
				word_cnt++;
	}

	if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
		return -EINVAL;

	/* make sure that we can locate a dummy packet */
	ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
			      &pkt_offsets);
	if (!pkt) {
		status = -EINVAL;
		goto err_ice_add_adv_rule;
	}

	/* only the four forwarding/drop actions below are supported */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
		return -EIO;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
	if (rinfo->sw_act.flag & ICE_FLTR_TX)
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
	if (status)
		return status;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	if (m_entry) {
		/* An identical rule already exists. Instead of programming a
		 * duplicate, fold the new VSI into the existing rule: either
		 * convert the single-VSI forward into a VSI list, or extend
		 * the existing VSI list with the new VSI ID.
		 */
		status = ice_adv_add_update_vsi_list(hw, m_entry,
						     &m_entry->rule_info,
						     rinfo);
		if (added_entry) {
			added_entry->rid = rid;
			added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
			added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
		}
		return status;
	}
	rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	if (!s_rule)
		return -ENOMEM;
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		/* caller supplied its own LAN/LB enable bits; honor only those */
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);
	}

	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is encoded as log2 of the group size */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
		       ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
		       ICE_SINGLE_ACT_Q_REGION_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
	}

	/* set the rule LOOKUP type based on caller specified 'Rx'
	 * instead of hardcoding it to be either LOOKUP_TX/RX
	 *
	 * for 'Rx' set the source to be the port number
	 * for 'Tx' set the source to be the source HW VSI number (determined
	 * by caller)
	 */
	if (rinfo->rx) {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
	}

	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
					   pkt_len, pkt_offsets);
	if (status)
		goto err_ice_add_adv_rule;

	if (rinfo->tun_type != ICE_NON_TUN) {
		status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
						 s_rule->pdata.lkup_tx_rx.hdr,
						 pkt_offsets);
		if (status)
			goto err_ice_add_adv_rule;
	}

	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
				 NULL);
	if (status)
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
				GFP_KERNEL);
	if (!adv_fltr) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		status = -ENOMEM;
		goto err_ice_add_adv_rule;
	}

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	adv_fltr->rule_info.fltr_rule_id =
		le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);
	if (added_entry) {
		added_entry->rid = rid;
		added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
		added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	}
	/* NOTE: the success path intentionally falls through to the label
	 * below with status == 0, so s_rule (a temporary AQ buffer) is freed
	 * on every path; adv_fltr is only freed when status is non-zero.
	 */
err_ice_add_adv_rule:
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);
	}

	kfree(s_rule);

	return status;
}

/**
 * ice_replay_vsi_fltr - Replay filters for requested VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: driver VSI handle
 * @recp_id: Recipe ID for which rules need to be replayed
 * @list_head: list for which filters need to be replayed
 *
 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
 * It is required to pass valid VSI handle.
 */
static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
{
	struct ice_fltr_mgmt_list_entry *itr;
	int status = 0;
	u16 hw_vsi_id;

	if (list_empty(list_head))
		return status;
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		f_entry.fltr_info = itr->fltr_info;
		/* single-VSI, non-VLAN rules owned by this VSI are re-added
		 * directly; everything else goes through the VSI-list path
		 * below
		 */
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
			if (status)
				goto end;
			continue;
		}
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
			continue;
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
		else
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
		if (status)
			goto end;
	}
end:
	return status;
}

/**
 * ice_adv_rem_update_vsi_list - remove a VSI from an advanced rule's VSI list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *	     be done
 *
 * Removes @vsi_handle from the rule's VSI list. When only one VSI remains
 * afterwards, the rule is converted back from "forward to VSI list" to
 * "forward to VSI" and the now-unused VSI list is deleted.
 */
static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id;
	int status;

	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return -EINVAL;

	/* A rule with the VSI being removed does not exist */
	/* NOTE(review): assumes fm_list->vsi_list_info is non-NULL whenever
	 * fltr_act is ICE_FWD_TO_VSI_LIST — verify against callers
	 */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
		return -ENOENT;

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		struct ice_fltr_info tmp_fltr;
		u16 rem_vsi_handle;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
						ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return -EIO;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Convert the switch rule from "forward to VSI list" back to
		 * "forward to VSI" since only one VSI remains in the list
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);
			return status;
		}
		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}

/**
 * ice_rem_adv_rule - removes existing advanced switch rule
 * @hw: pointer to the hardware structure
 * @lkups: information on the words that needs to be looked up. All words
 *         together makes one recipe
 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: Its the pointer to the rule information for the rule
 *
 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that forms the "lookup" portion of the
 * rule. These words can span multiple protocols. Callers to this function
 * need to pass in a list of protocol headers with lookup information along
 * and mask that determines which words are valid from the given protocol
 * header. rinfo describes other information related to this rule such as
 * forwarding IDs, priority of this rule, etc.
 */
static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;
	int status = 0;

	/* rebuild the lookup-word extraction so the owning recipe can be
	 * located
	 */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return -EIO;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return -EIO;
	}

	/* Create any special protocol/offset pairs, such as looking at tunnel
	 * bits by extracting metadata
	 */
	status = ice_add_special_words(rinfo, &lkup_exts);
	if (status)
		return status;

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the rule entry is looked up before rule_lock is
	 * taken; confirm callers serialize add/remove for the same recipe
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return 0;
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* single-VSI rule: remove the whole switch rule */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* multiple VSIs remain: only detach this VSI from the list */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			mutex_unlock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	mutex_unlock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;
		u16 rule_buf_sz;

		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		if (!s_rule)
			return -ENOMEM;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means the rule is already gone from HW; still drop
		 * our bookkeeping entry
		 */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
		}
		kfree(s_rule);
	}
	return status;
}

/**
 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
 * @hw: pointer to the hardware structure
 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
 *
 * This function is used to remove 1 rule at a time. The removal is based on
 * the remove_entry parameter.
This function will remove rule for a given
 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
 */
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
{
	struct ice_adv_fltr_mgmt_list_entry *entry;
	struct ice_adv_rule_info rinfo;
	struct list_head *rules;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	/* nothing to remove if the recipe was never created */
	if (!sw->recp_list[remove_entry->rid].recp_created)
		return -EINVAL;

	rules = &sw->recp_list[remove_entry->rid].filt_rules;
	list_for_each_entry(entry, rules, list_entry) {
		if (entry->rule_info.fltr_rule_id != remove_entry->rule_id)
			continue;

		/* found it: remove on behalf of the caller's VSI */
		rinfo = entry->rule_info;
		rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
		return ice_rem_adv_rule(hw, entry->lkups,
					entry->lkups_cnt, &rinfo);
	}

	/* either list is empty or unable to find rule */
	return -ENOENT;
}

/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 * given VSI handle
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
 *
 * This function is used to remove all the rules for a given VSI and as soon
 * as removing a rule fails, it will return immediately with the error code,
 * else it will return success.
5661 */ 5662 int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle) 5663 { 5664 struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry; 5665 struct ice_vsi_list_map_info *map_info; 5666 struct ice_adv_rule_info rinfo; 5667 struct list_head *list_head; 5668 struct ice_switch_info *sw; 5669 int status; 5670 u8 rid; 5671 5672 sw = hw->switch_info; 5673 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) { 5674 if (!sw->recp_list[rid].recp_created) 5675 continue; 5676 if (!sw->recp_list[rid].adv_rule) 5677 continue; 5678 5679 list_head = &sw->recp_list[rid].filt_rules; 5680 list_for_each_entry_safe(list_itr, tmp_entry, list_head, 5681 list_entry) { 5682 rinfo = list_itr->rule_info; 5683 5684 if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { 5685 map_info = list_itr->vsi_list_info; 5686 if (!map_info) 5687 continue; 5688 5689 if (!test_bit(vsi_handle, map_info->vsi_map)) 5690 continue; 5691 } else if (rinfo.sw_act.vsi_handle != vsi_handle) { 5692 continue; 5693 } 5694 5695 rinfo.sw_act.vsi_handle = vsi_handle; 5696 status = ice_rem_adv_rule(hw, list_itr->lkups, 5697 list_itr->lkups_cnt, &rinfo); 5698 if (status) 5699 return status; 5700 } 5701 } 5702 return 0; 5703 } 5704 5705 /** 5706 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI 5707 * @hw: pointer to the hardware structure 5708 * @vsi_handle: driver VSI handle 5709 * @list_head: list for which filters need to be replayed 5710 * 5711 * Replay the advanced rule for the given VSI. 
5712 */ 5713 static int 5714 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle, 5715 struct list_head *list_head) 5716 { 5717 struct ice_rule_query_data added_entry = { 0 }; 5718 struct ice_adv_fltr_mgmt_list_entry *adv_fltr; 5719 int status = 0; 5720 5721 if (list_empty(list_head)) 5722 return status; 5723 list_for_each_entry(adv_fltr, list_head, list_entry) { 5724 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info; 5725 u16 lk_cnt = adv_fltr->lkups_cnt; 5726 5727 if (vsi_handle != rinfo->sw_act.vsi_handle) 5728 continue; 5729 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo, 5730 &added_entry); 5731 if (status) 5732 break; 5733 } 5734 return status; 5735 } 5736 5737 /** 5738 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists 5739 * @hw: pointer to the hardware structure 5740 * @vsi_handle: driver VSI handle 5741 * 5742 * Replays filters for requested VSI via vsi_handle. 5743 */ 5744 int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle) 5745 { 5746 struct ice_switch_info *sw = hw->switch_info; 5747 int status; 5748 u8 i; 5749 5750 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5751 struct list_head *head; 5752 5753 head = &sw->recp_list[i].filt_replay_rules; 5754 if (!sw->recp_list[i].adv_rule) 5755 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); 5756 else 5757 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head); 5758 if (status) 5759 return status; 5760 } 5761 return status; 5762 } 5763 5764 /** 5765 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules 5766 * @hw: pointer to the HW struct 5767 * 5768 * Deletes the filter replay rules. 
5769 */ 5770 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) 5771 { 5772 struct ice_switch_info *sw = hw->switch_info; 5773 u8 i; 5774 5775 if (!sw) 5776 return; 5777 5778 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 5779 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) { 5780 struct list_head *l_head; 5781 5782 l_head = &sw->recp_list[i].filt_replay_rules; 5783 if (!sw->recp_list[i].adv_rule) 5784 ice_rem_sw_rule_info(hw, l_head); 5785 else 5786 ice_rem_adv_rule_info(hw, l_head); 5787 } 5788 } 5789 } 5790