1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2020, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /*$FreeBSD$*/ 32 33 #include "ice_common.h" 34 #include "ice_flow.h" 35 36 /* Size of known protocol header fields */ 37 #define ICE_FLOW_FLD_SZ_ETH_TYPE 2 38 #define ICE_FLOW_FLD_SZ_VLAN 2 39 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 40 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 41 #define ICE_FLOW_FLD_SZ_IP_DSCP 1 42 #define ICE_FLOW_FLD_SZ_IP_TTL 1 43 #define ICE_FLOW_FLD_SZ_IP_PROT 1 44 #define ICE_FLOW_FLD_SZ_PORT 2 45 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 46 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 47 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1 48 #define ICE_FLOW_FLD_SZ_ARP_OPER 2 49 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4 50 51 /* Describe properties of a protocol header field */ 52 struct ice_flow_field_info { 53 enum ice_flow_seg_hdr hdr; 54 s16 off; /* Offset from start of a protocol header, in bits */ 55 u16 size; /* Size of fields in bits */ 56 }; 57 58 #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ 59 .hdr = _hdr, \ 60 .off = (_offset_bytes) * BITS_PER_BYTE, \ 61 .size = (_size_bytes) * BITS_PER_BYTE, \ 62 } 63 64 /* Table containing properties of supported protocol header fields */ 65 static const 66 struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { 67 /* Ether */ 68 /* ICE_FLOW_FIELD_IDX_ETH_DA */ 69 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), 70 /* ICE_FLOW_FIELD_IDX_ETH_SA */ 71 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), 72 /* ICE_FLOW_FIELD_IDX_S_VLAN */ 73 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN), 74 /* ICE_FLOW_FIELD_IDX_C_VLAN */ 75 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN), 76 /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ 77 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE), 78 /* IPv4 / IPv6 */ 79 /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ 80 ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, ICE_FLOW_FLD_SZ_IP_DSCP), 81 /* ICE_FLOW_FIELD_IDX_IPV6_DSCP 
 */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, ICE_FLOW_FLD_SZ_IP_TTL),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 7, ICE_FLOW_FLD_SZ_IP_TTL),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 6, ICE_FLOW_FLD_SZ_IP_PROT),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000,
0x00000000, 154 0x00000000, 0x00000000, 0x00000000, 0x00000000, 155 0x00000000, 0x00000000, 0x00000000, 0x00000000, 156 0x00000000, 0x00000000, 0x00000000, 0x00000000, 157 0x00000000, 0x00000000, 0x00000000, 0x00000000, 158 0x00000000, 0x00000000, 0x00000000, 0x00000000, 159 0x00000000, 0x00000000, 0x00000000, 0x00000000, 160 }; 161 162 /* Packet types for packets with an Outer/First/Single IPv4 header */ 163 static const u32 ice_ptypes_ipv4_ofos[] = { 164 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 165 0x00000000, 0x00000000, 0x00000000, 0x00000000, 166 0x00000000, 0x00000000, 0x00000000, 0x00000000, 167 0x00000000, 0x00000000, 0x00000000, 0x00000000, 168 0x00000000, 0x00000000, 0x00000000, 0x00000000, 169 0x00000000, 0x00000000, 0x00000000, 0x00000000, 170 0x00000000, 0x00000000, 0x00000000, 0x00000000, 171 0x00000000, 0x00000000, 0x00000000, 0x00000000, 172 }; 173 174 /* Packet types for packets with an Innermost/Last IPv4 header */ 175 static const u32 ice_ptypes_ipv4_il[] = { 176 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 177 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 178 0x00000000, 0x00000000, 0x00000000, 0x00000000, 179 0x00000000, 0x00000000, 0x00000000, 0x00000000, 180 0x00000000, 0x00000000, 0x00000000, 0x00000000, 181 0x00000000, 0x00000000, 0x00000000, 0x00000000, 182 0x00000000, 0x00000000, 0x00000000, 0x00000000, 183 0x00000000, 0x00000000, 0x00000000, 0x00000000, 184 }; 185 186 /* Packet types for packets with an Outer/First/Single IPv6 header */ 187 static const u32 ice_ptypes_ipv6_ofos[] = { 188 0x00000000, 0x00000000, 0x77000000, 0x10002000, 189 0x00000000, 0x00000000, 0x00000000, 0x00000000, 190 0x00000000, 0x00000000, 0x00000000, 0x00000000, 191 0x00000000, 0x00000000, 0x00000000, 0x00000000, 192 0x00000000, 0x00000000, 0x00000000, 0x00000000, 193 0x00000000, 0x00000000, 0x00000000, 0x00000000, 194 0x00000000, 0x00000000, 0x00000000, 0x00000000, 195 0x00000000, 0x00000000, 0x00000000, 0x00000000, 196 }; 197 198 /* Packet types for packets with an Innermost/Last IPv6 header */ 199 static const u32 ice_ptypes_ipv6_il[] = { 200 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 201 0x00000770, 0x00000000, 0x00000000, 0x00000000, 202 0x00000000, 0x00000000, 0x00000000, 0x00000000, 203 0x00000000, 0x00000000, 0x00000000, 0x00000000, 204 0x00000000, 0x00000000, 0x00000000, 0x00000000, 205 0x00000000, 0x00000000, 0x00000000, 0x00000000, 206 0x00000000, 0x00000000, 0x00000000, 0x00000000, 207 0x00000000, 0x00000000, 0x00000000, 0x00000000, 208 }; 209 210 /* Packet types for packets with an Outermost/First ARP header */ 211 static const u32 ice_ptypes_arp_of[] = { 212 0x00000800, 0x00000000, 0x00000000, 0x00000000, 213 0x00000000, 0x00000000, 0x00000000, 0x00000000, 214 0x00000000, 0x00000000, 0x00000000, 0x00000000, 215 0x00000000, 0x00000000, 0x00000000, 0x00000000, 216 0x00000000, 0x00000000, 0x00000000, 0x00000000, 217 0x00000000, 0x00000000, 0x00000000, 0x00000000, 218 0x00000000, 0x00000000, 0x00000000, 0x00000000, 219 0x00000000, 0x00000000, 0x00000000, 0x00000000, 220 }; 221 222 /* UDP Packet types for non-tunneled packets or tunneled 223 * packets with inner UDP. 
224 */ 225 static const u32 ice_ptypes_udp_il[] = { 226 0x81000000, 0x20204040, 0x04000010, 0x80810102, 227 0x00000040, 0x00000000, 0x00000000, 0x00000000, 228 0x00000000, 0x00000000, 0x00000000, 0x00000000, 229 0x00000000, 0x00000000, 0x00000000, 0x00000000, 230 0x00000000, 0x00000000, 0x00000000, 0x00000000, 231 0x00000000, 0x00000000, 0x00000000, 0x00000000, 232 0x00000000, 0x00000000, 0x00000000, 0x00000000, 233 0x00000000, 0x00000000, 0x00000000, 0x00000000, 234 }; 235 236 /* Packet types for packets with an Innermost/Last TCP header */ 237 static const u32 ice_ptypes_tcp_il[] = { 238 0x04000000, 0x80810102, 0x10000040, 0x02040408, 239 0x00000102, 0x00000000, 0x00000000, 0x00000000, 240 0x00000000, 0x00000000, 0x00000000, 0x00000000, 241 0x00000000, 0x00000000, 0x00000000, 0x00000000, 242 0x00000000, 0x00000000, 0x00000000, 0x00000000, 243 0x00000000, 0x00000000, 0x00000000, 0x00000000, 244 0x00000000, 0x00000000, 0x00000000, 0x00000000, 245 0x00000000, 0x00000000, 0x00000000, 0x00000000, 246 }; 247 248 /* Packet types for packets with an Innermost/Last SCTP header */ 249 static const u32 ice_ptypes_sctp_il[] = { 250 0x08000000, 0x01020204, 0x20000081, 0x04080810, 251 0x00000204, 0x00000000, 0x00000000, 0x00000000, 252 0x00000000, 0x00000000, 0x00000000, 0x00000000, 253 0x00000000, 0x00000000, 0x00000000, 0x00000000, 254 0x00000000, 0x00000000, 0x00000000, 0x00000000, 255 0x00000000, 0x00000000, 0x00000000, 0x00000000, 256 0x00000000, 0x00000000, 0x00000000, 0x00000000, 257 0x00000000, 0x00000000, 0x00000000, 0x00000000, 258 }; 259 260 /* Packet types for packets with an Outermost/First ICMP header */ 261 static const u32 ice_ptypes_icmp_of[] = { 262 0x10000000, 0x00000000, 0x00000000, 0x00000000, 263 0x00000000, 0x00000000, 0x00000000, 0x00000000, 264 0x00000000, 0x00000000, 0x00000000, 0x00000000, 265 0x00000000, 0x00000000, 0x00000000, 0x00000000, 266 0x00000000, 0x00000000, 0x00000000, 0x00000000, 267 0x00000000, 0x00000000, 0x00000000, 0x00000000, 268 0x00000000, 0x00000000, 0x00000000, 0x00000000, 269 0x00000000, 0x00000000, 0x00000000, 0x00000000, 270 }; 271 272 /* Packet types for packets with an Innermost/Last ICMP header */ 273 static const u32 ice_ptypes_icmp_il[] = { 274 0x00000000, 0x02040408, 0x40000102, 0x08101020, 275 0x00000408, 0x00000000, 0x00000000, 0x00000000, 276 0x00000000, 0x00000000, 0x00000000, 0x00000000, 277 0x00000000, 0x00000000, 0x00000000, 0x00000000, 278 0x00000000, 0x00000000, 0x00000000, 0x00000000, 279 0x00000000, 0x00000000, 0x00000000, 0x00000000, 280 0x00000000, 0x00000000, 0x00000000, 0x00000000, 281 0x00000000, 0x00000000, 0x00000000, 0x00000000, 282 }; 283 284 /* Packet types for packets with an Outermost/First GRE header */ 285 static const u32 ice_ptypes_gre_of[] = { 286 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 287 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 288 0x00000000, 0x00000000, 0x00000000, 0x00000000, 289 0x00000000, 0x00000000, 0x00000000, 0x00000000, 290 0x00000000, 0x00000000, 0x00000000, 0x00000000, 291 0x00000000, 0x00000000, 0x00000000, 0x00000000, 292 0x00000000, 0x00000000, 0x00000000, 0x00000000, 293 0x00000000, 0x00000000, 0x00000000, 0x00000000, 294 }; 295 296 /* Packet types for packets with an Innermost/Last MAC header */ 297 static const u32 ice_ptypes_mac_il[] = { 298 0x00000000, 0x00000000, 0x00000000, 0x00000000, 299 0x00000000, 0x00000000, 0x00000000, 0x00000000, 300 0x00000000, 0x00000000, 0x00000000, 0x00000000, 301 0x00000000, 0x00000000, 0x00000000, 0x00000000, 302 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 303 0x00000000, 0x00000000, 0x00000000, 0x00000000, 304 0x00000000, 0x00000000, 0x00000000, 0x00000000, 305 0x00000000, 0x00000000, 0x00000000, 0x00000000, 306 }; 307 308 /* Manage parameters and info. used during the creation of a flow profile */ 309 struct ice_flow_prof_params { 310 enum ice_block blk; 311 u16 entry_length; /* # of bytes formatted entry will require */ 312 u8 es_cnt; 313 struct ice_flow_prof *prof; 314 315 /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 316 * This will give us the direction flags. 317 */ 318 struct ice_fv_word es[ICE_MAX_FV_WORDS]; 319 ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); 320 }; 321 322 #define ICE_FLOW_SEG_HDRS_L3_MASK \ 323 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \ 324 ICE_FLOW_SEG_HDR_ARP) 325 #define ICE_FLOW_SEG_HDRS_L4_MASK \ 326 (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ 327 ICE_FLOW_SEG_HDR_SCTP) 328 329 /** 330 * ice_flow_val_hdrs - validates packet segments for valid protocol headers 331 * @segs: array of one or more packet segments that describe the flow 332 * @segs_cnt: number of packet segments provided 333 */ 334 static enum ice_status 335 ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) 336 { 337 u8 i; 338 339 for (i = 0; i < segs_cnt; i++) { 340 /* Multiple L3 headers */ 341 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && 342 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) 343 return ICE_ERR_PARAM; 344 345 /* Multiple L4 headers */ 346 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && 347 !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) 348 return ICE_ERR_PARAM; 349 } 350 351 return ICE_SUCCESS; 352 } 353 354 /* Sizes of fixed known protocol headers without header options */ 355 #define ICE_FLOW_PROT_HDR_SZ_MAC 14 356 #define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2) 357 #define ICE_FLOW_PROT_HDR_SZ_IPV4 20 358 #define ICE_FLOW_PROT_HDR_SZ_IPV6 40 359 #define ICE_FLOW_PROT_HDR_SZ_ARP 28 360 #define ICE_FLOW_PROT_HDR_SZ_ICMP 8 361 #define ICE_FLOW_PROT_HDR_SZ_TCP 20 362 #define ICE_FLOW_PROT_HDR_SZ_UDP 8 363 #define ICE_FLOW_PROT_HDR_SZ_SCTP 12 364 365 /** 366 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers 367 * @params: information about the flow to be processed 368 * @seg: index of packet segment whose header size is to be determined 369 */ 370 static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg) 371 { 372 u16 sz; 373 374 /* L2 headers */ 375 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? 
376 ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC; 377 378 /* L3 headers */ 379 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) 380 sz += ICE_FLOW_PROT_HDR_SZ_IPV4; 381 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) 382 sz += ICE_FLOW_PROT_HDR_SZ_IPV6; 383 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) 384 sz += ICE_FLOW_PROT_HDR_SZ_ARP; 385 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) 386 /* A L3 header is required if L4 is specified */ 387 return 0; 388 389 /* L4 headers */ 390 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP) 391 sz += ICE_FLOW_PROT_HDR_SZ_ICMP; 392 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP) 393 sz += ICE_FLOW_PROT_HDR_SZ_TCP; 394 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP) 395 sz += ICE_FLOW_PROT_HDR_SZ_UDP; 396 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP) 397 sz += ICE_FLOW_PROT_HDR_SZ_SCTP; 398 399 return sz; 400 } 401 402 /** 403 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments 404 * @params: information about the flow to be processed 405 * 406 * This function identifies the packet types associated with the protocol 407 * headers being present in packet segments of the specified flow profile. 408 */ 409 static enum ice_status 410 ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) 411 { 412 struct ice_flow_prof *prof; 413 u8 i; 414 415 ice_memset(params->ptypes, 0xff, sizeof(params->ptypes), 416 ICE_NONDMA_MEM); 417 418 prof = params->prof; 419 420 for (i = 0; i < params->prof->segs_cnt; i++) { 421 const ice_bitmap_t *src; 422 u32 hdrs; 423 424 hdrs = prof->segs[i].hdrs; 425 426 if (hdrs & ICE_FLOW_SEG_HDR_ETH) { 427 src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos : 428 (const ice_bitmap_t *)ice_ptypes_mac_il; 429 ice_and_bitmap(params->ptypes, params->ptypes, src, 430 ICE_FLOW_PTYPE_MAX); 431 } 432 433 if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { 434 src = (const ice_bitmap_t *)ice_ptypes_macvlan_il; 435 ice_and_bitmap(params->ptypes, params->ptypes, src, 436 ICE_FLOW_PTYPE_MAX); 437 } 438 439 if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { 440 ice_and_bitmap(params->ptypes, params->ptypes, 441 (const ice_bitmap_t *)ice_ptypes_arp_of, 442 ICE_FLOW_PTYPE_MAX); 443 } 444 445 if (hdrs & ICE_FLOW_SEG_HDR_IPV4) { 446 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos : 447 (const ice_bitmap_t *)ice_ptypes_ipv4_il; 448 ice_and_bitmap(params->ptypes, params->ptypes, src, 449 ICE_FLOW_PTYPE_MAX); 450 } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) { 451 src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos : 452 (const ice_bitmap_t *)ice_ptypes_ipv6_il; 453 ice_and_bitmap(params->ptypes, params->ptypes, src, 454 ICE_FLOW_PTYPE_MAX); 455 } 456 457 if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { 458 src = !i ? 
(const ice_bitmap_t *)ice_ptypes_icmp_of : 459 (const ice_bitmap_t *)ice_ptypes_icmp_il; 460 ice_and_bitmap(params->ptypes, params->ptypes, src, 461 ICE_FLOW_PTYPE_MAX); 462 } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) { 463 src = (const ice_bitmap_t *)ice_ptypes_udp_il; 464 ice_and_bitmap(params->ptypes, params->ptypes, src, 465 ICE_FLOW_PTYPE_MAX); 466 } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) { 467 ice_and_bitmap(params->ptypes, params->ptypes, 468 (const ice_bitmap_t *)ice_ptypes_tcp_il, 469 ICE_FLOW_PTYPE_MAX); 470 } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) { 471 src = (const ice_bitmap_t *)ice_ptypes_sctp_il; 472 ice_and_bitmap(params->ptypes, params->ptypes, src, 473 ICE_FLOW_PTYPE_MAX); 474 } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { 475 if (!i) { 476 src = (const ice_bitmap_t *)ice_ptypes_gre_of; 477 ice_and_bitmap(params->ptypes, params->ptypes, 478 src, ICE_FLOW_PTYPE_MAX); 479 } 480 } 481 } 482 483 return ICE_SUCCESS; 484 } 485 486 /** 487 * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags 488 * @hw: pointer to the HW struct 489 * @params: information about the flow to be processed 490 * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata. 491 * 492 * This function will allocate an extraction sequence entries for a DWORD size 493 * chunk of the packet flags. 494 */ 495 static enum ice_status 496 ice_flow_xtract_pkt_flags(struct ice_hw *hw, 497 struct ice_flow_prof_params *params, 498 enum ice_flex_mdid_pkt_flags flags) 499 { 500 u8 fv_words = hw->blk[params->blk].es.fvw; 501 u8 idx; 502 503 /* Make sure the number of extraction sequence entries required does not 504 * exceed the block's capacity. 505 */ 506 if (params->es_cnt >= fv_words) 507 return ICE_ERR_MAX_LIMIT; 508 509 /* some blocks require a reversed field vector layout */ 510 if (hw->blk[params->blk].es.reverse) 511 idx = fv_words - params->es_cnt - 1; 512 else 513 idx = params->es_cnt; 514 515 params->es[idx].prot_id = ICE_PROT_META_ID; 516 params->es[idx].off = flags; 517 params->es_cnt++; 518 519 return ICE_SUCCESS; 520 } 521 522 /** 523 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field 524 * @hw: pointer to the HW struct 525 * @params: information about the flow to be processed 526 * @seg: packet segment index of the field to be extracted 527 * @fld: ID of field to be extracted 528 * 529 * This function determines the protocol ID, offset, and size of the given 530 * field. It then allocates one or more extraction sequence entries for the 531 * given field, and fill the entries with protocol ID and offset information. 532 */ 533 static enum ice_status 534 ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, 535 u8 seg, enum ice_flow_field fld) 536 { 537 enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; 538 enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; 539 u8 fv_words = hw->blk[params->blk].es.fvw; 540 struct ice_flow_fld_info *flds; 541 u16 cnt, ese_bits, i; 542 s16 adj = 0; 543 u16 off; 544 545 flds = params->prof->segs[seg].fields; 546 547 switch (fld) { 548 case ICE_FLOW_FIELD_IDX_ETH_DA: 549 case ICE_FLOW_FIELD_IDX_ETH_SA: 550 case ICE_FLOW_FIELD_IDX_S_VLAN: 551 case ICE_FLOW_FIELD_IDX_C_VLAN: 552 prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; 553 break; 554 case ICE_FLOW_FIELD_IDX_ETH_TYPE: 555 prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; 556 break; 557 case ICE_FLOW_FIELD_IDX_IPV4_DSCP: 558 prot_id = seg == 0 ? 
ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 559 break; 560 case ICE_FLOW_FIELD_IDX_IPV6_DSCP: 561 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 562 break; 563 case ICE_FLOW_FIELD_IDX_IPV4_TTL: 564 case ICE_FLOW_FIELD_IDX_IPV4_PROT: 565 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 566 567 /* TTL and PROT share the same extraction seq. entry. 568 * Each is considered a sibling to the other in terms of sharing 569 * the same extraction sequence entry. 570 */ 571 if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) 572 sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; 573 else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT) 574 sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; 575 break; 576 case ICE_FLOW_FIELD_IDX_IPV6_TTL: 577 case ICE_FLOW_FIELD_IDX_IPV6_PROT: 578 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 579 580 /* TTL and PROT share the same extraction seq. entry. 581 * Each is considered a sibling to the other in terms of sharing 582 * the same extraction sequence entry. 583 */ 584 if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) 585 sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; 586 else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT) 587 sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; 588 break; 589 case ICE_FLOW_FIELD_IDX_IPV4_SA: 590 case ICE_FLOW_FIELD_IDX_IPV4_DA: 591 prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; 592 break; 593 case ICE_FLOW_FIELD_IDX_IPV6_SA: 594 case ICE_FLOW_FIELD_IDX_IPV6_DA: 595 prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; 596 break; 597 case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: 598 case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: 599 case ICE_FLOW_FIELD_IDX_TCP_FLAGS: 600 prot_id = ICE_PROT_TCP_IL; 601 break; 602 case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: 603 case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: 604 prot_id = ICE_PROT_UDP_IL_OR_S; 605 break; 606 case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: 607 case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: 608 prot_id = ICE_PROT_SCTP_IL; 609 break; 610 case ICE_FLOW_FIELD_IDX_ARP_SIP: 611 case ICE_FLOW_FIELD_IDX_ARP_DIP: 612 case ICE_FLOW_FIELD_IDX_ARP_SHA: 613 case ICE_FLOW_FIELD_IDX_ARP_DHA: 614 case ICE_FLOW_FIELD_IDX_ARP_OP: 615 prot_id = ICE_PROT_ARP_OF; 616 break; 617 case ICE_FLOW_FIELD_IDX_ICMP_TYPE: 618 case ICE_FLOW_FIELD_IDX_ICMP_CODE: 619 /* ICMP type and code share the same extraction seq. entry */ 620 prot_id = (params->prof->segs[seg].hdrs & 621 ICE_FLOW_SEG_HDR_IPV4) ? 622 ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; 623 sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? 624 ICE_FLOW_FIELD_IDX_ICMP_CODE : 625 ICE_FLOW_FIELD_IDX_ICMP_TYPE; 626 break; 627 case ICE_FLOW_FIELD_IDX_GRE_KEYID: 628 prot_id = ICE_PROT_GRE_OF; 629 break; 630 default: 631 return ICE_ERR_NOT_IMPL; 632 } 633 634 /* Each extraction sequence entry is a word in size, and extracts a 635 * word-aligned offset from a protocol header. 
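	 *
	 * Worked example (illustrative only, assuming the 2-byte extraction
	 * word size noted above): the IPv4 destination address entry in
	 * ice_flds_info starts at byte offset 16 and is 4 bytes wide, which
	 * works out to xtrct.off = 16, xtrct.disp = 0, and cnt = 2 extraction
	 * sequence entries in the calculation below.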
636 */ 637 ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; 638 639 flds[fld].xtrct.prot_id = prot_id; 640 flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * 641 ICE_FLOW_FV_EXTRACT_SZ; 642 flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits); 643 flds[fld].xtrct.idx = params->es_cnt; 644 645 /* Adjust the next field-entry index after accommodating the number of 646 * entries this field consumes 647 */ 648 cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp + 649 ice_flds_info[fld].size, ese_bits); 650 651 /* Fill in the extraction sequence entries needed for this field */ 652 off = flds[fld].xtrct.off; 653 for (i = 0; i < cnt; i++) { 654 /* Only consume an extraction sequence entry if there is no 655 * sibling field associated with this field or the sibling entry 656 * already extracts the word shared with this field. 657 */ 658 if (sib == ICE_FLOW_FIELD_IDX_MAX || 659 flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || 660 flds[sib].xtrct.off != off) { 661 u8 idx; 662 663 /* Make sure the number of extraction sequence required 664 * does not exceed the block's capability 665 */ 666 if (params->es_cnt >= fv_words) 667 return ICE_ERR_MAX_LIMIT; 668 669 /* some blocks require a reversed field vector layout */ 670 if (hw->blk[params->blk].es.reverse) 671 idx = fv_words - params->es_cnt - 1; 672 else 673 idx = params->es_cnt; 674 675 params->es[idx].prot_id = prot_id; 676 params->es[idx].off = off; 677 params->es_cnt++; 678 } 679 680 off += ICE_FLOW_FV_EXTRACT_SZ; 681 } 682 683 return ICE_SUCCESS; 684 } 685 686 /** 687 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes 688 * @hw: pointer to the HW struct 689 * @params: information about the flow to be processed 690 * @seg: index of packet segment whose raw fields are to be be extracted 691 */ 692 static enum ice_status 693 ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params, 694 u8 seg) 695 { 696 u16 fv_words; 697 u16 hdrs_sz; 698 u8 i; 699 700 if (!params->prof->segs[seg].raws_cnt) 701 return ICE_SUCCESS; 702 703 if (params->prof->segs[seg].raws_cnt > 704 ARRAY_SIZE(params->prof->segs[seg].raws)) 705 return ICE_ERR_MAX_LIMIT; 706 707 /* Offsets within the segment headers are not supported */ 708 hdrs_sz = ice_flow_calc_seg_sz(params, seg); 709 if (!hdrs_sz) 710 return ICE_ERR_PARAM; 711 712 fv_words = hw->blk[params->blk].es.fvw; 713 714 for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) { 715 struct ice_flow_seg_fld_raw *raw; 716 u16 off, cnt, j; 717 718 raw = ¶ms->prof->segs[seg].raws[i]; 719 720 /* Storing extraction information */ 721 raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S; 722 raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) * 723 ICE_FLOW_FV_EXTRACT_SZ; 724 raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) * 725 BITS_PER_BYTE; 726 raw->info.xtrct.idx = params->es_cnt; 727 728 /* Determine the number of field vector entries this raw field 729 * consumes. 
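		 *
		 * Illustrative example (not in the original source): a 3-byte
		 * raw match at segment byte offset 5 is stored above as
		 * xtrct.off = 4 and xtrct.disp = 8 bits, so the division below
		 * yields cnt = 2 field vector words, again assuming 2-byte
		 * extraction words.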
730 */ 731 cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp + 732 (raw->info.src.last * BITS_PER_BYTE), 733 (ICE_FLOW_FV_EXTRACT_SZ * 734 BITS_PER_BYTE)); 735 off = raw->info.xtrct.off; 736 for (j = 0; j < cnt; j++) { 737 u16 idx; 738 739 /* Make sure the number of extraction sequence required 740 * does not exceed the block's capability 741 */ 742 if (params->es_cnt >= hw->blk[params->blk].es.count || 743 params->es_cnt >= ICE_MAX_FV_WORDS) 744 return ICE_ERR_MAX_LIMIT; 745 746 /* some blocks require a reversed field vector layout */ 747 if (hw->blk[params->blk].es.reverse) 748 idx = fv_words - params->es_cnt - 1; 749 else 750 idx = params->es_cnt; 751 752 params->es[idx].prot_id = raw->info.xtrct.prot_id; 753 params->es[idx].off = off; 754 params->es_cnt++; 755 off += ICE_FLOW_FV_EXTRACT_SZ; 756 } 757 } 758 759 return ICE_SUCCESS; 760 } 761 762 /** 763 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments 764 * @hw: pointer to the HW struct 765 * @params: information about the flow to be processed 766 * 767 * This function iterates through all matched fields in the given segments, and 768 * creates an extraction sequence for the fields. 769 */ 770 static enum ice_status 771 ice_flow_create_xtrct_seq(struct ice_hw *hw, 772 struct ice_flow_prof_params *params) 773 { 774 enum ice_status status = ICE_SUCCESS; 775 u8 i; 776 777 /* For ACL, we also need to extract the direction bit (Rx,Tx) data from 778 * packet flags 779 */ 780 if (params->blk == ICE_BLK_ACL) { 781 status = ice_flow_xtract_pkt_flags(hw, params, 782 ICE_RX_MDID_PKT_FLAGS_15_0); 783 if (status) 784 return status; 785 } 786 787 for (i = 0; i < params->prof->segs_cnt; i++) { 788 u64 match = params->prof->segs[i].match; 789 enum ice_flow_field j; 790 791 for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) { 792 const u64 bit = BIT_ULL(j); 793 794 if (match & bit) { 795 status = ice_flow_xtract_fld(hw, params, i, j); 796 if (status) 797 return status; 798 match &= ~bit; 799 } 800 } 801 802 /* Process raw matching bytes */ 803 status = ice_flow_xtract_raws(hw, params, i); 804 if (status) 805 return status; 806 } 807 808 return status; 809 } 810 811 /** 812 * ice_flow_proc_segs - process all packet segments associated with a profile 813 * @hw: pointer to the HW struct 814 * @params: information about the flow to be processed 815 */ 816 static enum ice_status 817 ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) 818 { 819 enum ice_status status; 820 821 status = ice_flow_proc_seg_hdrs(params); 822 if (status) 823 return status; 824 825 status = ice_flow_create_xtrct_seq(hw, params); 826 if (status) 827 return status; 828 829 switch (params->blk) { 830 case ICE_BLK_RSS: 831 /* Only header information is provided for RSS configuration. 832 * No further processing is needed. 
833 */ 834 status = ICE_SUCCESS; 835 break; 836 case ICE_BLK_FD: 837 status = ICE_SUCCESS; 838 break; 839 case ICE_BLK_SW: 840 default: 841 return ICE_ERR_NOT_IMPL; 842 } 843 844 return status; 845 } 846 847 #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 848 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 849 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 850 851 /** 852 * ice_flow_find_prof_conds - Find a profile matching headers and conditions 853 * @hw: pointer to the HW struct 854 * @blk: classification stage 855 * @dir: flow direction 856 * @segs: array of one or more packet segments that describe the flow 857 * @segs_cnt: number of packet segments provided 858 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) 859 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) 860 */ 861 static struct ice_flow_prof * 862 ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, 863 enum ice_flow_dir dir, struct ice_flow_seg_info *segs, 864 u8 segs_cnt, u16 vsi_handle, u32 conds) 865 { 866 struct ice_flow_prof *p, *prof = NULL; 867 868 ice_acquire_lock(&hw->fl_profs_locks[blk]); 869 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) { 870 if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) && 871 segs_cnt && segs_cnt == p->segs_cnt) { 872 u8 i; 873 874 /* Check for profile-VSI association if specified */ 875 if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) && 876 ice_is_vsi_valid(hw, vsi_handle) && 877 !ice_is_bit_set(p->vsis, vsi_handle)) 878 continue; 879 880 /* Protocol headers must be checked. Matched fields are 881 * checked if specified. 882 */ 883 for (i = 0; i < segs_cnt; i++) 884 if (segs[i].hdrs != p->segs[i].hdrs || 885 ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) && 886 segs[i].match != p->segs[i].match)) 887 break; 888 889 /* A match is found if all segments are matched */ 890 if (i == segs_cnt) { 891 prof = p; 892 break; 893 } 894 } 895 } 896 ice_release_lock(&hw->fl_profs_locks[blk]); 897 898 return prof; 899 } 900 901 /** 902 * ice_flow_find_prof - Look up a profile matching headers and matched fields 903 * @hw: pointer to the HW struct 904 * @blk: classification stage 905 * @dir: flow direction 906 * @segs: array of one or more packet segments that describe the flow 907 * @segs_cnt: number of packet segments provided 908 */ 909 u64 910 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, 911 struct ice_flow_seg_info *segs, u8 segs_cnt) 912 { 913 struct ice_flow_prof *p; 914 915 p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt, 916 ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS); 917 918 return p ? 
p->id : ICE_FLOW_PROF_ID_INVAL; 919 } 920 921 /** 922 * ice_flow_find_prof_id - Look up a profile with given profile ID 923 * @hw: pointer to the HW struct 924 * @blk: classification stage 925 * @prof_id: unique ID to identify this flow profile 926 */ 927 static struct ice_flow_prof * 928 ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id) 929 { 930 struct ice_flow_prof *p; 931 932 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) { 933 if (p->id == prof_id) 934 return p; 935 } 936 937 return NULL; 938 } 939 940 /** 941 * ice_dealloc_flow_entry - Deallocate flow entry memory 942 * @hw: pointer to the HW struct 943 * @entry: flow entry to be removed 944 */ 945 static void 946 ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry) 947 { 948 if (!entry) 949 return; 950 951 if (entry->entry) 952 ice_free(hw, entry->entry); 953 954 if (entry->acts) { 955 ice_free(hw, entry->acts); 956 entry->acts = NULL; 957 entry->acts_cnt = 0; 958 } 959 960 ice_free(hw, entry); 961 } 962 963 /** 964 * ice_flow_rem_entry_sync - Remove a flow entry 965 * @hw: pointer to the HW struct 966 * @blk: classification stage 967 * @entry: flow entry to be removed 968 */ 969 static enum ice_status 970 ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __ALWAYS_UNUSED blk, 971 struct ice_flow_entry *entry) 972 { 973 if (!entry) 974 return ICE_ERR_BAD_PTR; 975 976 LIST_DEL(&entry->l_entry); 977 978 ice_dealloc_flow_entry(hw, entry); 979 980 return ICE_SUCCESS; 981 } 982 983 /** 984 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields 985 * @hw: pointer to the HW struct 986 * @blk: classification stage 987 * @dir: flow direction 988 * @prof_id: unique ID to identify this flow profile 989 * @segs: array of one or more packet segments that describe the flow 990 * @segs_cnt: number of packet segments provided 991 * @acts: array of default actions 992 * @acts_cnt: number of default actions 993 * @prof: stores the returned flow profile added 994 * 995 * Assumption: the caller has acquired the lock to the profile list 996 */ 997 static enum ice_status 998 ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, 999 enum ice_flow_dir dir, u64 prof_id, 1000 struct ice_flow_seg_info *segs, u8 segs_cnt, 1001 struct ice_flow_action *acts, u8 acts_cnt, 1002 struct ice_flow_prof **prof) 1003 { 1004 struct ice_flow_prof_params params; 1005 enum ice_status status; 1006 u8 i; 1007 1008 if (!prof || (acts_cnt && !acts)) 1009 return ICE_ERR_BAD_PTR; 1010 1011 ice_memset(¶ms, 0, sizeof(params), ICE_NONDMA_MEM); 1012 params.prof = (struct ice_flow_prof *) 1013 ice_malloc(hw, sizeof(*params.prof)); 1014 if (!params.prof) 1015 return ICE_ERR_NO_MEMORY; 1016 1017 /* initialize extraction sequence to all invalid (0xff) */ 1018 for (i = 0; i < ICE_MAX_FV_WORDS; i++) { 1019 params.es[i].prot_id = ICE_PROT_INVALID; 1020 params.es[i].off = ICE_FV_OFFSET_INVAL; 1021 } 1022 1023 params.blk = blk; 1024 params.prof->id = prof_id; 1025 params.prof->dir = dir; 1026 params.prof->segs_cnt = segs_cnt; 1027 1028 /* Make a copy of the segments that need to be persistent in the flow 1029 * profile instance 1030 */ 1031 for (i = 0; i < segs_cnt; i++) 1032 ice_memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs), 1033 ICE_NONDMA_TO_NONDMA); 1034 1035 /* Make a copy of the actions that need to be persistent in the flow 1036 * profile instance. 
1037 */ 1038 if (acts_cnt) { 1039 params.prof->acts = (struct ice_flow_action *) 1040 ice_memdup(hw, acts, acts_cnt * sizeof(*acts), 1041 ICE_NONDMA_TO_NONDMA); 1042 1043 if (!params.prof->acts) { 1044 status = ICE_ERR_NO_MEMORY; 1045 goto out; 1046 } 1047 } 1048 1049 status = ice_flow_proc_segs(hw, ¶ms); 1050 if (status) { 1051 ice_debug(hw, ICE_DBG_FLOW, 1052 "Error processing a flow's packet segments\n"); 1053 goto out; 1054 } 1055 1056 /* Add a HW profile for this flow profile */ 1057 status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es); 1058 if (status) { 1059 ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); 1060 goto out; 1061 } 1062 1063 INIT_LIST_HEAD(¶ms.prof->entries); 1064 ice_init_lock(¶ms.prof->entries_lock); 1065 *prof = params.prof; 1066 1067 out: 1068 if (status) { 1069 if (params.prof->acts) 1070 ice_free(hw, params.prof->acts); 1071 ice_free(hw, params.prof); 1072 } 1073 1074 return status; 1075 } 1076 1077 /** 1078 * ice_flow_rem_prof_sync - remove a flow profile 1079 * @hw: pointer to the hardware structure 1080 * @blk: classification stage 1081 * @prof: pointer to flow profile to remove 1082 * 1083 * Assumption: the caller has acquired the lock to the profile list 1084 */ 1085 static enum ice_status 1086 ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, 1087 struct ice_flow_prof *prof) 1088 { 1089 enum ice_status status; 1090 1091 /* Remove all remaining flow entries before removing the flow profile */ 1092 if (!LIST_EMPTY(&prof->entries)) { 1093 struct ice_flow_entry *e, *t; 1094 1095 ice_acquire_lock(&prof->entries_lock); 1096 1097 LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry, 1098 l_entry) { 1099 status = ice_flow_rem_entry_sync(hw, blk, e); 1100 if (status) 1101 break; 1102 } 1103 1104 ice_release_lock(&prof->entries_lock); 1105 } 1106 1107 /* Remove all hardware profiles associated with this flow profile */ 1108 status = ice_rem_prof(hw, blk, prof->id); 1109 if (!status) { 1110 LIST_DEL(&prof->l_entry); 1111 ice_destroy_lock(&prof->entries_lock); 1112 if (prof->acts) 1113 ice_free(hw, prof->acts); 1114 ice_free(hw, prof); 1115 } 1116 1117 return status; 1118 } 1119 1120 /** 1121 * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG 1122 * @hw: pointer to the hardware structure 1123 * @blk: classification stage 1124 * @vsi_handle: software VSI handle 1125 * @vsig: target VSI group 1126 * 1127 * Assumption: the caller has already verified that the VSI to 1128 * be added has the same characteristics as the VSIG and will 1129 * thereby have access to all resources added to that VSIG. 
1130 */ 1131 enum ice_status 1132 ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, 1133 u16 vsig) 1134 { 1135 enum ice_status status; 1136 1137 if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT) 1138 return ICE_ERR_PARAM; 1139 1140 ice_acquire_lock(&hw->fl_profs_locks[blk]); 1141 status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), 1142 vsig); 1143 ice_release_lock(&hw->fl_profs_locks[blk]); 1144 1145 return status; 1146 } 1147 1148 /** 1149 * ice_flow_assoc_prof - associate a VSI with a flow profile 1150 * @hw: pointer to the hardware structure 1151 * @blk: classification stage 1152 * @prof: pointer to flow profile 1153 * @vsi_handle: software VSI handle 1154 * 1155 * Assumption: the caller has acquired the lock to the profile list 1156 * and the software VSI handle has been validated 1157 */ 1158 static enum ice_status 1159 ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, 1160 struct ice_flow_prof *prof, u16 vsi_handle) 1161 { 1162 enum ice_status status = ICE_SUCCESS; 1163 1164 if (!ice_is_bit_set(prof->vsis, vsi_handle)) { 1165 status = ice_add_prof_id_flow(hw, blk, 1166 ice_get_hw_vsi_num(hw, 1167 vsi_handle), 1168 prof->id); 1169 if (!status) 1170 ice_set_bit(vsi_handle, prof->vsis); 1171 else 1172 ice_debug(hw, ICE_DBG_FLOW, 1173 "HW profile add failed, %d\n", 1174 status); 1175 } 1176 1177 return status; 1178 } 1179 1180 /** 1181 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile 1182 * @hw: pointer to the hardware structure 1183 * @blk: classification stage 1184 * @prof: pointer to flow profile 1185 * @vsi_handle: software VSI handle 1186 * 1187 * Assumption: the caller has acquired the lock to the profile list 1188 * and the software VSI handle has been validated 1189 */ 1190 static enum ice_status 1191 ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, 1192 struct ice_flow_prof *prof, u16 vsi_handle) 1193 { 1194 enum ice_status status = ICE_SUCCESS; 1195 1196 if (ice_is_bit_set(prof->vsis, vsi_handle)) { 1197 status = ice_rem_prof_id_flow(hw, blk, 1198 ice_get_hw_vsi_num(hw, 1199 vsi_handle), 1200 prof->id); 1201 if (!status) 1202 ice_clear_bit(vsi_handle, prof->vsis); 1203 else 1204 ice_debug(hw, ICE_DBG_FLOW, 1205 "HW profile remove failed, %d\n", 1206 status); 1207 } 1208 1209 return status; 1210 } 1211 1212 /** 1213 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields 1214 * @hw: pointer to the HW struct 1215 * @blk: classification stage 1216 * @dir: flow direction 1217 * @prof_id: unique ID to identify this flow profile 1218 * @segs: array of one or more packet segments that describe the flow 1219 * @segs_cnt: number of packet segments provided 1220 * @acts: array of default actions 1221 * @acts_cnt: number of default actions 1222 * @prof: stores the returned flow profile added 1223 */ 1224 enum ice_status 1225 ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, 1226 u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, 1227 struct ice_flow_action *acts, u8 acts_cnt, 1228 struct ice_flow_prof **prof) 1229 { 1230 enum ice_status status; 1231 1232 if (segs_cnt > ICE_FLOW_SEG_MAX) 1233 return ICE_ERR_MAX_LIMIT; 1234 1235 if (!segs_cnt) 1236 return ICE_ERR_PARAM; 1237 1238 if (!segs) 1239 return ICE_ERR_BAD_PTR; 1240 1241 status = ice_flow_val_hdrs(segs, segs_cnt); 1242 if (status) 1243 return status; 1244 1245 ice_acquire_lock(&hw->fl_profs_locks[blk]); 1246 1247 status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, 
segs_cnt, 1248 acts, acts_cnt, prof); 1249 if (!status) 1250 LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]); 1251 1252 ice_release_lock(&hw->fl_profs_locks[blk]); 1253 1254 return status; 1255 } 1256 1257 /** 1258 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it 1259 * @hw: pointer to the HW struct 1260 * @blk: the block for which the flow profile is to be removed 1261 * @prof_id: unique ID of the flow profile to be removed 1262 */ 1263 enum ice_status 1264 ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) 1265 { 1266 struct ice_flow_prof *prof; 1267 enum ice_status status; 1268 1269 ice_acquire_lock(&hw->fl_profs_locks[blk]); 1270 1271 prof = ice_flow_find_prof_id(hw, blk, prof_id); 1272 if (!prof) { 1273 status = ICE_ERR_DOES_NOT_EXIST; 1274 goto out; 1275 } 1276 1277 /* prof becomes invalid after the call */ 1278 status = ice_flow_rem_prof_sync(hw, blk, prof); 1279 1280 out: 1281 ice_release_lock(&hw->fl_profs_locks[blk]); 1282 1283 return status; 1284 } 1285 1286 /** 1287 * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle 1288 * @hw: pointer to the HW struct 1289 * @blk: classification stage 1290 * @prof_id: the profile ID handle 1291 * @hw_prof_id: pointer to variable to receive the HW profile ID 1292 */ 1293 enum ice_status 1294 ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, 1295 u8 *hw_prof_id) 1296 { 1297 struct ice_prof_map *map; 1298 1299 map = ice_search_prof_id(hw, blk, prof_id); 1300 if (map) { 1301 *hw_prof_id = map->prof_id; 1302 return ICE_SUCCESS; 1303 } 1304 1305 return ICE_ERR_DOES_NOT_EXIST; 1306 } 1307 1308 /** 1309 * ice_flow_find_entry - look for a flow entry using its unique ID 1310 * @hw: pointer to the HW struct 1311 * @blk: classification stage 1312 * @entry_id: unique ID to identify this flow entry 1313 * 1314 * This function looks for the flow entry with the specified unique ID in all 1315 * flow profiles of the specified classification stage. If the entry is found, 1316 * and it returns the handle to the flow entry. Otherwise, it returns 1317 * ICE_FLOW_ENTRY_ID_INVAL. 1318 */ 1319 u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id) 1320 { 1321 struct ice_flow_entry *found = NULL; 1322 struct ice_flow_prof *p; 1323 1324 ice_acquire_lock(&hw->fl_profs_locks[blk]); 1325 1326 LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) { 1327 struct ice_flow_entry *e; 1328 1329 ice_acquire_lock(&p->entries_lock); 1330 LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry) 1331 if (e->id == entry_id) { 1332 found = e; 1333 break; 1334 } 1335 ice_release_lock(&p->entries_lock); 1336 1337 if (found) 1338 break; 1339 } 1340 1341 ice_release_lock(&hw->fl_profs_locks[blk]); 1342 1343 return found ? 
ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL; 1344 } 1345 1346 /** 1347 * ice_flow_add_entry - Add a flow entry 1348 * @hw: pointer to the HW struct 1349 * @blk: classification stage 1350 * @prof_id: ID of the profile to add a new flow entry to 1351 * @entry_id: unique ID to identify this flow entry 1352 * @vsi_handle: software VSI handle for the flow entry 1353 * @prio: priority of the flow entry 1354 * @data: pointer to a data buffer containing flow entry's match values/masks 1355 * @acts: arrays of actions to be performed on a match 1356 * @acts_cnt: number of actions 1357 * @entry_h: pointer to buffer that receives the new flow entry's handle 1358 */ 1359 enum ice_status 1360 ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, 1361 u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio, 1362 void *data, struct ice_flow_action *acts, u8 acts_cnt, 1363 u64 *entry_h) 1364 { 1365 struct ice_flow_entry *e = NULL; 1366 struct ice_flow_prof *prof; 1367 enum ice_status status = ICE_SUCCESS; 1368 1369 /* ACL entries must indicate an action */ 1370 if (blk == ICE_BLK_ACL && (!acts || !acts_cnt)) 1371 return ICE_ERR_PARAM; 1372 1373 /* No flow entry data is expected for RSS */ 1374 if (!entry_h || (!data && blk != ICE_BLK_RSS)) 1375 return ICE_ERR_BAD_PTR; 1376 1377 if (!ice_is_vsi_valid(hw, vsi_handle)) 1378 return ICE_ERR_PARAM; 1379 1380 ice_acquire_lock(&hw->fl_profs_locks[blk]); 1381 1382 prof = ice_flow_find_prof_id(hw, blk, prof_id); 1383 if (!prof) { 1384 status = ICE_ERR_DOES_NOT_EXIST; 1385 } else { 1386 /* Allocate memory for the entry being added and associate 1387 * the VSI to the found flow profile 1388 */ 1389 e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e)); 1390 if (!e) 1391 status = ICE_ERR_NO_MEMORY; 1392 else 1393 status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); 1394 } 1395 1396 ice_release_lock(&hw->fl_profs_locks[blk]); 1397 if (status) 1398 goto out; 1399 1400 e->id = entry_id; 1401 e->vsi_handle = vsi_handle; 1402 e->prof = prof; 1403 e->priority = prio; 1404 1405 switch (blk) { 1406 case ICE_BLK_RSS: 1407 /* RSS will add only one entry per VSI per profile */ 1408 break; 1409 case ICE_BLK_FD: 1410 break; 1411 case ICE_BLK_SW: 1412 case ICE_BLK_PE: 1413 default: 1414 status = ICE_ERR_NOT_IMPL; 1415 goto out; 1416 } 1417 1418 if (blk != ICE_BLK_ACL) { 1419 /* ACL will handle the entry management */ 1420 ice_acquire_lock(&prof->entries_lock); 1421 LIST_ADD(&e->l_entry, &prof->entries); 1422 ice_release_lock(&prof->entries_lock); 1423 } 1424 1425 *entry_h = ICE_FLOW_ENTRY_HNDL(e); 1426 1427 out: 1428 if (status && e) { 1429 if (e->entry) 1430 ice_free(hw, e->entry); 1431 ice_free(hw, e); 1432 } 1433 1434 return status; 1435 } 1436 1437 /** 1438 * ice_flow_rem_entry - Remove a flow entry 1439 * @hw: pointer to the HW struct 1440 * @blk: classification stage 1441 * @entry_h: handle to the flow entry to be removed 1442 */ 1443 enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, 1444 u64 entry_h) 1445 { 1446 struct ice_flow_entry *entry; 1447 struct ice_flow_prof *prof; 1448 enum ice_status status = ICE_SUCCESS; 1449 1450 if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL) 1451 return ICE_ERR_PARAM; 1452 1453 entry = ICE_FLOW_ENTRY_PTR(entry_h); 1454 1455 /* Retain the pointer to the flow profile as the entry will be freed */ 1456 prof = entry->prof; 1457 1458 if (prof) { 1459 ice_acquire_lock(&prof->entries_lock); 1460 status = ice_flow_rem_entry_sync(hw, blk, entry); 1461 ice_release_lock(&prof->entries_lock); 1462 } 
1463 1464 return status; 1465 } 1466 1467 /** 1468 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer 1469 * @seg: packet segment the field being set belongs to 1470 * @fld: field to be set 1471 * @field_type: type of the field 1472 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from 1473 * entry's input buffer 1474 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's 1475 * input buffer 1476 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from 1477 * entry's input buffer 1478 * 1479 * This helper function stores information of a field being matched, including 1480 * the type of the field and the locations of the value to match, the mask, and 1481 * and the upper-bound value in the start of the input buffer for a flow entry. 1482 * This function should only be used for fixed-size data structures. 1483 * 1484 * This function also opportunistically determines the protocol headers to be 1485 * present based on the fields being set. Some fields cannot be used alone to 1486 * determine the protocol headers present. Sometimes, fields for particular 1487 * protocol headers are not matched. In those cases, the protocol headers 1488 * must be explicitly set. 1489 */ 1490 static void 1491 ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1492 enum ice_flow_fld_match_type field_type, u16 val_loc, 1493 u16 mask_loc, u16 last_loc) 1494 { 1495 u64 bit = BIT_ULL(fld); 1496 1497 seg->match |= bit; 1498 if (field_type == ICE_FLOW_FLD_TYPE_RANGE) 1499 seg->range |= bit; 1500 1501 seg->fields[fld].type = field_type; 1502 seg->fields[fld].src.val = val_loc; 1503 seg->fields[fld].src.mask = mask_loc; 1504 seg->fields[fld].src.last = last_loc; 1505 1506 ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr); 1507 } 1508 1509 /** 1510 * ice_flow_set_fld - specifies locations of field from entry's input buffer 1511 * @seg: packet segment the field being set belongs to 1512 * @fld: field to be set 1513 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from 1514 * entry's input buffer 1515 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's 1516 * input buffer 1517 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from 1518 * entry's input buffer 1519 * @range: indicate if field being matched is to be in a range 1520 * 1521 * This function specifies the locations, in the form of byte offsets from the 1522 * start of the input buffer for a flow entry, from where the value to match, 1523 * the mask value, and upper value can be extracted. These locations are then 1524 * stored in the flow profile. When adding a flow entry associated with the 1525 * flow profile, these locations will be used to quickly extract the values and 1526 * create the content of a match entry. This function should only be used for 1527 * fixed-size data structures. 1528 */ 1529 void 1530 ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1531 u16 val_loc, u16 mask_loc, u16 last_loc, bool range) 1532 { 1533 enum ice_flow_fld_match_type t = range ? 
1534 ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; 1535 1536 ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); 1537 } 1538 1539 /** 1540 * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf 1541 * @seg: packet segment the field being set belongs to 1542 * @fld: field to be set 1543 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from 1544 * entry's input buffer 1545 * @pref_loc: location of prefix value from entry's input buffer 1546 * @pref_sz: size of the location holding the prefix value 1547 * 1548 * This function specifies the locations, in the form of byte offsets from the 1549 * start of the input buffer for a flow entry, from where the value to match 1550 * and the IPv4 prefix value can be extracted. These locations are then stored 1551 * in the flow profile. When adding flow entries to the associated flow profile, 1552 * these locations can be used to quickly extract the values to create the 1553 * content of a match entry. This function should only be used for fixed-size 1554 * data structures. 1555 */ 1556 void 1557 ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, 1558 u16 val_loc, u16 pref_loc, u8 pref_sz) 1559 { 1560 /* For this type of field, the "mask" location is for the prefix value's 1561 * location and the "last" location is for the size of the location of 1562 * the prefix value. 1563 */ 1564 ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc, 1565 pref_loc, (u16)pref_sz); 1566 } 1567 1568 /** 1569 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf 1570 * @seg: packet segment the field being set belongs to 1571 * @off: offset of the raw field from the beginning of the segment in bytes 1572 * @len: length of the raw pattern to be matched 1573 * @val_loc: location of the value to match from entry's input buffer 1574 * @mask_loc: location of mask value from entry's input buffer 1575 * 1576 * This function specifies the offset of the raw field to be match from the 1577 * beginning of the specified packet segment, and the locations, in the form of 1578 * byte offsets from the start of the input buffer for a flow entry, from where 1579 * the value to match and the mask value to be extracted. These locations are 1580 * then stored in the flow profile. When adding flow entries to the associated 1581 * flow profile, these locations can be used to quickly extract the values to 1582 * create the content of a match entry. This function should only be used for 1583 * fixed-size data structures. 1584 */ 1585 void 1586 ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, 1587 u16 val_loc, u16 mask_loc) 1588 { 1589 if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) { 1590 seg->raws[seg->raws_cnt].off = off; 1591 seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE; 1592 seg->raws[seg->raws_cnt].info.src.val = val_loc; 1593 seg->raws[seg->raws_cnt].info.src.mask = mask_loc; 1594 /* The "last" field is used to store the length of the field */ 1595 seg->raws[seg->raws_cnt].info.src.last = len; 1596 } 1597 1598 /* Overflows of "raws" will be handled as an error condition later in 1599 * the flow when this information is processed. 
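	 * raws_cnt is still incremented below so that ice_flow_xtract_raws()
	 * can detect a count larger than the "raws" array and fail with
	 * ICE_ERR_MAX_LIMIT when the segment is processed.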
1600 */ 1601 seg->raws_cnt++; 1602 } 1603 1604 #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ 1605 (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) 1606 1607 #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ 1608 (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ 1609 ICE_FLOW_SEG_HDR_SCTP) 1610 1611 #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ 1612 (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ 1613 ICE_FLOW_RSS_SEG_HDR_L4_MASKS) 1614 1615 /** 1616 * ice_flow_set_rss_seg_info - setup packet segments for RSS 1617 * @segs: pointer to the flow field segment(s) 1618 * @hash_fields: fields to be hashed on for the segment(s) 1619 * @flow_hdr: protocol header fields within a packet segment 1620 * 1621 * Helper function to extract fields from hash bitmap and use flow 1622 * header value to set flow field segment for further use in flow 1623 * profile entry or removal. 1624 */ 1625 static enum ice_status 1626 ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields, 1627 u32 flow_hdr) 1628 { 1629 u64 val = hash_fields; 1630 u8 i; 1631 1632 for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) { 1633 u64 bit = BIT_ULL(i); 1634 1635 if (val & bit) { 1636 ice_flow_set_fld(segs, (enum ice_flow_field)i, 1637 ICE_FLOW_FLD_OFF_INVAL, 1638 ICE_FLOW_FLD_OFF_INVAL, 1639 ICE_FLOW_FLD_OFF_INVAL, false); 1640 val &= ~bit; 1641 } 1642 } 1643 ICE_FLOW_SET_HDRS(segs, flow_hdr); 1644 1645 if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS) 1646 return ICE_ERR_PARAM; 1647 1648 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); 1649 if (val && !ice_is_pow2(val)) 1650 return ICE_ERR_CFG; 1651 1652 val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); 1653 if (val && !ice_is_pow2(val)) 1654 return ICE_ERR_CFG; 1655 1656 return ICE_SUCCESS; 1657 } 1658 1659 /** 1660 * ice_rem_vsi_rss_list - remove VSI from RSS list 1661 * @hw: pointer to the hardware structure 1662 * @vsi_handle: software VSI handle 1663 * 1664 * Remove the VSI from all RSS configurations in the list. 1665 */ 1666 void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) 1667 { 1668 struct ice_rss_cfg *r, *tmp; 1669 1670 if (LIST_EMPTY(&hw->rss_list_head)) 1671 return; 1672 1673 ice_acquire_lock(&hw->rss_locks); 1674 LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, 1675 ice_rss_cfg, l_entry) { 1676 if (ice_test_and_clear_bit(vsi_handle, r->vsis)) 1677 if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { 1678 LIST_DEL(&r->l_entry); 1679 ice_free(hw, r); 1680 } 1681 } 1682 ice_release_lock(&hw->rss_locks); 1683 } 1684 1685 /** 1686 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI 1687 * @hw: pointer to the hardware structure 1688 * @vsi_handle: software VSI handle 1689 * 1690 * This function will iterate through all flow profiles and disassociate 1691 * the VSI from that profile. If the flow profile has no VSIs it will 1692 * be removed. 

/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	if (LIST_EMPTY(&hw->rss_list_head))
		return;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry) {
		if (ice_test_and_clear_bit(vsi_handle, r->vsis))
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
				ice_free(hw, r);
			}
	}
	ice_release_lock(&hw->rss_locks);
}

/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from each profile. If a flow profile has no VSIs left it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (LIST_EMPTY(&hw->fl_profs[blk]))
		return ICE_SUCCESS;

	ice_acquire_lock(&hw->fl_profs_locks[blk]);
	LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
				 l_entry) {
		if (ice_is_bit_set(p->vsis, vsi_handle)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof_sync(hw, blk, p);
				if (status)
					break;
			}
		}
	}
	ice_release_lock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found,
	 * remove it from the RSS entry list of the VSI context and delete
	 * the entry.
	 */
	LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
				 ice_rss_cfg, l_entry) {
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			ice_clear_bit(vsi_handle, r->vsis);
			if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
				LIST_DEL(&r->l_entry);
				ice_free(hw, r);
			}
			return;
		}
	}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			ice_set_bit(vsi_handle, r->vsis);
			return ICE_SUCCESS;
		}

	rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	ice_set_bit(vsi_handle, rss_cfg->vsis);

	LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);

	return ICE_SUCCESS;
}

#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
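
/*
 * Illustrative sketch (not part of the driver): how ICE_FLOW_GEN_PROFID()
 * packs its inputs. The hash fields land in bits [0:31], the protocol
 * headers in the bits above them, and the encapsulation bit (63) is set
 * only for multi-segment (tunneled) configurations. The variables below
 * are hypothetical.
 */
#if 0
static void
example_gen_profid(void)
{
	u64 hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT;
	u32 hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
	u64 outer_id, inner_id;

	/* One segment: encapsulation bit clear */
	outer_id = ICE_FLOW_GEN_PROFID(hash, hdrs, 1);

	/* Two segments: same hash/header bits, encapsulation bit set */
	inner_id = ICE_FLOW_GEN_PROFID(hash, hdrs, 2);
}
#endif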

/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations are required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so, disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has the same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, NULL, 0, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	ice_free(hw, segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on the input fields to
 * hash on and the flow type, and use the VSI number to add a flow entry to
 * the profile.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	ice_release_lock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_cfg_sync - remove an existing RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
 * @addl_hdrs: Protocol header fields within a packet segment
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_seg_info *segs;
	struct ice_flow_prof *prof;
	enum ice_status status;

	segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
						      sizeof(*segs));
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto out;

	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
	if (status)
		goto out;

	/* Remove RSS configuration from VSI context before deleting
	 * the flow profile.
	 */
	ice_rem_rss_list(hw, vsi_handle, prof);

	if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
		status = ice_flow_rem_prof(hw, blk, prof->id);

out:
	ice_free(hw, segs);
	return status;
}

/**
 * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
 * @addl_hdrs: Protocol header fields within a packet segment
 *
 * This function will look up the flow profile based on the input
 * hash field bitmap, iterate through the profile entry list of
 * that profile, and find the entry associated with the input VSI to be
 * removed. Calls are made to the underlying flow APIs which will in
 * turn build or update buffers for the RSS XLT1 section.
 */
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	ice_release_lock(&hw->rss_locks);

	return status;
}
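
/*
 * Illustrative sketch (not part of the driver): a caller that wants RSS to
 * hash TCP/IPv4 traffic on the 4-tuple could add, and later remove, the
 * configuration as below. "vsi_handle" is assumed to be a valid software
 * VSI handle owned by the caller; the function name is hypothetical.
 */
#if 0
static enum ice_status
example_rss_tcp4(struct ice_hw *hw, u16 vsi_handle)
{
	u64 hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT;
	u32 hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
	enum ice_status status;

	status = ice_add_rss_cfg(hw, vsi_handle, hash, hdrs);
	if (status)
		return status;

	/* ... later, tear the same configuration down again ... */
	return ice_rem_rss_cfg(hw, vsi_handle, hash, hdrs);
}
#endif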

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
 * hash, convert them to the appropriate flow L3, L4 values.
 */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))

/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * This function will take the hash bitmap provided by the AVF driver via a
 * message, convert it to ICE-compatible values, and configure RSS flow
 * profiles.
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = ICE_SUCCESS;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}
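
/*
 * Illustrative sketch (not part of the driver): an AVF request for TCP/IPv4
 * hashing expands into two RSS configurations, because an L3-only profile
 * is always created alongside any L4 profile. The call below would result
 * in ice_add_rss_cfg() being invoked once with ICE_FLOW_HASH_IPV4 and once
 * with ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT. The function name is
 * hypothetical.
 */
#if 0
static enum ice_status
example_avf_tcp4(struct ice_hw *hw, u16 vsi_handle)
{
	return ice_add_avf_rss_cfg(hw, vsi_handle,
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP));
}
#endif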

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry) {
		if (ice_is_bit_set(r->vsis, vsi_handle)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	ice_release_lock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of a flow
 * profile having the given header types and containing the input VSI.
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	struct ice_rss_cfg *r, *rss_cfg = NULL;

	/* Verify that the protocol header is non-zero and the VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
			    ice_rss_cfg, l_entry)
		if (ice_is_bit_set(r->vsis, vsi_handle) &&
		    r->packet_hdr == hdrs) {
			rss_cfg = r;
			break;
		}
	ice_release_lock(&hw->rss_locks);

	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
}
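
/*
 * Illustrative sketch (not part of the driver): querying which fields are
 * currently hashed for TCP/IPv4 on a VSI. ICE_HASH_INVALID is returned when
 * no matching configuration exists. The function name is hypothetical.
 */
#if 0
static bool
example_is_tcp4_hashed(struct ice_hw *hw, u16 vsi_handle)
{
	u64 flds = ice_get_rss_cfg(hw, vsi_handle,
				   ICE_FLOW_SEG_HDR_IPV4 |
				   ICE_FLOW_SEG_HDR_TCP);

	return flds != ICE_HASH_INVALID &&
	       (flds & ICE_FLOW_HASH_TCP_PORT) == ICE_FLOW_HASH_TCP_PORT;
}
#endif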