// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct tc_cls_flower_offload *flow, u8 key_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}

static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}
}

static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk,
			  struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk,
			struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow);
}

static int
nfp_flower_compile_geneve_opt(void *ext, void *msk,
			      struct tc_cls_flower_offload *flow)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(flow->rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 temp_vni;

		flow_rule_match_enc_keyid(rule, &match);
		temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		ext->tun_id = cpu_to_be32(temp_vni);
		temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		msk->tun_id = cpu_to_be32(temp_vni);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->ip_src = match.key->src;
		ext->ip_dst = match.key->dst;
		msk->ip_src = match.mask->src;
		msk->ip_dst = match.mask->dst;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}

int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct tc_cls_flower_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	u32 port_id;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    flow, key_ls->key_layer);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
				       (struct nfp_flower_mac_mpls *)msk,
				       flow);
		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 flow);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					flow);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		__be32 tun_dst;

		nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow);
		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
		msk += sizeof(struct nfp_flower_ipv4_udp_tun);

		/* Store the tunnel destination in the rule data.
		 * This must be present and be an exact match.
		 */
		nfp_flow->nfp_tun_ipv4_addr = tun_dst;
		nfp_tunnel_add_ipv4_off(app, tun_dst);

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, flow);
			if (err)
				return err;
		}
	}

	return 0;
}