Lines Matching +full:hsi +full:- +full:mac
1 /* Broadcom NetXtreme-C/E network driver.
46 * For VF-reps: src_fid is the fid of the VF
53 if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { in bnxt_flow_get_dst_fid()
54 netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n", in bnxt_flow_get_dst_fid()
55 dev->ifindex); in bnxt_flow_get_dst_fid()
59 /* Is dev a VF-rep? */ in bnxt_flow_get_dst_fid()
64 return bp->pf.fw_fid; in bnxt_flow_get_dst_fid()
71 struct net_device *dev = act->dev; in bnxt_tc_parse_redir()
74 netdev_info(bp->dev, "no dev in mirred action\n"); in bnxt_tc_parse_redir()
75 return -EINVAL; in bnxt_tc_parse_redir()
78 actions->flags |= BNXT_TC_ACTION_FLAG_FWD; in bnxt_tc_parse_redir()
79 actions->dst_dev = dev; in bnxt_tc_parse_redir()
87 switch (act->id) { in bnxt_tc_parse_vlan()
89 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; in bnxt_tc_parse_vlan()
92 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; in bnxt_tc_parse_vlan()
93 actions->push_vlan_tci = htons(act->vlan.vid); in bnxt_tc_parse_vlan()
94 actions->push_vlan_tpid = act->vlan.proto; in bnxt_tc_parse_vlan()
97 return -EOPNOTSUPP; in bnxt_tc_parse_vlan()
106 const struct ip_tunnel_info *tun_info = act->tunnel; in bnxt_tc_parse_tunnel_set()
107 const struct ip_tunnel_key *tun_key = &tun_info->key; in bnxt_tc_parse_tunnel_set()
110 netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n"); in bnxt_tc_parse_tunnel_set()
111 return -EOPNOTSUPP; in bnxt_tc_parse_tunnel_set()
114 actions->tun_encap_key = *tun_key; in bnxt_tc_parse_tunnel_set()
115 actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP; in bnxt_tc_parse_tunnel_set()
126 * dst mac 0xffffffff 0 1
127 * dst mac 0x0000ffff 4 2
129 * src mac 0xffff0000 4 1
130 * src mac 0xffffffff 8 2
135 * src mac: 0xffffffffffff
136 * dst mac: 0xffffffffffff
159 return -EINVAL; in bnxt_fill_l2_rewrite_fields()
163 return -EINVAL; in bnxt_fill_l2_rewrite_fields()
167 actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j)); in bnxt_fill_l2_rewrite_fields()
172 return -EINVAL; in bnxt_fill_l2_rewrite_fields()
176 actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j)); in bnxt_fill_l2_rewrite_fields()
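The mask/offset/word table above describes how the TC pedit layer delivers the 12 bytes of dst+src MAC as 32-bit writes at Ethernet-header offsets 0, 4 and 8, which bnxt_fill_l2_rewrite_fields() then copies into 16-bit words for the firmware (the cpu_to_be16() loops just above). A minimal userspace sketch of that mapping, assuming the standard Ethernet header layout (dst MAC in bytes 0-5, src MAC in bytes 6-11); collect_l2_rewrite_word() and split_macs() are illustrative names, not driver code:

#include <stdint.h>
#include <string.h>

/* Collect one 32-bit pedit value; offset is 0, 4 or 8, so it indexes
 * the u16 array in 2-byte units, filling exactly two entries per write.
 */
static void collect_l2_rewrite_word(uint16_t eth_addr[6],
                                    unsigned int offset, uint32_t val)
{
        memcpy(&eth_addr[offset >> 1], &val, sizeof(val));
}

/* Entries 0-2 of the collected array form the dst MAC, entries 3-5 the
 * src MAC; the driver converts each entry with cpu_to_be16() before
 * handing it to the firmware.
 */
static void split_macs(const uint16_t eth_addr[6],
                       uint16_t dmac[3], uint16_t smac[3])
{
        memcpy(dmac, &eth_addr[0], 3 * sizeof(uint16_t));
        memcpy(smac, &eth_addr[3], 3 * sizeof(uint16_t));
}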
192 offset = act->mangle.offset; in bnxt_tc_parse_pedit()
193 htype = act->mangle.htype; in bnxt_tc_parse_pedit()
194 mask = ~act->mangle.mask; in bnxt_tc_parse_pedit()
195 val = act->mangle.val; in bnxt_tc_parse_pedit()
200 netdev_err(bp->dev, in bnxt_tc_parse_pedit()
203 return -EINVAL; in bnxt_tc_parse_pedit()
205 actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE; in bnxt_tc_parse_pedit()
211 actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; in bnxt_tc_parse_pedit()
212 actions->nat.l3_is_ipv4 = true; in bnxt_tc_parse_pedit()
214 actions->nat.src_xlate = true; in bnxt_tc_parse_pedit()
215 actions->nat.l3.ipv4.saddr.s_addr = htonl(val); in bnxt_tc_parse_pedit()
217 actions->nat.src_xlate = false; in bnxt_tc_parse_pedit()
218 actions->nat.l3.ipv4.daddr.s_addr = htonl(val); in bnxt_tc_parse_pedit()
220 netdev_err(bp->dev, in bnxt_tc_parse_pedit()
223 return -EINVAL; in bnxt_tc_parse_pedit()
226 netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n", in bnxt_tc_parse_pedit()
227 actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr, in bnxt_tc_parse_pedit()
228 &actions->nat.l3.ipv4.daddr); in bnxt_tc_parse_pedit()
232 actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; in bnxt_tc_parse_pedit()
233 actions->nat.l3_is_ipv4 = false; in bnxt_tc_parse_pedit()
239 actions->nat.src_xlate = true; in bnxt_tc_parse_pedit()
240 idx = (offset - offset_of_ip6_saddr) / 4; in bnxt_tc_parse_pedit()
242 actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); in bnxt_tc_parse_pedit()
245 actions->nat.src_xlate = false; in bnxt_tc_parse_pedit()
246 idx = (offset - offset_of_ip6_daddr) / 4; in bnxt_tc_parse_pedit()
247 actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val); in bnxt_tc_parse_pedit()
249 netdev_err(bp->dev, in bnxt_tc_parse_pedit()
252 return -EINVAL; in bnxt_tc_parse_pedit()
260 if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { in bnxt_tc_parse_pedit()
261 netdev_err(bp->dev, in bnxt_tc_parse_pedit()
263 return -EINVAL; in bnxt_tc_parse_pedit()
265 if (actions->nat.src_xlate) in bnxt_tc_parse_pedit()
266 actions->nat.l4.ports.sport = htons(val); in bnxt_tc_parse_pedit()
268 actions->nat.l4.ports.dport = htons(val); in bnxt_tc_parse_pedit()
269 netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n", in bnxt_tc_parse_pedit()
270 actions->nat.l4.ports.sport, in bnxt_tc_parse_pedit()
271 actions->nat.l4.ports.dport); in bnxt_tc_parse_pedit()
274 netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", in bnxt_tc_parse_pedit()
276 return -EINVAL; in bnxt_tc_parse_pedit()
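In the IPv6 branch above, idx = (offset - offset_of_ip6_saddr) / 4 (and likewise for daddr) turns the byte offset of each 32-bit pedit write into an index into s6_addr32[]. A worked example, assuming the standard struct ipv6hdr layout (saddr at byte offset 8, daddr at byte offset 24):

#include <stdio.h>

int main(void)
{
        unsigned int offset_of_ip6_saddr = 8;   /* offsetof(struct ipv6hdr, saddr) */
        unsigned int offset_of_ip6_daddr = 24;  /* offsetof(struct ipv6hdr, daddr) */

        /* a 32-bit pedit write at header offset 16 is the 3rd word of saddr */
        printf("saddr idx = %u\n", (16 - offset_of_ip6_saddr) / 4);    /* prints 2 */

        /* a write at header offset 28 is the 2nd word of daddr */
        printf("daddr idx = %u\n", (28 - offset_of_ip6_daddr) / 4);    /* prints 1 */
        return 0;
}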
300 netdev_info(bp->dev, "no actions\n"); in bnxt_tc_parse_actions()
301 return -EINVAL; in bnxt_tc_parse_actions()
305 return -EOPNOTSUPP; in bnxt_tc_parse_actions()
308 switch (act->id) { in bnxt_tc_parse_actions()
310 actions->flags |= BNXT_TC_ACTION_FLAG_DROP; in bnxt_tc_parse_actions()
330 actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; in bnxt_tc_parse_actions()
345 if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { in bnxt_tc_parse_actions()
352 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { in bnxt_tc_parse_actions()
353 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { in bnxt_tc_parse_actions()
355 actions->dst_fid = bp->pf.fw_fid; in bnxt_tc_parse_actions()
358 actions->dst_fid = in bnxt_tc_parse_actions()
359 bnxt_flow_get_dst_fid(bp, actions->dst_dev); in bnxt_tc_parse_actions()
360 if (actions->dst_fid == BNXT_FID_INVALID) in bnxt_tc_parse_actions()
361 return -EINVAL; in bnxt_tc_parse_actions()
373 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; in bnxt_tc_parse_flow()
374 struct flow_dissector *dissector = rule->match.dissector; in bnxt_tc_parse_flow()
377 if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || in bnxt_tc_parse_flow()
378 (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) { in bnxt_tc_parse_flow()
379 netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n", in bnxt_tc_parse_flow()
380 dissector->used_keys); in bnxt_tc_parse_flow()
381 return -EOPNOTSUPP; in bnxt_tc_parse_flow()
385 return -EOPNOTSUPP; in bnxt_tc_parse_flow()
391 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
392 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
394 if (match.key->n_proto == htons(ETH_P_IP) || in bnxt_tc_parse_flow()
395 match.key->n_proto == htons(ETH_P_IPV6)) { in bnxt_tc_parse_flow()
396 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
397 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
405 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
406 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
407 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
408 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
409 ether_addr_copy(flow->l2_mask.smac, match.mask->src); in bnxt_tc_parse_flow()
416 flow->l2_key.inner_vlan_tci = in bnxt_tc_parse_flow()
417 cpu_to_be16(VLAN_TCI(match.key->vlan_id, in bnxt_tc_parse_flow()
418 match.key->vlan_priority)); in bnxt_tc_parse_flow()
419 flow->l2_mask.inner_vlan_tci = in bnxt_tc_parse_flow()
420 cpu_to_be16((VLAN_TCI(match.mask->vlan_id, in bnxt_tc_parse_flow()
421 match.mask->vlan_priority))); in bnxt_tc_parse_flow()
422 flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); in bnxt_tc_parse_flow()
423 flow->l2_mask.inner_vlan_tpid = htons(0xffff); in bnxt_tc_parse_flow()
424 flow->l2_key.num_vlans = 1; in bnxt_tc_parse_flow()
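VLAN_TCI() above folds the matched vlan_id and vlan_priority into one 16-bit TCI before the cpu_to_be16() conversion. A small sketch of that packing, assuming the usual 802.1Q bit layout (priority in the top three bits, VID in bits 11-0); vlan_tci() below is an illustrative stand-in, not the driver's macro:

#include <stdint.h>

#define VLAN_PRIO_SHIFT 13      /* PCP occupies the top three bits of the TCI */

static uint16_t vlan_tci(uint16_t vid, uint8_t prio)
{
        /* e.g. vid 100 (0x064), prio 5 -> 0xa064 */
        return (uint16_t)(vid | ((uint16_t)prio << VLAN_PRIO_SHIFT));
}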
431 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; in bnxt_tc_parse_flow()
432 flow->l3_key.ipv4.daddr.s_addr = match.key->dst; in bnxt_tc_parse_flow()
433 flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; in bnxt_tc_parse_flow()
434 flow->l3_key.ipv4.saddr.s_addr = match.key->src; in bnxt_tc_parse_flow()
435 flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; in bnxt_tc_parse_flow()
440 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; in bnxt_tc_parse_flow()
441 flow->l3_key.ipv6.daddr = match.key->dst; in bnxt_tc_parse_flow()
442 flow->l3_mask.ipv6.daddr = match.mask->dst; in bnxt_tc_parse_flow()
443 flow->l3_key.ipv6.saddr = match.key->src; in bnxt_tc_parse_flow()
444 flow->l3_mask.ipv6.saddr = match.mask->src; in bnxt_tc_parse_flow()
451 flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; in bnxt_tc_parse_flow()
452 flow->l4_key.ports.dport = match.key->dst; in bnxt_tc_parse_flow()
453 flow->l4_mask.ports.dport = match.mask->dst; in bnxt_tc_parse_flow()
454 flow->l4_key.ports.sport = match.key->src; in bnxt_tc_parse_flow()
455 flow->l4_mask.ports.sport = match.mask->src; in bnxt_tc_parse_flow()
462 flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; in bnxt_tc_parse_flow()
463 flow->l4_key.icmp.type = match.key->type; in bnxt_tc_parse_flow()
464 flow->l4_key.icmp.code = match.key->code; in bnxt_tc_parse_flow()
465 flow->l4_mask.icmp.type = match.mask->type; in bnxt_tc_parse_flow()
466 flow->l4_mask.icmp.code = match.mask->code; in bnxt_tc_parse_flow()
473 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; in bnxt_tc_parse_flow()
474 flow->tun_key.u.ipv4.dst = match.key->dst; in bnxt_tc_parse_flow()
475 flow->tun_mask.u.ipv4.dst = match.mask->dst; in bnxt_tc_parse_flow()
476 flow->tun_key.u.ipv4.src = match.key->src; in bnxt_tc_parse_flow()
477 flow->tun_mask.u.ipv4.src = match.mask->src; in bnxt_tc_parse_flow()
480 return -EOPNOTSUPP; in bnxt_tc_parse_flow()
487 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; in bnxt_tc_parse_flow()
488 flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); in bnxt_tc_parse_flow()
489 flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); in bnxt_tc_parse_flow()
496 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; in bnxt_tc_parse_flow()
497 flow->tun_key.tp_dst = match.key->dst; in bnxt_tc_parse_flow()
498 flow->tun_mask.tp_dst = match.mask->dst; in bnxt_tc_parse_flow()
499 flow->tun_key.tp_src = match.key->src; in bnxt_tc_parse_flow()
500 flow->tun_mask.tp_src = match.mask->src; in bnxt_tc_parse_flow()
503 return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action, in bnxt_tc_parse_flow()
504 tc_flow_cmd->common.extack); in bnxt_tc_parse_flow()
515 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) in bnxt_hwrm_cfa_flow_free()
516 req->ext_flow_handle = flow_node->ext_flow_handle; in bnxt_hwrm_cfa_flow_free()
518 req->flow_handle = flow_node->flow_handle; in bnxt_hwrm_cfa_flow_free()
523 netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); in bnxt_hwrm_cfa_flow_free()
533 mask_len += inet_mask_len(mask->s6_addr32[i]); in ipv6_mask_len()
594 struct bnxt_tc_actions *actions = &flow->actions; in bnxt_hwrm_cfa_flow_alloc()
595 struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; in bnxt_hwrm_cfa_flow_alloc()
596 struct bnxt_tc_l3_key *l3_key = &flow->l3_key; in bnxt_hwrm_cfa_flow_alloc()
606 req->src_fid = cpu_to_le16(flow->src_fid); in bnxt_hwrm_cfa_flow_alloc()
607 req->ref_flow_handle = ref_flow_handle; in bnxt_hwrm_cfa_flow_alloc()
609 if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { in bnxt_hwrm_cfa_flow_alloc()
610 memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, in bnxt_hwrm_cfa_flow_alloc()
612 memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, in bnxt_hwrm_cfa_flow_alloc()
618 if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) { in bnxt_hwrm_cfa_flow_alloc()
619 if (actions->nat.l3_is_ipv4) { in bnxt_hwrm_cfa_flow_alloc()
623 if (actions->nat.src_xlate) { in bnxt_hwrm_cfa_flow_alloc()
627 req->nat_ip_address[0] = in bnxt_hwrm_cfa_flow_alloc()
628 actions->nat.l3.ipv4.saddr.s_addr; in bnxt_hwrm_cfa_flow_alloc()
630 if (actions->nat.l4.ports.sport) in bnxt_hwrm_cfa_flow_alloc()
631 req->nat_port = in bnxt_hwrm_cfa_flow_alloc()
632 actions->nat.l4.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
637 req->nat_ip_address[0] = in bnxt_hwrm_cfa_flow_alloc()
638 actions->nat.l3.ipv4.daddr.s_addr; in bnxt_hwrm_cfa_flow_alloc()
640 if (actions->nat.l4.ports.dport) in bnxt_hwrm_cfa_flow_alloc()
641 req->nat_port = in bnxt_hwrm_cfa_flow_alloc()
642 actions->nat.l4.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
644 netdev_dbg(bp->dev, in bnxt_hwrm_cfa_flow_alloc()
645 "req->nat_ip_address: %pI4 src_xlate: %d req->nat_port: %x\n", in bnxt_hwrm_cfa_flow_alloc()
646 req->nat_ip_address, actions->nat.src_xlate, in bnxt_hwrm_cfa_flow_alloc()
647 req->nat_port); in bnxt_hwrm_cfa_flow_alloc()
649 if (actions->nat.src_xlate) { in bnxt_hwrm_cfa_flow_alloc()
653 memcpy(req->nat_ip_address, in bnxt_hwrm_cfa_flow_alloc()
654 actions->nat.l3.ipv6.saddr.s6_addr32, in bnxt_hwrm_cfa_flow_alloc()
655 sizeof(req->nat_ip_address)); in bnxt_hwrm_cfa_flow_alloc()
657 if (actions->nat.l4.ports.sport) in bnxt_hwrm_cfa_flow_alloc()
658 req->nat_port = in bnxt_hwrm_cfa_flow_alloc()
659 actions->nat.l4.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
664 memcpy(req->nat_ip_address, in bnxt_hwrm_cfa_flow_alloc()
665 actions->nat.l3.ipv6.daddr.s6_addr32, in bnxt_hwrm_cfa_flow_alloc()
666 sizeof(req->nat_ip_address)); in bnxt_hwrm_cfa_flow_alloc()
668 if (actions->nat.l4.ports.dport) in bnxt_hwrm_cfa_flow_alloc()
669 req->nat_port = in bnxt_hwrm_cfa_flow_alloc()
670 actions->nat.l4.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
672 netdev_dbg(bp->dev, in bnxt_hwrm_cfa_flow_alloc()
673 "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n", in bnxt_hwrm_cfa_flow_alloc()
674 req->nat_ip_address, actions->nat.src_xlate, in bnxt_hwrm_cfa_flow_alloc()
675 req->nat_port); in bnxt_hwrm_cfa_flow_alloc()
679 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || in bnxt_hwrm_cfa_flow_alloc()
680 actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { in bnxt_hwrm_cfa_flow_alloc()
681 req->tunnel_handle = tunnel_handle; in bnxt_hwrm_cfa_flow_alloc()
686 req->ethertype = flow->l2_key.ether_type; in bnxt_hwrm_cfa_flow_alloc()
687 req->ip_proto = flow->l4_key.ip_proto; in bnxt_hwrm_cfa_flow_alloc()
689 if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
690 memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
691 memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
694 if (flow->l2_key.num_vlans > 0) { in bnxt_hwrm_cfa_flow_alloc()
700 req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; in bnxt_hwrm_cfa_flow_alloc()
705 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { in bnxt_hwrm_cfa_flow_alloc()
708 flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ? in bnxt_hwrm_cfa_flow_alloc()
712 if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
713 req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; in bnxt_hwrm_cfa_flow_alloc()
714 req->ip_dst_mask_len = in bnxt_hwrm_cfa_flow_alloc()
715 inet_mask_len(l3_mask->ipv4.daddr.s_addr); in bnxt_hwrm_cfa_flow_alloc()
716 req->ip_src[0] = l3_key->ipv4.saddr.s_addr; in bnxt_hwrm_cfa_flow_alloc()
717 req->ip_src_mask_len = in bnxt_hwrm_cfa_flow_alloc()
718 inet_mask_len(l3_mask->ipv4.saddr.s_addr); in bnxt_hwrm_cfa_flow_alloc()
719 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { in bnxt_hwrm_cfa_flow_alloc()
720 memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, in bnxt_hwrm_cfa_flow_alloc()
721 sizeof(req->ip_dst)); in bnxt_hwrm_cfa_flow_alloc()
722 req->ip_dst_mask_len = in bnxt_hwrm_cfa_flow_alloc()
723 ipv6_mask_len(&l3_mask->ipv6.daddr); in bnxt_hwrm_cfa_flow_alloc()
724 memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, in bnxt_hwrm_cfa_flow_alloc()
725 sizeof(req->ip_src)); in bnxt_hwrm_cfa_flow_alloc()
726 req->ip_src_mask_len = in bnxt_hwrm_cfa_flow_alloc()
727 ipv6_mask_len(&l3_mask->ipv6.saddr); in bnxt_hwrm_cfa_flow_alloc()
731 if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { in bnxt_hwrm_cfa_flow_alloc()
732 req->l4_src_port = flow->l4_key.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
733 req->l4_src_port_mask = flow->l4_mask.ports.sport; in bnxt_hwrm_cfa_flow_alloc()
734 req->l4_dst_port = flow->l4_key.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
735 req->l4_dst_port_mask = flow->l4_mask.ports.dport; in bnxt_hwrm_cfa_flow_alloc()
736 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { in bnxt_hwrm_cfa_flow_alloc()
738 req->l4_src_port = htons(flow->l4_key.icmp.type); in bnxt_hwrm_cfa_flow_alloc()
739 req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); in bnxt_hwrm_cfa_flow_alloc()
740 req->l4_dst_port = htons(flow->l4_key.icmp.code); in bnxt_hwrm_cfa_flow_alloc()
741 req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); in bnxt_hwrm_cfa_flow_alloc()
743 req->flags = cpu_to_le16(flow_flags); in bnxt_hwrm_cfa_flow_alloc()
745 if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { in bnxt_hwrm_cfa_flow_alloc()
748 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { in bnxt_hwrm_cfa_flow_alloc()
750 req->dst_fid = cpu_to_le16(actions->dst_fid); in bnxt_hwrm_cfa_flow_alloc()
752 if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { in bnxt_hwrm_cfa_flow_alloc()
755 req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; in bnxt_hwrm_cfa_flow_alloc()
756 req->l2_rewrite_vlan_tci = actions->push_vlan_tci; in bnxt_hwrm_cfa_flow_alloc()
757 memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
758 memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
760 if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { in bnxt_hwrm_cfa_flow_alloc()
764 req->l2_rewrite_vlan_tpid = 0; in bnxt_hwrm_cfa_flow_alloc()
765 memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
766 memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); in bnxt_hwrm_cfa_flow_alloc()
769 req->action_flags = cpu_to_le16(action_flags); in bnxt_hwrm_cfa_flow_alloc()
776 * 16-bit 64-bit in bnxt_hwrm_cfa_flow_alloc()
783 flow_node->flow_handle = resp->flow_handle; in bnxt_hwrm_cfa_flow_alloc()
784 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { in bnxt_hwrm_cfa_flow_alloc()
785 flow_node->ext_flow_handle = resp->ext_flow_handle; in bnxt_hwrm_cfa_flow_alloc()
786 flow_node->flow_id = resp->flow_id; in bnxt_hwrm_cfa_flow_alloc()
800 struct ip_tunnel_key *tun_key = &flow->tun_key; in hwrm_cfa_decap_filter_alloc()
809 req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); in hwrm_cfa_decap_filter_alloc()
812 req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; in hwrm_cfa_decap_filter_alloc()
813 req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; in hwrm_cfa_decap_filter_alloc()
815 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { in hwrm_cfa_decap_filter_alloc()
817 /* tunnel_id is wrongly defined as __le32 in the HSI definition */ in hwrm_cfa_decap_filter_alloc()
818 req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); in hwrm_cfa_decap_filter_alloc()
821 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { in hwrm_cfa_decap_filter_alloc()
823 ether_addr_copy(req->dst_macaddr, l2_info->dmac); in hwrm_cfa_decap_filter_alloc()
825 if (l2_info->num_vlans) { in hwrm_cfa_decap_filter_alloc()
827 req->t_ivlan_vid = l2_info->inner_vlan_tci; in hwrm_cfa_decap_filter_alloc()
831 req->ethertype = htons(ETH_P_IP); in hwrm_cfa_decap_filter_alloc()
833 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { in hwrm_cfa_decap_filter_alloc()
837 req->ip_addr_type = in hwrm_cfa_decap_filter_alloc()
839 req->dst_ipaddr[0] = tun_key->u.ipv4.dst; in hwrm_cfa_decap_filter_alloc()
840 req->src_ipaddr[0] = tun_key->u.ipv4.src; in hwrm_cfa_decap_filter_alloc()
843 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { in hwrm_cfa_decap_filter_alloc()
845 req->dst_port = tun_key->tp_dst; in hwrm_cfa_decap_filter_alloc()
849 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. in hwrm_cfa_decap_filter_alloc()
851 req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; in hwrm_cfa_decap_filter_alloc()
852 req->enables = cpu_to_le32(enables); in hwrm_cfa_decap_filter_alloc()
857 *decap_filter_handle = resp->decap_filter_id; in hwrm_cfa_decap_filter_alloc()
861 netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); in hwrm_cfa_decap_filter_alloc()
874 req->decap_filter_id = decap_filter_handle; in hwrm_cfa_decap_filter_free()
878 netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); in hwrm_cfa_decap_filter_free()
898 encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; in hwrm_cfa_encap_record_alloc()
899 req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; in hwrm_cfa_encap_record_alloc()
900 ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); in hwrm_cfa_encap_record_alloc()
901 ether_addr_copy(encap->src_mac_addr, l2_info->smac); in hwrm_cfa_encap_record_alloc()
902 if (l2_info->num_vlans) { in hwrm_cfa_encap_record_alloc()
903 encap->num_vlan_tags = l2_info->num_vlans; in hwrm_cfa_encap_record_alloc()
904 encap->ovlan_tci = l2_info->inner_vlan_tci; in hwrm_cfa_encap_record_alloc()
905 encap->ovlan_tpid = l2_info->inner_vlan_tpid; in hwrm_cfa_encap_record_alloc()
908 encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; in hwrm_cfa_encap_record_alloc()
909 encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; in hwrm_cfa_encap_record_alloc()
910 encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; in hwrm_cfa_encap_record_alloc()
911 encap_ipv4->ttl = encap_key->ttl; in hwrm_cfa_encap_record_alloc()
913 encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst; in hwrm_cfa_encap_record_alloc()
914 encap_ipv4->src_ip_addr = encap_key->u.ipv4.src; in hwrm_cfa_encap_record_alloc()
915 encap_ipv4->protocol = IPPROTO_UDP; in hwrm_cfa_encap_record_alloc()
917 encap->dst_port = encap_key->tp_dst; in hwrm_cfa_encap_record_alloc()
918 encap->vni = tunnel_id_to_key32(encap_key->tun_id); in hwrm_cfa_encap_record_alloc()
923 *encap_record_handle = resp->encap_record_id; in hwrm_cfa_encap_record_alloc()
927 netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); in hwrm_cfa_encap_record_alloc()
940 req->encap_record_id = encap_record_handle; in hwrm_cfa_encap_record_free()
944 netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); in hwrm_cfa_encap_record_free()
952 struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; in bnxt_tc_put_l2_node()
953 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_put_l2_node()
957 list_del(&flow_node->l2_list_node); in bnxt_tc_put_l2_node()
958 if (--l2_node->refcount == 0) { in bnxt_tc_put_l2_node()
959 rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node, in bnxt_tc_put_l2_node()
960 tc_info->l2_ht_params); in bnxt_tc_put_l2_node()
962 netdev_err(bp->dev, in bnxt_tc_put_l2_node()
982 rc = -ENOMEM; in bnxt_tc_get_l2_node()
986 l2_node->key = *l2_key; in bnxt_tc_get_l2_node()
987 rc = rhashtable_insert_fast(l2_table, &l2_node->node, in bnxt_tc_get_l2_node()
991 netdev_err(bp->dev, in bnxt_tc_get_l2_node()
996 INIT_LIST_HEAD(&l2_node->common_l2_flows); in bnxt_tc_get_l2_node()
1009 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_get_ref_flow_handle()
1013 l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table, in bnxt_tc_get_ref_flow_handle()
1014 tc_info->l2_ht_params, in bnxt_tc_get_ref_flow_handle()
1015 &flow->l2_key); in bnxt_tc_get_ref_flow_handle()
1017 return -1; in bnxt_tc_get_ref_flow_handle()
1022 if (l2_node->refcount > 0) { in bnxt_tc_get_ref_flow_handle()
1023 ref_flow_node = list_first_entry(&l2_node->common_l2_flows, in bnxt_tc_get_ref_flow_handle()
1026 *ref_flow_handle = ref_flow_node->flow_handle; in bnxt_tc_get_ref_flow_handle()
1035 flow_node->l2_node = l2_node; in bnxt_tc_get_ref_flow_handle()
1036 list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows); in bnxt_tc_get_ref_flow_handle()
1037 l2_node->refcount++; in bnxt_tc_get_ref_flow_handle()
1048 if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) && in bnxt_tc_can_offload()
1049 (flow->l4_key.ip_proto != IPPROTO_TCP && in bnxt_tc_can_offload()
1050 flow->l4_key.ip_proto != IPPROTO_UDP)) { in bnxt_tc_can_offload()
1051 netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n", in bnxt_tc_can_offload()
1052 flow->l4_key.ip_proto); in bnxt_tc_can_offload()
1056 /* Currently source/dest MAC cannot be partial wildcard */ in bnxt_tc_can_offload()
1057 if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) && in bnxt_tc_can_offload()
1058 !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) { in bnxt_tc_can_offload()
1059 netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n"); in bnxt_tc_can_offload()
1062 if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) && in bnxt_tc_can_offload()
1063 !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) { in bnxt_tc_can_offload()
1064 netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n"); in bnxt_tc_can_offload()
1069 if (bits_set(&flow->l2_key.inner_vlan_tci, in bnxt_tc_can_offload()
1070 sizeof(flow->l2_key.inner_vlan_tci)) && in bnxt_tc_can_offload()
1071 !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci, in bnxt_tc_can_offload()
1072 flow->l2_key.inner_vlan_tci)) { in bnxt_tc_can_offload()
1073 netdev_info(bp->dev, "Unsupported VLAN TCI\n"); in bnxt_tc_can_offload()
1076 if (bits_set(&flow->l2_key.inner_vlan_tpid, in bnxt_tc_can_offload()
1077 sizeof(flow->l2_key.inner_vlan_tpid)) && in bnxt_tc_can_offload()
1078 !is_exactmatch(&flow->l2_mask.inner_vlan_tpid, in bnxt_tc_can_offload()
1079 sizeof(flow->l2_mask.inner_vlan_tpid))) { in bnxt_tc_can_offload()
1080 netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n"); in bnxt_tc_can_offload()
1085 if (!is_exactmatch(&flow->l2_mask.ether_type, in bnxt_tc_can_offload()
1086 sizeof(flow->l2_mask.ether_type))) { in bnxt_tc_can_offload()
1087 netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n"); in bnxt_tc_can_offload()
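The wildcard checks above rest on two simple predicates: a mask counts as an exact match only when every byte is 0xff, and a key "has bits set" when any byte is non-zero. A hedged userspace sketch of those semantics (the driver's own is_exactmatch()/bits_set() helpers live elsewhere in bnxt_tc.c and may differ in detail):

#include <stdbool.h>
#include <stdint.h>

/* True only when every byte of the mask is 0xff (full match, no wildcard) */
static bool bytes_all_ff(const void *mask, int len)
{
        const uint8_t *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0xff)
                        return false;
        return true;
}

/* True when the key contains at least one non-zero byte */
static bool any_bit_set(const void *key, int len)
{
        const uint8_t *p = key;
        int i;

        for (i = 0; i < len; i++)
                if (p[i])
                        return true;
        return false;
}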
1095 * or a negative error code on failure
1104 if (--tunnel_node->refcount == 0) { in bnxt_tc_put_tunnel_node()
1105 rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, in bnxt_tc_put_tunnel_node()
1108 netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); in bnxt_tc_put_tunnel_node()
1109 rc = -1; in bnxt_tc_put_tunnel_node()
1114 return tunnel_node->refcount; in bnxt_tc_put_tunnel_node()
1133 rc = -ENOMEM; in bnxt_tc_get_tunnel_node()
1137 tunnel_node->key = *tun_key; in bnxt_tc_get_tunnel_node()
1138 tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE; in bnxt_tc_get_tunnel_node()
1139 rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node, in bnxt_tc_get_tunnel_node()
1146 tunnel_node->refcount++; in bnxt_tc_get_tunnel_node()
1149 netdev_info(bp->dev, "error rc=%d\n", rc); in bnxt_tc_get_tunnel_node()
1159 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_get_ref_decap_handle()
1163 decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table, in bnxt_tc_get_ref_decap_handle()
1164 tc_info->decap_l2_ht_params, in bnxt_tc_get_ref_decap_handle()
1167 return -1; in bnxt_tc_get_ref_decap_handle()
1172 if (decap_l2_node->refcount > 0) { in bnxt_tc_get_ref_decap_handle()
1174 list_first_entry(&decap_l2_node->common_l2_flows, in bnxt_tc_get_ref_decap_handle()
1177 *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle; in bnxt_tc_get_ref_decap_handle()
1186 flow_node->decap_l2_node = decap_l2_node; in bnxt_tc_get_ref_decap_handle()
1187 list_add(&flow_node->decap_l2_list_node, in bnxt_tc_get_ref_decap_handle()
1188 &decap_l2_node->common_l2_flows); in bnxt_tc_get_ref_decap_handle()
1189 decap_l2_node->refcount++; in bnxt_tc_get_ref_decap_handle()
1196 struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node; in bnxt_tc_put_decap_l2_node()
1197 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_put_decap_l2_node()
1201 list_del(&flow_node->decap_l2_list_node); in bnxt_tc_put_decap_l2_node()
1202 if (--decap_l2_node->refcount == 0) { in bnxt_tc_put_decap_l2_node()
1203 rc = rhashtable_remove_fast(&tc_info->decap_l2_table, in bnxt_tc_put_decap_l2_node()
1204 &decap_l2_node->node, in bnxt_tc_put_decap_l2_node()
1205 tc_info->decap_l2_ht_params); in bnxt_tc_put_decap_l2_node()
1207 netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); in bnxt_tc_put_decap_l2_node()
1215 __le32 decap_handle = flow_node->decap_node->tunnel_handle; in bnxt_tc_put_decap_handle()
1216 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_put_decap_handle()
1219 if (flow_node->decap_l2_node) in bnxt_tc_put_decap_handle()
1222 rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, in bnxt_tc_put_decap_handle()
1223 &tc_info->decap_ht_params, in bnxt_tc_put_decap_handle()
1224 flow_node->decap_node); in bnxt_tc_put_decap_handle()
1234 struct net_device *real_dst_dev = bp->dev; in bnxt_tc_resolve_tunnel_hdrs()
1242 flow.fl4_dport = tun_key->tp_dst; in bnxt_tc_resolve_tunnel_hdrs()
1243 flow.daddr = tun_key->u.ipv4.dst; in bnxt_tc_resolve_tunnel_hdrs()
1247 netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr); in bnxt_tc_resolve_tunnel_hdrs()
1248 return -EOPNOTSUPP; in bnxt_tc_resolve_tunnel_hdrs()
1254 dst_dev = rt->dst.dev; in bnxt_tc_resolve_tunnel_hdrs()
1259 if (vlan->real_dev != real_dst_dev) { in bnxt_tc_resolve_tunnel_hdrs()
1260 netdev_info(bp->dev, in bnxt_tc_resolve_tunnel_hdrs()
1261 "dst_dev(%s) doesn't use PF-if(%s)\n", in bnxt_tc_resolve_tunnel_hdrs()
1264 rc = -EOPNOTSUPP; in bnxt_tc_resolve_tunnel_hdrs()
1267 l2_info->inner_vlan_tci = htons(vlan->vlan_id); in bnxt_tc_resolve_tunnel_hdrs()
1268 l2_info->inner_vlan_tpid = vlan->vlan_proto; in bnxt_tc_resolve_tunnel_hdrs()
1269 l2_info->num_vlans = 1; in bnxt_tc_resolve_tunnel_hdrs()
1272 netdev_info(bp->dev, in bnxt_tc_resolve_tunnel_hdrs()
1273 "dst_dev(%s) for %pI4b is not PF-if(%s)\n", in bnxt_tc_resolve_tunnel_hdrs()
1276 rc = -EOPNOTSUPP; in bnxt_tc_resolve_tunnel_hdrs()
1280 nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); in bnxt_tc_resolve_tunnel_hdrs()
1282 netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n", in bnxt_tc_resolve_tunnel_hdrs()
1284 rc = -EOPNOTSUPP; in bnxt_tc_resolve_tunnel_hdrs()
1288 tun_key->u.ipv4.src = flow.saddr; in bnxt_tc_resolve_tunnel_hdrs()
1289 tun_key->ttl = ip4_dst_hoplimit(&rt->dst); in bnxt_tc_resolve_tunnel_hdrs()
1290 neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev); in bnxt_tc_resolve_tunnel_hdrs()
1291 ether_addr_copy(l2_info->smac, dst_dev->dev_addr); in bnxt_tc_resolve_tunnel_hdrs()
1300 return -EOPNOTSUPP; in bnxt_tc_resolve_tunnel_hdrs()
1308 struct ip_tunnel_key *decap_key = &flow->tun_key; in bnxt_tc_get_decap_handle()
1309 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_get_decap_handle()
1322 decap_key->tp_src = 0; in bnxt_tc_get_decap_handle()
1323 decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, in bnxt_tc_get_decap_handle()
1324 &tc_info->decap_ht_params, in bnxt_tc_get_decap_handle()
1327 return -ENOMEM; in bnxt_tc_get_decap_handle()
1329 flow_node->decap_node = decap_node; in bnxt_tc_get_decap_handle()
1331 if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) in bnxt_tc_get_decap_handle()
1336 * Find its next-hop MAC addrs in bnxt_tc_get_decap_handle()
1338 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; in bnxt_tc_get_decap_handle()
1339 tun_key.tp_dst = flow->tun_key.tp_dst; in bnxt_tc_get_decap_handle()
1344 decap_l2_info = &decap_node->l2_info; in bnxt_tc_get_decap_handle()
1346 ether_addr_copy(decap_l2_info->dmac, l2_info.smac); in bnxt_tc_get_decap_handle()
1348 decap_l2_info->num_vlans = l2_info.num_vlans; in bnxt_tc_get_decap_handle()
1349 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; in bnxt_tc_get_decap_handle()
1350 decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci; in bnxt_tc_get_decap_handle()
1352 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; in bnxt_tc_get_decap_handle()
1367 &decap_node->tunnel_handle); in bnxt_tc_get_decap_handle()
1372 *decap_filter_handle = decap_node->tunnel_handle; in bnxt_tc_get_decap_handle()
1378 bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, in bnxt_tc_get_decap_handle()
1379 &tc_info->decap_ht_params, in bnxt_tc_get_decap_handle()
1380 flow_node->decap_node); in bnxt_tc_get_decap_handle()
1387 __le32 encap_handle = encap_node->tunnel_handle; in bnxt_tc_put_encap_handle()
1388 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_put_encap_handle()
1391 rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, in bnxt_tc_put_encap_handle()
1392 &tc_info->encap_ht_params, encap_node); in bnxt_tc_put_encap_handle()
1406 struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; in bnxt_tc_get_encap_handle()
1407 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_get_encap_handle()
1415 encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table, in bnxt_tc_get_encap_handle()
1416 &tc_info->encap_ht_params, in bnxt_tc_get_encap_handle()
1419 return -ENOMEM; in bnxt_tc_get_encap_handle()
1421 flow_node->encap_node = encap_node; in bnxt_tc_get_encap_handle()
1423 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) in bnxt_tc_get_encap_handle()
1426 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info); in bnxt_tc_get_encap_handle()
1431 rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info, in bnxt_tc_get_encap_handle()
1432 &encap_node->tunnel_handle); in bnxt_tc_get_encap_handle()
1437 *encap_handle = encap_node->tunnel_handle; in bnxt_tc_get_encap_handle()
1441 bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, in bnxt_tc_get_encap_handle()
1442 &tc_info->encap_ht_params, encap_node); in bnxt_tc_get_encap_handle()
1450 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_put_tunnel_handle()
1452 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) in bnxt_tc_put_tunnel_handle()
1453 bnxt_tc_put_encap_handle(bp, flow_node->encap_node); in bnxt_tc_put_tunnel_handle()
1461 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_get_tunnel_handle()
1464 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) in bnxt_tc_get_tunnel_handle()
1473 struct bnxt_tc_info *tc_info = bp->tc_info; in __bnxt_tc_del_flow()
1476 /* send HWRM cmd to free the flow-id */ in __bnxt_tc_del_flow()
1479 mutex_lock(&tc_info->lock); in __bnxt_tc_del_flow()
1482 bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); in __bnxt_tc_del_flow()
1487 mutex_unlock(&tc_info->lock); in __bnxt_tc_del_flow()
1489 rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, in __bnxt_tc_del_flow()
1490 tc_info->flow_ht_params); in __bnxt_tc_del_flow()
1492 netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", in __bnxt_tc_del_flow()
1502 flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; in bnxt_tc_set_flow_dir()
1508 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) in bnxt_tc_set_src_fid()
1509 flow->src_fid = bp->pf.fw_fid; in bnxt_tc_set_src_fid()
1511 flow->src_fid = src_fid; in bnxt_tc_set_src_fid()
1518 * a) lookup l2-key
1520 * c) link l2-key with flow
1522 * a) unlinking l2-key from flow
1525 * The hash-tables are already protected by the rhashtable API.
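The sequence above boils down to a get/put refcount on the shared L2 node: the first flow with a given L2 key creates the node, later flows take a reference and reuse an existing flow's handle as ref_flow_handle, and deleting a flow drops the reference and frees the node when it reaches zero. A minimal sketch of that discipline, assuming a single preallocated slot instead of the driver's rhashtable; l2_node_get()/l2_node_put() are illustrative names:

#include <stdlib.h>

struct l2_node {
        int refcount;
        /* the L2 key and the list of flows sharing it would live here */
};

/* Look up or create the node for a key; every flow takes a reference */
static struct l2_node *l2_node_get(struct l2_node **slot)
{
        if (!*slot) {
                *slot = calloc(1, sizeof(**slot));
                if (!*slot)
                        return NULL;
        }
        (*slot)->refcount++;
        return *slot;
}

/* Drop a flow's reference; free the node when the last flow is gone */
static void l2_node_put(struct l2_node **slot)
{
        if (*slot && --(*slot)->refcount == 0) {
                free(*slot);
                *slot = NULL;
        }
}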
1531 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_add_flow()
1540 rc = -ENOMEM; in bnxt_tc_add_flow()
1543 new_node->cookie = tc_flow_cmd->cookie; in bnxt_tc_add_flow()
1544 flow = &new_node->flow; in bnxt_tc_add_flow()
1551 bnxt_tc_set_flow_dir(bp, flow, flow->src_fid); in bnxt_tc_add_flow()
1554 rc = -EOPNOTSUPP; in bnxt_tc_add_flow()
1560 old_node = rhashtable_lookup_fast(&tc_info->flow_table, in bnxt_tc_add_flow()
1561 &tc_flow_cmd->cookie, in bnxt_tc_add_flow()
1562 tc_info->flow_ht_params); in bnxt_tc_add_flow()
1569 mutex_lock(&tc_info->lock); in bnxt_tc_add_flow()
1585 flow->lastused = jiffies; in bnxt_tc_add_flow()
1586 spin_lock_init(&flow->stats_lock); in bnxt_tc_add_flow()
1587 /* add new flow to flow-table */ in bnxt_tc_add_flow()
1588 rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, in bnxt_tc_add_flow()
1589 tc_info->flow_ht_params); in bnxt_tc_add_flow()
1593 mutex_unlock(&tc_info->lock); in bnxt_tc_add_flow()
1603 mutex_unlock(&tc_info->lock); in bnxt_tc_add_flow()
1607 netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n", in bnxt_tc_add_flow()
1608 __func__, tc_flow_cmd->cookie, rc); in bnxt_tc_add_flow()
1615 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_del_flow()
1618 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, in bnxt_tc_del_flow()
1619 &tc_flow_cmd->cookie, in bnxt_tc_del_flow()
1620 tc_info->flow_ht_params); in bnxt_tc_del_flow()
1622 return -EINVAL; in bnxt_tc_del_flow()
1631 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_get_flow_stats()
1636 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, in bnxt_tc_get_flow_stats()
1637 &tc_flow_cmd->cookie, in bnxt_tc_get_flow_stats()
1638 tc_info->flow_ht_params); in bnxt_tc_get_flow_stats()
1640 return -1; in bnxt_tc_get_flow_stats()
1642 flow = &flow_node->flow; in bnxt_tc_get_flow_stats()
1643 curr_stats = &flow->stats; in bnxt_tc_get_flow_stats()
1644 prev_stats = &flow->prev_stats; in bnxt_tc_get_flow_stats()
1646 spin_lock(&flow->stats_lock); in bnxt_tc_get_flow_stats()
1647 stats.packets = curr_stats->packets - prev_stats->packets; in bnxt_tc_get_flow_stats()
1648 stats.bytes = curr_stats->bytes - prev_stats->bytes; in bnxt_tc_get_flow_stats()
1650 lastused = flow->lastused; in bnxt_tc_get_flow_stats()
1651 spin_unlock(&flow->stats_lock); in bnxt_tc_get_flow_stats()
1653 flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, in bnxt_tc_get_flow_stats()
1664 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { in bnxt_fill_cfa_stats_req()
1665 *flow_id = flow_node->flow_id; in bnxt_fill_cfa_stats_req()
1672 if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) in bnxt_fill_cfa_stats_req()
1680 *flow_handle = flow_node->flow_handle; in bnxt_fill_cfa_stats_req()
1698 req_flow_handles = &req->flow_handle_0; in bnxt_hwrm_cfa_flow_stats_get()
1699 req_flow_ids = &req->flow_id_0; in bnxt_hwrm_cfa_flow_stats_get()
1701 req->num_flows = cpu_to_le16(num_flows); in bnxt_hwrm_cfa_flow_stats_get()
1715 resp_packets = &resp->packet_0; in bnxt_hwrm_cfa_flow_stats_get()
1716 resp_bytes = &resp->byte_0; in bnxt_hwrm_cfa_flow_stats_get()
1728 netdev_info(bp->dev, "error rc=%d\n", rc); in bnxt_hwrm_cfa_flow_stats_get()
1735 * is denoted by mask and will wrap-around beyond that width.
1749 * Handle possible wrap-around while updating the stat counters
1755 accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask); in bnxt_flow_stats_accum()
1756 accumulate_val(&acc_stats->packets, hw_stats->packets, in bnxt_flow_stats_accum()
1757 tc_info->packets_mask); in bnxt_flow_stats_accum()
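accumulate_val() above has to cope with hardware counters that are narrower than 64 bits: bnxt_init_tc() further down sets bytes_mask = mask(36) and packets_mask = mask(28), so the reported values wrap at 2^36 and 2^28 respectively. A sketch of the same wrap handling, assuming the hardware reports only the low "mask" bits of the true count (illustrative, not the driver's exact helper):

#include <stdint.h>

/* Fold a possibly wrapped narrow HW counter value into a 64-bit total:
 * if the new low bits are smaller than the low bits already recorded,
 * the HW counter wrapped once since the last read, so one full period
 * (mask + 1) is added back in.
 */
static void accumulate_narrow(uint64_t *accum, uint64_t hw_val, uint64_t mask)
{
        int wrapped = hw_val < (*accum & mask);

        *accum = (*accum & ~mask) + hw_val;
        if (wrapped)
                *accum += mask + 1;
}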
1764 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_flow_stats_batch_update()
1773 struct bnxt_tc_flow *flow = &flow_node->flow; in bnxt_tc_flow_stats_batch_update()
1775 spin_lock(&flow->stats_lock); in bnxt_tc_flow_stats_batch_update()
1776 bnxt_flow_stats_accum(tc_info, &flow->stats, in bnxt_tc_flow_stats_batch_update()
1778 if (flow->stats.packets != flow->prev_stats.packets) in bnxt_tc_flow_stats_batch_update()
1779 flow->lastused = jiffies; in bnxt_tc_flow_stats_batch_update()
1780 spin_unlock(&flow->stats_lock); in bnxt_tc_flow_stats_batch_update()
1791 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_flow_stats_batch_prep()
1792 struct rhashtable_iter *iter = &tc_info->iter; in bnxt_tc_flow_stats_batch_prep()
1803 if (PTR_ERR(flow_node) == -EAGAIN) { in bnxt_tc_flow_stats_batch_prep()
1825 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_tc_flow_stats_work()
1828 num_flows = atomic_read(&tc_info->flow_table.nelems); in bnxt_tc_flow_stats_work()
1832 rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter); in bnxt_tc_flow_stats_work()
1835 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch, in bnxt_tc_flow_stats_work()
1838 if (rc == -EAGAIN) in bnxt_tc_flow_stats_work()
1847 tc_info->stats_batch); in bnxt_tc_flow_stats_work()
1850 rhashtable_walk_exit(&tc_info->iter); in bnxt_tc_flow_stats_work()
1856 switch (cls_flower->command) { in bnxt_tc_setup_flower()
1864 return -EOPNOTSUPP; in bnxt_tc_setup_flower()
1873 struct bnxt *bp = priv->bp; in bnxt_tc_setup_indr_block_cb()
1875 if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) in bnxt_tc_setup_indr_block_cb()
1876 return -EOPNOTSUPP; in bnxt_tc_setup_indr_block_cb()
1880 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower); in bnxt_tc_setup_indr_block_cb()
1882 return -EOPNOTSUPP; in bnxt_tc_setup_indr_block_cb()
1891 list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) in bnxt_tc_indr_block_cb_lookup()
1892 if (cb_priv->tunnel_netdev == netdev) in bnxt_tc_indr_block_cb_lookup()
1902 list_del(&priv->list); in bnxt_tc_setup_indr_rel()
1913 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in bnxt_tc_setup_indr_block()
1914 return -EOPNOTSUPP; in bnxt_tc_setup_indr_block()
1916 switch (f->command) { in bnxt_tc_setup_indr_block()
1920 return -ENOMEM; in bnxt_tc_setup_indr_block()
1922 cb_priv->tunnel_netdev = netdev; in bnxt_tc_setup_indr_block()
1923 cb_priv->bp = bp; in bnxt_tc_setup_indr_block()
1924 list_add(&cb_priv->list, &bp->tc_indr_block_list); in bnxt_tc_setup_indr_block()
1931 list_del(&cb_priv->list); in bnxt_tc_setup_indr_block()
1937 list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list); in bnxt_tc_setup_indr_block()
1942 return -ENOENT; in bnxt_tc_setup_indr_block()
1944 block_cb = flow_block_cb_lookup(f->block, in bnxt_tc_setup_indr_block()
1948 return -ENOENT; in bnxt_tc_setup_indr_block()
1951 list_del(&block_cb->driver_list); in bnxt_tc_setup_indr_block()
1954 return -EOPNOTSUPP; in bnxt_tc_setup_indr_block()
1970 return -EOPNOTSUPP; in bnxt_tc_setup_indr_cb()
1979 return -EOPNOTSUPP; in bnxt_tc_setup_indr_cb()
1985 .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
2011 #define mask(width) ((u64)~0 >> (64 - (width)))
2018 if (bp->hwrm_spec_code < 0x10803) in bnxt_init_tc()
2023 return -ENOMEM; in bnxt_init_tc()
2024 mutex_init(&tc_info->lock); in bnxt_init_tc()
2027 tc_info->bytes_mask = mask(36); in bnxt_init_tc()
2028 tc_info->packets_mask = mask(28); in bnxt_init_tc()
2030 tc_info->flow_ht_params = bnxt_tc_flow_ht_params; in bnxt_init_tc()
2031 rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); in bnxt_init_tc()
2035 tc_info->l2_ht_params = bnxt_tc_l2_ht_params; in bnxt_init_tc()
2036 rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); in bnxt_init_tc()
2040 tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params; in bnxt_init_tc()
2041 rc = rhashtable_init(&tc_info->decap_l2_table, in bnxt_init_tc()
2042 &tc_info->decap_l2_ht_params); in bnxt_init_tc()
2046 tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params; in bnxt_init_tc()
2047 rc = rhashtable_init(&tc_info->decap_table, in bnxt_init_tc()
2048 &tc_info->decap_ht_params); in bnxt_init_tc()
2052 tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params; in bnxt_init_tc()
2053 rc = rhashtable_init(&tc_info->encap_table, in bnxt_init_tc()
2054 &tc_info->encap_ht_params); in bnxt_init_tc()
2058 tc_info->enabled = true; in bnxt_init_tc()
2059 bp->dev->hw_features |= NETIF_F_HW_TC; in bnxt_init_tc()
2060 bp->dev->features |= NETIF_F_HW_TC; in bnxt_init_tc()
2061 bp->tc_info = tc_info; in bnxt_init_tc()
2064 INIT_LIST_HEAD(&bp->tc_indr_block_list); in bnxt_init_tc()
2070 rhashtable_destroy(&tc_info->encap_table); in bnxt_init_tc()
2073 rhashtable_destroy(&tc_info->decap_table); in bnxt_init_tc()
2075 rhashtable_destroy(&tc_info->decap_l2_table); in bnxt_init_tc()
2077 rhashtable_destroy(&tc_info->l2_table); in bnxt_init_tc()
2079 rhashtable_destroy(&tc_info->flow_table); in bnxt_init_tc()
2082 bp->tc_info = NULL; in bnxt_init_tc()
2088 struct bnxt_tc_info *tc_info = bp->tc_info; in bnxt_shutdown_tc()
2095 rhashtable_destroy(&tc_info->flow_table); in bnxt_shutdown_tc()
2096 rhashtable_destroy(&tc_info->l2_table); in bnxt_shutdown_tc()
2097 rhashtable_destroy(&tc_info->decap_l2_table); in bnxt_shutdown_tc()
2098 rhashtable_destroy(&tc_info->decap_table); in bnxt_shutdown_tc()
2099 rhashtable_destroy(&tc_info->encap_table); in bnxt_shutdown_tc()
2101 bp->tc_info = NULL; in bnxt_shutdown_tc()