1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2021 Mellanox Technologies. */
3
4 #ifndef __MLX5_EN_TC_PRIV_H__
5 #define __MLX5_EN_TC_PRIV_H__
6
7 #include "en_tc.h"
8 #include "en/tc/act/act.h"
9
/* First flag bit available for flow-private use: the bit right after the
 * last MLX5E_TC_FLAG_* bit exported by en_tc.h.
 */
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

/* Max number of rule "splits" per flow; mlx5e_tc_flow::rule[] is sized
 * MLX5E_TC_MAX_SPLITS + 1 to also hold the base rule.
 */
#define MLX5E_TC_MAX_SPLITS 1
13
14
/* State flags kept in mlx5e_tc_flow::flags.
 *
 * The first five entries mirror the MLX5E_TC_FLAG_* bits exported by
 * en_tc.h; the remaining bits, allocated from MLX5E_TC_FLOW_BASE up, are
 * private to the TC offload implementation. Manipulate them only through
 * the flow_flag_*() helpers below, which provide the required memory
 * ordering.
 */
enum {
	MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 8,
	MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 9,
	MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 10,
	MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 11,
};
34
/* Scratch state accumulated while parsing a TC rule's matches and actions,
 * before they are committed to hardware via a mlx5_flow_attr.
 * Arrays sized MLX5_MAX_FLOW_FWD_VPORTS hold one entry per forwarding
 * destination.
 */
struct mlx5e_tc_flow_parse_attr {
	/* per-destination tunnel metadata for encap actions */
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev; /* device the rule was installed on */
	struct mlx5_flow_spec spec; /* match criteria and values */
	/* pending pedit (header rewrite) actions, indexed by pedit command */
	struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	/* per-destination ifindex of mirred (redirect) targets */
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_act_parse_state parse_state;
};
45
/* Return the flow-steering chains object backing @tc's NIC-mode tables. */
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
47
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *       |- Helper array
 *            [0] Helper item 0
 *                |- list_head item 0
 *                |- index (0)
 *            [1] Helper item 1
 *                |- list_head item 1
 *                |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list; /* entry in the encap instance's flow list */
	int index; /* position of this item in the flow's encaps[] array */
};
70
/* Route counterpart of encap_flow_item (see the back-reference scheme
 * documented above encap_flow_item).
 */
struct encap_route_flow_item {
	struct mlx5e_route_entry *r; /* attached route instance */
	int index; /* position of this item in the flow's encap_routes[] array */
};
75
/* One TC flow tracked for offload: the hardware rule handle(s) plus every
 * driver-side object (encap/decap entries, routes, hairpin, peer
 * duplicates) the rule depends on. Refcounted (refcnt) and freed via RCU
 * (rcu_head).
 */
struct mlx5e_tc_flow {
	struct rhash_head node; /* membership in the driver's flow hashtable */
	struct mlx5e_priv *priv;
	u64 cookie; /* TC filter cookie identifying this flow */
	unsigned long flags; /* MLX5E_TC_FLOW_FLAG_* bits */
	/* base rule plus one handle per split */
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* flows sharing same route entry */
	struct list_head decap_routes;
	struct mlx5e_route_entry *decap_route;
	struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g
				   * due to missing route)
				   */
	struct list_head peer_flows; /* flows on peer */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt; /* see mlx5e_flow_get()/mlx5e_flow_put() */
	struct rcu_head rcu_head;
	struct completion init_done;
	struct completion del_hw_done;
	struct mlx5_flow_attr *attr; /* primary offload attributes */
	struct mlx5_flow_attr *extra_split_attr;
	struct list_head attrs; /* additional attrs, e.g. for post actions */
	u32 chain_mapping;
};
116
/* Install/remove a single rule described by @spec/@attr in whichever
 * namespace @attr targets.
 */
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr);

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr);

/* IP version matched by @spec; @outer selects outer vs. inner headers. */
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);

/* Offload @flow's rule(s) to the eswitch FDB tables. */
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

/* Attribute set carrying @flow's encap state — NOTE(review): presumably
 * the attr owning the encap destinations; confirm against en_tc.c.
 */
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);

/* Offload/unoffload the rules backing @flow's post actions (flow->attrs). */
void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);

/* Flow classification helpers derived from flow->flags. */
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
146
/* Set @flag in flow->flags.
 *
 * The barrier pairs with the smp_mb__after_atomic() in __flow_flag_test():
 * writes to the flow object performed before setting the flag are visible
 * to any reader that subsequently observes the flag.
 */
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

/* Shorthand taking the short flag name, e.g. flow_flag_set(flow, OFFLOADED). */
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
155
__flow_flag_test_and_set(struct mlx5e_tc_flow * flow,unsigned long flag)156 static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
157 unsigned long flag)
158 {
159 /* test_and_set_bit() provides all necessary barriers */
160 return test_and_set_bit(flag, &flow->flags);
161 }
162
163 #define flow_flag_test_and_set(flow, flag) \
164 __flow_flag_test_and_set(flow, \
165 MLX5E_TC_FLOW_FLAG_##flag)
166
/* Clear @flag in flow->flags.
 *
 * As with __flow_flag_set(), the barrier orders all prior stores to the
 * flow object before the flag update becomes visible.
 */
static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

/* Shorthand taking the short flag name, e.g. flow_flag_clear(flow, SLOW). */
#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)
176
/* Test @flag in flow->flags.
 *
 * The barrier pairs with the smp_mb__before_atomic() in __flow_flag_set()
 * and __flow_flag_clear(): once the flag is observed, fields written
 * before the flag was set may safely be read.
 */
static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

/* Shorthand taking the short flag name, e.g. flow_flag_test(flow, HAIRPIN). */
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
188
/* Move @flow between the slow path and full hardware offload.
 * NOTE(review): slow path presumably means the slow FDB table used while
 * e.g. an encap neighbour is unresolved — confirm against en_tc.c.
 */
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow);
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec);

/* Tear down rules installed by mlx5e_tc_offload_fdb_rules(). */
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr);

/* Reference counting on flow->refcnt: get takes a reference, put drops
 * one (and releases the flow on the last put).
 */
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);

/* Hardware flow counter attached to @flow, if any. */
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);

struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);

/* Pointers into @spec's match value/criteria headers; @flags selects
 * outer vs. inner headers.
 */
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
212
213 #endif /* __MLX5_EN_TC_PRIV_H__ */
214