xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c (revision 6439a0e64c355d2e375bd094f365d56ce81faba3)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */

#include "en_tc.h"
#include "en/tc_ct.h"
#include "en/tc_priv.h"
#include "en/tc/ct_fs.h"
#include "fs_core.h"
#include "steering/hws/fs_hws_pools.h"
#include "steering/hws/mlx5hws.h"
#include "steering/hws/table.h"

struct mlx5_ct_fs_hmfs_matcher {
	struct mlx5hws_bwc_matcher *hws_bwc_matcher;
	refcount_t ref;
};

/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
#define NUM_MATCHERS (2 * 3)

struct mlx5_ct_fs_hmfs {
	struct mlx5hws_table *ct_tbl;
	struct mlx5hws_table *ct_nat_tbl;
	struct mlx5_flow_table *ct_nat;
	struct mlx5hws_action *fwd_action;
	struct mlx5hws_action *last_action;
	struct mlx5hws_context *ctx;
	struct mutex lock; /* Guards matchers */
	struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
	struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
};

struct mlx5_ct_fs_hmfs_rule {
	struct mlx5_ct_fs_rule fs_rule;
	struct mlx5hws_bwc_rule *hws_bwc_rule;
	struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
	struct mlx5_fc *counter;
};

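/* Map an (ipv4, tcp, gre) tuple to a slot in the matchers arrays. The
 * encoding ipv4 * 3 + tcp * 2 + gre yields the six distinct values 0..5
 * (ipv6/udp is 0, ipv4/tcp is 5), since tcp and gre are mutually exclusive,
 * both being derived from the same ip_protocol match field.
 */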
static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
{
	return ipv4 * 3 + tcp * 2 + gre;
}

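/* Bind the CT steering backend to the HWS tables backing the ct, ct_nat and
 * post_ct flow tables, and create the two actions shared by all CT rules: a
 * forward to post_ct and a "last" terminator.
 */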
static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
				struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
{
	u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
	struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);

	ct_tbl = ct->fs_hws_table.hws_table;
	ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
	post_ct_tbl = post_ct->fs_hws_table.hws_table;
	fs_hmfs->ct_nat = ct_nat;

	if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
		netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables\n");
		return -EOPNOTSUPP;
	}

	netdev_dbg(fs->netdev, "using hmfs steering");

	fs_hmfs->ct_tbl = ct_tbl;
	fs_hmfs->ct_nat_tbl = ct_nat_tbl;
	fs_hmfs->ctx = ct_tbl->ctx;
	mutex_init(&fs_hmfs->lock);

	fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
	if (!fs_hmfs->fwd_action) {
		netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
		return -EINVAL;
	}
	fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
	if (!fs_hmfs->last_action) {
		netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
		mlx5hws_action_destroy(fs_hmfs->fwd_action);
		return -EINVAL;
	}

	return 0;
}

static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
{
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);

	mlx5hws_action_destroy(fs_hmfs->last_action);
	mlx5hws_action_destroy(fs_hmfs->fwd_action);
}

static struct mlx5hws_bwc_matcher *
mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
			       struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
{
	u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
	struct mlx5hws_match_parameters mask = {
		.match_buf = spec->match_criteria,
		.match_sz = sizeof(spec->match_criteria),
	};
	u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
	struct mlx5hws_bwc_matcher *hws_bwc_matcher;

	hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
	if (!hws_bwc_matcher)
		return ERR_PTR(-EINVAL);

	return hws_bwc_matcher;
}

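/* Look up (or lazily create) the shared matcher for this tuple. The fast path
 * is lockless: refcount_inc_not_zero() takes a reference without the mutex.
 * On miss, the lookup is retried under fs_hmfs->lock (classic double-checked
 * locking) so that only one CPU creates the bwc matcher.
 */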
static struct mlx5_ct_fs_hmfs_matcher *
mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
			    bool nat, bool ipv4, bool tcp, bool gre)
{
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
	u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
	struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
	struct mlx5hws_bwc_matcher *hws_bwc_matcher;
	struct mlx5hws_table *tbl;

	hmfs_matcher = nat ?
		(fs_hmfs->matchers_nat + matcher_idx) :
		(fs_hmfs->matchers + matcher_idx);

	if (refcount_inc_not_zero(&hmfs_matcher->ref))
		return hmfs_matcher;

	mutex_lock(&fs_hmfs->lock);

	/* Retry with the lock held, as the matcher might already have been
	 * created by another CPU.
	 */
	if (refcount_inc_not_zero(&hmfs_matcher->ref))
		goto out_unlock;

	tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;

	hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
	if (IS_ERR(hws_bwc_matcher)) {
		netdev_warn(fs->netdev,
			    "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
			    nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));

		hmfs_matcher = ERR_CAST(hws_bwc_matcher);
		goto out_unlock;
	}

	hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
	refcount_set(&hmfs_matcher->ref, 1);

out_unlock:
	mutex_unlock(&fs_hmfs->lock);
	return hmfs_matcher;
}

153 
static void
mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
{
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);

	if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
		return;

	mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
	mutex_unlock(&fs_hmfs->lock);
}

#define NUM_CT_HMFS_RULES 4

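/* Every CT rule uses the same fixed four-slot action layout:
 *   [0] flow counter (with its offset inside the counter bulk),
 *   [1] modify header (restores CT metadata / rewrites headers for NAT),
 *   [2] forward to the post_ct table,
 *   [3] "last" terminator.
 * Slots [2] and [3] reuse the shared actions created at init time.
 */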
static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
					      struct mlx5_flow_attr *attr,
					      struct mlx5hws_rule_action *rule_actions)
{
	struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;

	memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
	rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
	rule_actions[0].counter.offset =
		attr->counter->id - attr->counter->bulk->base_id;
	/* Modify header is special: it may require extra arguments outside the action itself. */
	if (mh_action->mh_data) {
		rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
		rule_actions[1].modify_header.data = mh_action->mh_data->data;
	}
	rule_actions[1].action = mh_action->hws_action;
	rule_actions[2].action = fs_hmfs->fwd_action;
	rule_actions[3].action = fs_hmfs->last_action;
}

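/* Offload one conntrack entry: classify it as {nat} x {ipv4} x {tcp, gre},
 * take a reference on the matching shared matcher, and insert a bwc rule
 * carrying the four CT actions.
 */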
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
			    struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
{
	struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
	struct mlx5hws_match_parameters match_params = {
		.match_buf = spec->match_value,
		.match_sz = ARRAY_SIZE(spec->match_value),
	};
	struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
	struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
	bool nat, tcp, ipv4, gre;
	int err;

	if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
		return ERR_PTR(-EOPNOTSUPP);

	hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
	if (!hmfs_rule)
		return ERR_PTR(-ENOMEM);

	nat = (attr->ft == fs_hmfs->ct_nat);
	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
	tcp = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_TCP;
	gre = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_GRE;

	hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
	if (IS_ERR(hmfs_matcher)) {
		err = PTR_ERR(hmfs_matcher);
		goto err_free_rule;
	}
	hmfs_rule->hmfs_matcher = hmfs_matcher;

	mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
	hmfs_rule->counter = attr->counter;

	hmfs_rule->hws_bwc_rule =
		mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
					spec->flow_context.flow_source, rule_actions);
	if (!hmfs_rule->hws_bwc_rule) {
		err = -EINVAL;
		goto err_put_matcher;
	}

	return &hmfs_rule->fs_rule;

err_put_matcher:
	mlx5_fc_put_hws_action(hmfs_rule->counter);
	mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
err_free_rule:
	kfree(hmfs_rule);
	return ERR_PTR(err);
}

static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
{
	struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
							      struct mlx5_ct_fs_hmfs_rule,
							      fs_rule);
	mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
	mlx5_fc_put_hws_action(hmfs_rule->counter);
	mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
	kfree(hmfs_rule);
}

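/* Swap the actions of an existing rule in place. On success the old counter's
 * HWS action reference is dropped and the new counter is recorded; on failure
 * the reference taken for the new counter by
 * mlx5_ct_fs_hmfs_fill_rule_actions() is released and the rule is untouched.
 */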
static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
					  struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
{
	struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
							      struct mlx5_ct_fs_hmfs_rule,
							      fs_rule);
	struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
	struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
	int err;

	mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);

	err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
	if (err) {
		mlx5_fc_put_hws_action(attr->counter);
		return err;
	}

	mlx5_fc_put_hws_action(hmfs_rule->counter);
	hmfs_rule->counter = attr->counter;

	return 0;
}

static struct mlx5_ct_fs_ops hmfs_ops = {
	.ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
	.ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
	.ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,

	.init = mlx5_ct_fs_hmfs_init,
	.destroy = mlx5_ct_fs_hmfs_destroy,

	.priv_size = sizeof(struct mlx5_ct_fs_hmfs),
};

struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
{
	return &hmfs_ops;
}
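
/* A minimal sketch of how a caller might consume this ops table; this mirrors
 * the pattern used for the sibling dmfs/smfs backends, and the actual
 * selection logic lives in en/tc_ct.c, not here. The ct, ct_nat and post_ct
 * variables stand for the caller's pre-created flow tables:
 *
 *	struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_hmfs_ops_get();
 *	struct mlx5_ct_fs *fs;
 *	int err;
 *
 *	// The backend's private data (priv_size) is allocated inline,
 *	// behind the common mlx5_ct_fs header.
 *	fs = kzalloc(sizeof(*fs) + fs_ops->priv_size, GFP_KERNEL);
 *	if (!fs)
 *		return -ENOMEM;
 *	err = fs_ops->init(fs, ct, ct_nat, post_ct);
 */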