/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

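/* Stub command implementations: no-op versions of the flow steering commands,
 * returned by mlx5_fs_cmd_get_stub_cmds() for flow table types that have no
 * firmware backing.
 */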
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table_attr *ft_attr,
					   struct mlx5_flow_table *next_ft)
{
	int max_fte = ft_attr->max_fte;

	ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;

	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns,
				  u16 peer_vhca_id)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
					  enum fs_flow_table_type ft_type)
{
	return 0;
}

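/* Program the slave device's FDB root: either point it at a flow table owned
 * by the master eswitch (ft_id_valid) or restore the slave's own FDB root.
 */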
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				    int definer_id)
{
	return 0;
}

static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
				   u16 format_id, u32 *match_mask)
{
	return 0;
}

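/* Issue SET_FLOW_TABLE_ROOT for @ft (or disconnect the root when requested).
 * When the device is the master of a shared-FDB LAG, the new FDB root is also
 * propagated to the peer devices and rolled back on failure.
 */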
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0 &&
	    (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
		return 0;

	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(set_flow_table_root_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		struct mlx5_core_dev *peer_dev;
		int i, j;

		mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
			err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
							  (!disconnect) ? ft->id : 0);
			if (err && !disconnect) {
				mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
					if (j < i)
						mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
									    ns->root_ft->id);
					else
						break;
				}

				MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
				MLX5_SET(set_flow_table_root_in, in, table_id,
					 ns->root_ft->id);
				mlx5_cmd_exec_in(dev, set_flow_table_root, in);
			}
			if (err)
				break;
		}
	}

	return err;
}

static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table_attr *ft_attr,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int size;
	int err;

	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(create_flow_table_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(destroy_flow_table_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
			 ft->esw_owner_vhca_id);
		MLX5_SET(modify_flow_table_in, in, other_eswitch,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(destroy_flow_group_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

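/* Decide whether the FTE needs the extended destination entry format:
 * required when a rule forwards to more than one destination and at least
 * one destination carries a packet reformat. Fails if the firmware lacks
 * the corresponding capability.
 */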
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
	void *exe_aso_ctrl;
	void *execute_aso;

	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
				   execute_aso[0]);
	MLX5_SET(execute_aso, execute_aso, valid, 1);
	MLX5_SET(execute_aso, execute_aso, aso_object_id,
		 fte->act_dests.action.exe_aso.object_id);

	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
		 fte->act_dests.action.exe_aso.return_reg_id);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
		 fte->act_dests.action.exe_aso.type);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
		 fte->act_dests.action.exe_aso.flow_meter.init_color);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
		 fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}

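/* Serialize a software flow table entry (fs_fte) into a SET_FLOW_TABLE_ENTRY
 * command: flow context, actions, match value and the destination/counter
 * lists, then execute it.
 */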
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	u32 reformat_id = 0;
	unsigned int inlen;
	int dst_cnt_size;
	u32 *in, action;
	void *in_dests;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
	MLX5_SET(set_fte_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->act_dests.flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->act_dests.flow_context.flow_source);
	MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
		 !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);

	action = fte->act_dests.action.action;
	if (extended_dest)
		action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;

	MLX5_SET(flow_context, in_flow_context, action, action);

	if (!extended_dest && fte->act_dests.action.pkt_reformat) {
		struct mlx5_pkt_reformat *pkt_reformat =
			fte->act_dests.action.pkt_reformat;

		err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
						     &reformat_id);
		if (err) {
			mlx5_core_err(dev,
				      "Unsupported pkt_reformat type (%d)\n",
				      pkt_reformat->reformat_type);
			goto err_out;
		}
	}

	MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
		 reformat_id);

	if (fte->act_dests.action.modify_hdr) {
		if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
			mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->act_dests.action.modify_hdr->id);
	}

	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
		 fte->act_dests.action.crypto.type);
	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
		 fte->act_dests.action.crypto.obj_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					break;
				}
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				id = dst->dest_attr.vport.num;
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_table_type, dst->dest_attr.ft->type);
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
				break;
			default:
				id = dst->dest_attr.tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
								log_max_flow_counter,
								ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 mlx5_fc_id(dst->dest_attr.counter));
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
		if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
		 ft->esw_owner_vhca_id);
	MLX5_SET(delete_fte_in, in, other_eswitch,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

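/* Flow counter commands: allocate, free and query hardware flow counters,
 * either one at a time or in bulk.
 */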
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

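/* Allocate a packet reformat (encap/decap) context in firmware for the given
 * namespace, after validating the reformat data size against the relevant
 * max_encap_header_size capability.
 */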
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
	    namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (params->size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       params->size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
		     params->size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + params->size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, params->size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, params->type);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_0, params->param_0);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_1, params->param_1);
	if (params->data && params->size)
		memcpy(reformat, params->data, params->size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW;

	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}

static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}

static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					  int definer_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);

	return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
					 u16 format_id, u32 *match_mask)
{
	u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *ptr;
	int err;

	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
	return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}

static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
				     enum fs_flow_table_type ft_type)
{
	return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}

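/* Firmware-backed implementation of the flow steering command interface. */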
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_create_match_definer,
	.destroy_match_definer = mlx5_cmd_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_get_capabilities,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_stub_create_match_definer,
	.destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_stub_get_capabilities,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

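/* Flow table types that are handled by firmware get the FW command set;
 * anything else falls back to the no-op stubs.
 */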
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
	case FS_FT_PORT_SEL:
	case FS_FT_RDMA_TRANSPORT_RX:
	case FS_FT_RDMA_TRANSPORT_TX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}

int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode)
{
	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};

	if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode))
		return -EOPNOTSUPP;

	MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
	MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1);
	MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode);

	return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
}

int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};

	if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
		return -EOPNOTSUPP;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_NIC_TX);
	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft_id);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}