/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */

#ifndef MLX5HWS_PAT_ARG_H_
#define MLX5HWS_PAT_ARG_H_

/* Modify-header arg pool */
enum mlx5hws_arg_chunk_size {
	MLX5HWS_ARG_CHUNK_SIZE_1,
	/* Keep MIN updated when changing */
	MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
	MLX5HWS_ARG_CHUNK_SIZE_2,
	MLX5HWS_ARG_CHUNK_SIZE_3,
	MLX5HWS_ARG_CHUNK_SIZE_4,
	MLX5HWS_ARG_CHUNK_SIZE_MAX,
};

enum {
	MLX5HWS_MODIFY_ACTION_SIZE = 8,
	MLX5HWS_ARG_DATA_SIZE = 64,
};

struct mlx5hws_pattern_cache {
	struct mutex lock; /* Protect pattern list */
	struct list_head ptrn_list;
};

struct mlx5hws_pattern_cache_item {
	struct {
		u32 pattern_id;
		u8 *data;
		u16 num_of_actions;
	} mh_data;
	u32 refcount;
	struct list_head ptrn_list_node;
};

enum mlx5hws_arg_chunk_size
mlx5hws_arg_get_arg_log_size(u16 num_of_actions);

u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);

enum mlx5hws_arg_chunk_size
mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);

u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);

int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);

void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);

bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);

int mlx5hws_arg_create(struct mlx5hws_context *ctx,
		       u8 *data,
		       size_t data_sz,
		       u32 log_bulk_sz,
		       bool write_data,
		       u32 *arg_id);

void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);

int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
					 __be64 *data,
					 u8 num_of_actions,
					 u32 log_bulk_sz,
					 bool write_data,
					 u32 *modify_hdr_arg_id);

int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
			    __be64 *pattern,
			    size_t pattern_sz,
			    u32 *ptrn_id);

void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
			     u32 ptrn_id);

bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
					   u32 arg_size);

bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);

void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
		       void *comp_data,
		       u32 arg_idx,
		       u8 *arg_data,
		       size_t data_size);

void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
			       u32 arg_idx,
			       u8 *arg_data,
			       u16 num_of_actions);

int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
				      u32 arg_idx,
				      u8 *arg_data,
				      size_t data_size);

void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
			   size_t *new_size, u32 *nope_location, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
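
/*
 * Usage sketch (editor's addition, not part of the upstream header): one
 * plausible way a caller could pair the pattern and argument APIs declared
 * above when setting up a bulk-allocated modify-header action. The
 * hws_example_* helpers are hypothetical, and the assumption that the size
 * parameters of mlx5hws_pat_verify_actions() and mlx5hws_pat_get_pattern()
 * are byte counts is inferred from MLX5HWS_MODIFY_ACTION_SIZE; only the
 * mlx5hws_* symbols come from this header.
 */
static inline int
hws_example_get_pat_arg(struct mlx5hws_context *ctx,
			__be64 *actions, u8 num_of_actions,
			u32 log_bulk_sz, u32 *ptrn_id, u32 *arg_id)
{
	size_t pat_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
	int ret;

	/* Reject action lists the pattern/arg path cannot handle */
	if (!mlx5hws_pat_verify_actions(ctx, actions, pat_sz))
		return -EOPNOTSUPP;

	/* Reuse a cached pattern object or create a new one */
	ret = mlx5hws_pat_get_pattern(ctx, actions, pat_sz, ptrn_id);
	if (ret)
		return ret;

	/* Allocate an argument range sized for the action list and
	 * write the initial action data.
	 */
	ret = mlx5hws_arg_create_modify_header_arg(ctx, actions, num_of_actions,
						   log_bulk_sz, true, arg_id);
	if (ret)
		mlx5hws_pat_put_pattern(ctx, *ptrn_id);

	return ret;
}

static inline void
hws_example_put_pat_arg(struct mlx5hws_context *ctx, u32 ptrn_id, u32 arg_id)
{
	/* Teardown mirrors creation: drop the arg, then the pattern reference */
	mlx5hws_arg_destroy(ctx, arg_id);
	mlx5hws_pat_put_pattern(ctx, ptrn_id);
}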