/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
25
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <dev/mlx5/driver.h>
32 #include <dev/mlx5/mlx5_core/mlx5_core.h>
33
34 #ifdef RATELIMIT
35
36 /* Finds an entry where we can register the given rate
37 * If the rate already exists, return the entry where it is registered,
38 * otherwise return the first available entry.
39 * If the table is full, return NULL
40 */
find_rl_entry(struct mlx5_rl_table * table,u32 rate,u16 burst)41 static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
42 u32 rate, u16 burst)
43 {
44 struct mlx5_rl_entry *ret_entry = NULL;
45 struct mlx5_rl_entry *entry;
46 u16 i;
47
48 for (i = 0; i < table->max_size; i++) {
49 entry = table->rl_entry + i;
50 if (entry->rate == rate && entry->burst == burst)
51 return entry;
52 if (ret_entry == NULL && entry->rate == 0)
53 ret_entry = entry;
54 }
55
56 return ret_entry;
57 }
58
/*
 * Program one entry of the HW rate limit table through the
 * SET_RATE_LIMIT firmware command.  A rate of 0 clears the entry.
 */
static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
    u32 rate, u32 burst, u16 index)
{
	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {};

	MLX5_SET(set_rate_limit_in, in, opcode, MLX5_CMD_OP_SET_RATE_LIMIT);
	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
	MLX5_SET(set_rate_limit_in, in, burst_upper_bound, burst);
	/* a typical_packet_size of 0 tells the firmware to use the MTU */
	MLX5_SET(set_rate_limit_in, in, typical_packet_size, 0);

	return (mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)));
}
73
/*
 * Query the schedule queue (QoS) handle tied to a HW rate limit table
 * entry.  On success *scq_handle receives the handle; on failure the
 * firmware error is returned and *scq_handle is left untouched.
 */
int mlx5e_query_rate_limit_cmd(struct mlx5_core_dev *dev,
    u16 index, u32 *scq_handle)
{
	u32 out[MLX5_ST_SZ_DW(query_pp_rate_limit_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pp_rate_limit_in)] = {};
	int err;

	MLX5_SET(query_pp_rate_limit_in, in, opcode, MLX5_CMD_OP_QUERY_RATE_LIMIT);
	MLX5_SET(query_pp_rate_limit_in, in, rate_limit_index, index);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err != 0)
		return (err);

	*scq_handle = MLX5_GET(query_pp_rate_limit_out, out, pp_context.qos_handle);

	return (0);
}
92
mlx5_rl_is_in_range(const struct mlx5_core_dev * dev,u32 rate,u32 burst)93 bool mlx5_rl_is_in_range(const struct mlx5_core_dev *dev, u32 rate, u32 burst)
94 {
95 const struct mlx5_rl_table *table = &dev->priv.rl_table;
96
97 return (rate <= table->max_rate && rate >= table->min_rate &&
98 burst <= 65535);
99 }
100 EXPORT_SYMBOL(mlx5_rl_is_in_range);
101
/*
 * Register a (rate, burst) pair in the rate limit table, programming
 * the hardware the first time the pair is used.  Subsequent callers of
 * the same pair share the HW entry through a reference count.  On
 * success *index is set to the HW rate limit table index for the pair.
 *
 * Returns 0 on success, -ERANGE for a zero or out-of-range rate,
 * -ENOSPC when the table is full, -ENOMEM when the refcount would
 * overflow, or a firmware command error.
 */
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst, u16 *index)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;
	int err = 0;

	mutex_lock(&table->rl_lock);

	if (!rate || !mlx5_rl_is_in_range(dev, rate, burst)) {
		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
			      rate, table->min_rate, table->max_rate);
		err = -ERANGE;
		goto out;
	}

	entry = find_rl_entry(table, rate, burst);
	if (!entry) {
		mlx5_core_err(dev, "Max number of %u rates reached\n",
			      table->max_size);
		err = -ENOSPC;
		goto out;
	}
	if (entry->refcount == 0xFFFFFFFFU) {
		/* out of refcounts */
		err = -ENOMEM;
		goto out;
	} else if (entry->refcount != 0) {
		/* rate already configured; just take another reference */
		entry->refcount++;
	} else {
		/* new rate limit: program the HW entry first */
		err = mlx5_set_rate_limit_cmd(dev, rate, burst, entry->index);
		if (err) {
			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
				      rate, err);
			goto out;
		}
		entry->rate = rate;
		entry->burst = burst;
		entry->refcount = 1;

		if (MLX5_CAP_QOS(dev, qos_remap_pp)) {
			err = mlx5e_query_rate_limit_cmd(dev, entry->index,
			    &entry->qos_handle);
			if (err) {
				/*
				 * Fixed message: the two string literals used
				 * to concatenate as "forSQ remap" (missing
				 * space between "for" and "SQ").
				 *
				 * NOTE(review): err stays set here, so the
				 * caller sees a failure even though the entry
				 * is now configured with refcount 1 — confirm
				 * whether callers release the reference on
				 * this path.
				 */
				mlx5_core_err(dev, "Failed retrieving schedule queue handle for "
				    "SQ remap: rate: %u error:(%d)\n", rate, err);
				entry->qos_handle = MLX5_INVALID_QUEUE_HANDLE;
			}
		} else
			entry->qos_handle = MLX5_INVALID_QUEUE_HANDLE;
	}
	*index = entry->index;

out:
	mutex_unlock(&table->rl_lock);
	return (err);
}
EXPORT_SYMBOL(mlx5_rl_add_rate);
160
/*
 * Drop one reference on the (rate, burst) entry previously registered
 * with mlx5_rl_add_rate().  When the last reference goes away the HW
 * entry is reset to unlimited and the slot is freed for reuse.
 */
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate, u32 burst)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;

	/* 0 is a reserved value for unlimited rate */
	if (rate == 0)
		return;

	mutex_lock(&table->rl_lock);
	entry = find_rl_entry(table, rate, burst);
	if (entry == NULL || entry->refcount == 0) {
		mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
	} else if (--entry->refcount == 0) {
		/* last user gone: clear the HW entry and free the slot */
		mlx5_set_rate_limit_cmd(dev, 0, 0, entry->index);
		entry->rate = 0;
		entry->burst = 0;
	}
	mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate);
189
mlx5_init_rl_table(struct mlx5_core_dev * dev)190 int mlx5_init_rl_table(struct mlx5_core_dev *dev)
191 {
192 struct mlx5_rl_table *table = &dev->priv.rl_table;
193 int i;
194
195 mutex_init(&table->rl_lock);
196 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
197 table->max_size = 0;
198 return 0;
199 }
200
201 /* First entry is reserved for unlimited rate */
202 table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
203 table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
204 table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
205
206 table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
207 GFP_KERNEL);
208 if (!table->rl_entry)
209 return -ENOMEM;
210
211 /* The index represents the index in HW rate limit table
212 * Index 0 is reserved for unlimited rate
213 */
214 for (i = 0; i < table->max_size; i++)
215 table->rl_entry[i].index = i + 1;
216
217 return 0;
218 }
219
mlx5_cleanup_rl_table(struct mlx5_core_dev * dev)220 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
221 {
222 struct mlx5_rl_table *table = &dev->priv.rl_table;
223 int i;
224
225 /* Clear all configured rates */
226 for (i = 0; i < table->max_size; i++)
227 if (table->rl_entry[i].rate)
228 mlx5_set_rate_limit_cmd(dev, 0, 0,
229 table->rl_entry[i].index);
230
231 kfree(dev->priv.rl_table.rl_entry);
232 }
233
234 #endif
235