// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE	msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS		BIT(15)
#define NFP_FL_QOS_METER	BIT(10)

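/* Common head shared by all police control messages: a flags word
 * (pps/meter select) followed by either the ingress port id or the
 * meter id, depending on how the policer is addressed.
 */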
struct nfp_police_cfg_head {
        __be32 flags_opts;
        union {
                __be32 meter_id;
                __be32 port;
        };
};

enum NFP_FL_QOS_TYPES {
        NFP_FL_QOS_TYPE_BPS,
        NFP_FL_QOS_TYPE_PPS,
        NFP_FL_QOS_TYPE_MAX,
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |            Reserved           |p|          Reserved           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Peak Burst Size                        |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                   Committed Information Rate                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * Word[0] (flag options):
 * [15] p(pps) 1 for pps, 0 for bps
 *
 * Meter control message
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |            Reserved           |p| Y |TYPE |E|  TSHFV  |P| PC|R|
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |                            meter ID                           |
 * +-------------------------------+-------------------------------+
 *
 */
struct nfp_police_config {
        struct nfp_police_cfg_head head;
        __be32 bkt_tkn_p;
        __be32 bkt_tkn_c;
        __be32 pbs;
        __be32 cbs;
        __be32 pir;
        __be32 cir;
};

struct nfp_police_stats_reply {
        struct nfp_police_cfg_head head;
        __be64 pass_bytes;
        __be64 pass_pkts;
        __be64 drop_bytes;
        __be64 drop_pkts;
};

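/* Build and send a QOS_MOD cmsg that programs one policer. CIR and PIR
 * are both set to @rate and CBS/PBS (plus the token buckets) to @burst,
 * so the trTCM effectively behaves as a single-rate limiter. Ingress VF
 * rate limiters are addressed by port id, tc police actions by meter id.
 */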
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
                                  bool pps, u32 id, u32 rate, u32 burst)
{
        struct nfp_police_config *config;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
                                    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        config = nfp_flower_cmsg_get_data(skb);
        memset(config, 0, sizeof(struct nfp_police_config));
        if (pps)
                config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
        if (!ingress)
                config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);

        if (ingress)
                config->head.port = cpu_to_be32(id);
        else
                config->head.meter_id = cpu_to_be32(id);

        config->bkt_tkn_p = cpu_to_be32(burst);
        config->bkt_tkn_c = cpu_to_be32(burst);
        config->pbs = cpu_to_be32(burst);
        config->cbs = cpu_to_be32(burst);
        config->pir = cpu_to_be32(rate);
        config->cir = cpu_to_be32(rate);
        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}

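/* Reject police actions the firmware cannot honour: the exceed action
 * must be drop, the conform action must be continue/ok (ingress) or
 * pipe/ok (meter), ok must be the last action, and peakrate/avrate/
 * overhead are not supported.
 */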
static int nfp_policer_validate(const struct flow_action *action,
                                const struct flow_action_entry *act,
                                struct netlink_ext_ack *extack,
                                bool ingress)
{
        if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when exceed action is not drop");
                return -EOPNOTSUPP;
        }

        if (ingress) {
                if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
                    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Offload not supported when conform action is not continue or ok");
                        return -EOPNOTSUPP;
                }
        } else {
                if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
                    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Offload not supported when conform action is not pipe or ok");
                        return -EOPNOTSUPP;
                }
        }

        if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
            !flow_action_is_last_entry(action, act)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when conform action is ok, but action is not last");
                return -EOPNOTSUPP;
        }

        if (act->police.peakrate_bytes_ps ||
            act->police.avrate || act->police.overhead) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when peakrate/avrate/overhead is configured");
                return -EOPNOTSUPP;
        }

        return 0;
}

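/* TC_CLSMATCHALL_REPLACE handler: validate and install ingress rate
 * limiter(s) on a VF repr. At most one BPS and one PPS policer are
 * accepted, and the matchall must be at the highest priority.
 */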
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                                struct tc_cls_matchall_offload *flow,
                                struct netlink_ext_ack *extack)
{
        struct flow_action_entry *paction = &flow->rule->action.entries[0];
        u32 action_num = flow->rule->action.num_entries;
        struct nfp_flower_priv *fl_priv = app->priv;
        struct flow_action_entry *action = NULL;
        struct nfp_flower_repr_priv *repr_priv;
        u32 netdev_port_id, i;
        struct nfp_repr *repr;
        bool pps_support;
        u32 bps_num = 0;
        u32 pps_num = 0;
        u32 burst;
        bool pps;
        u64 rate;
        int err;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;
        netdev_port_id = nfp_repr_get_port_id(netdev);
        pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

        if (repr_priv->block_shared) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
                return -EOPNOTSUPP;
        }

        if (repr->port->type != NFP_PORT_VF_PORT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
                return -EOPNOTSUPP;
        }

        if (pps_support) {
                if (action_num > 2 || action_num == 0) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: qos rate limit offload only supports one or two actions");
                        return -EOPNOTSUPP;
                }
        } else {
                if (!flow_offload_has_one_action(&flow->rule->action)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: qos rate limit offload requires a single action");
                        return -EOPNOTSUPP;
                }
        }

        if (flow->common.prio != 1) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
                return -EOPNOTSUPP;
        }
        for (i = 0; i < action_num; i++) {
                action = paction + i;
                if (action->id != FLOW_ACTION_POLICE) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: qos rate limit offload requires police action");
                        return -EOPNOTSUPP;
                }

                err = nfp_policer_validate(&flow->rule->action, action, extack, true);
                if (err)
                        return err;

                if (action->police.rate_bytes_ps > 0) {
                        if (bps_num++) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "unsupported offload: qos rate limit offload only supports one BPS action");
                                return -EOPNOTSUPP;
                        }
                }
                if (action->police.rate_pkt_ps > 0) {
                        if (!pps_support) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "unsupported offload: FW does not support PPS action");
                                return -EOPNOTSUPP;
                        }
                        if (pps_num++) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "unsupported offload: qos rate limit offload only supports one PPS action");
                                return -EOPNOTSUPP;
                        }
                }
        }
254
255 for (i = 0 ; i < action_num; i++) {
256 /* Set QoS data for this interface */
257 action = paction + i;
258 if (action->police.rate_bytes_ps > 0) {
259 rate = action->police.rate_bytes_ps;
260 burst = action->police.burst;
261 } else if (action->police.rate_pkt_ps > 0) {
262 rate = action->police.rate_pkt_ps;
263 burst = action->police.burst_pkt;
264 } else {
265 NL_SET_ERR_MSG_MOD(extack,
266 "unsupported offload: qos rate limit is not BPS or PPS");
267 continue;
268 }
269
270 if (rate != 0) {
271 pps = false;
272 if (action->police.rate_pkt_ps > 0)
273 pps = true;
274 nfp_flower_offload_one_police(repr->app, true,
275 pps, netdev_port_id,
276 rate, burst);
277 }
278 }
279 repr_priv->qos_table.netdev_port_id = netdev_port_id;
280 fl_priv->qos_rate_limiters++;
281 if (fl_priv->qos_rate_limiters == 1)
282 schedule_delayed_work(&fl_priv->qos_stats_work,
283 NFP_FL_QOS_UPDATE);
284
285 return 0;
286 }
287
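/* TC_CLSMATCHALL_DESTROY handler: clear the stored qos entry and send a
 * QOS_DEL cmsg for each supported type (BPS, and PPS when the firmware
 * advertises it).
 */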
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                               struct tc_cls_matchall_offload *flow,
                               struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_config *config;
        u32 netdev_port_id, i;
        struct nfp_repr *repr;
        struct sk_buff *skb;
        bool pps_support;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);

        netdev_port_id = nfp_repr_get_port_id(netdev);
        repr_priv = repr->app_priv;
        pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

        if (!repr_priv->qos_table.netdev_port_id) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
                return -EOPNOTSUPP;
        }

        memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
        fl_priv->qos_rate_limiters--;
        if (!fl_priv->qos_rate_limiters)
                cancel_delayed_work_sync(&fl_priv->qos_stats_work);
        for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
                if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
                        break;
                /* 0:bps 1:pps
                 * Clear QoS data for this interface.
                 * There is no need to check if a specific QOS_TYPE was
                 * configured as the firmware handles clearing a QoS entry
                 * safely, even if it wasn't explicitly added.
                 */
                skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
                                            NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
                if (!skb)
                        return -ENOMEM;

                config = nfp_flower_cmsg_get_data(skb);
                memset(config, 0, sizeof(struct nfp_police_config));
                if (i == NFP_FL_QOS_TYPE_PPS)
                        config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
                config->head.port = cpu_to_be32(netdev_port_id);
                nfp_ctrl_tx(repr->app->ctrl, skb);
        }

        return 0;
}

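/* Handle a QOS_STATS reply from the firmware. Replies flagged as meter
 * stats are handed to nfp_act_stats_reply(); port-addressed replies
 * update the repr's cached packet/byte counters (pass + drop totals).
 */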
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_stats_reply *msg;
        struct nfp_stat_pair *curr_stats;
        struct nfp_stat_pair *prev_stats;
        struct net_device *netdev;
        struct nfp_repr *repr;
        u32 netdev_port_id;

        msg = nfp_flower_cmsg_get_data(skb);
        if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
                return nfp_act_stats_reply(app, msg);

        netdev_port_id = be32_to_cpu(msg->head.port);
        rcu_read_lock();
        netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
        if (!netdev)
                goto exit_unlock_rcu;

        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;
        curr_stats = &repr_priv->qos_table.curr_stats;
        prev_stats = &repr_priv->qos_table.prev_stats;

        spin_lock_bh(&fl_priv->qos_stats_lock);
        curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
                           be64_to_cpu(msg->drop_pkts);
        curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
                            be64_to_cpu(msg->drop_bytes);

        if (!repr_priv->qos_table.last_update) {
                prev_stats->pkts = curr_stats->pkts;
                prev_stats->bytes = curr_stats->bytes;
        }

        repr_priv->qos_table.last_update = jiffies;
        spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
        rcu_read_unlock();
}

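/* Request updated stats for one policer, addressed by ingress port id or
 * by meter id. Uses GFP_ATOMIC as it is called under rcu_read_lock()
 * from nfp_flower_stats_rlim_request_all().
 */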
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
                              u32 id, bool ingress)
{
        struct nfp_police_cfg_head *head;
        struct sk_buff *skb;

        skb = nfp_flower_cmsg_alloc(fl_priv->app,
                                    sizeof(struct nfp_police_cfg_head),
                                    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
                                    GFP_ATOMIC);
        if (!skb)
                return;
        head = nfp_flower_cmsg_get_data(skb);

        memset(head, 0, sizeof(struct nfp_police_cfg_head));
        if (ingress) {
                head->port = cpu_to_be32(id);
        } else {
                head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
                head->meter_id = cpu_to_be32(id);
        }

        nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

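/* Request stats for every VF repr that has a rate limiter installed. */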
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
        struct nfp_reprs *repr_set;
        int i;

        rcu_read_lock();
        repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
        if (!repr_set)
                goto exit_unlock_rcu;

        for (i = 0; i < repr_set->num_reprs; i++) {
                struct net_device *netdev;

                netdev = rcu_dereference(repr_set->reprs[i]);
                if (netdev) {
                        struct nfp_repr *priv = netdev_priv(netdev);
                        struct nfp_flower_repr_priv *repr_priv;
                        u32 netdev_port_id;

                        repr_priv = priv->app_priv;
                        netdev_port_id = repr_priv->qos_table.netdev_port_id;
                        if (!netdev_port_id)
                                continue;

                        nfp_flower_stats_rlim_request(fl_priv,
                                                      netdev_port_id, true);
                }
        }

exit_unlock_rcu:
        rcu_read_unlock();
}

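/* Periodic work: poll the firmware for rate-limiter and meter stats,
 * then re-arm itself every NFP_FL_QOS_UPDATE (1s).
 */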
static void update_stats_cache(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct nfp_flower_priv *fl_priv;

        delayed_work = to_delayed_work(work);
        fl_priv = container_of(delayed_work, struct nfp_flower_priv,
                               qos_stats_work);

        nfp_flower_stats_rlim_request_all(fl_priv);
        nfp_flower_stats_meter_request_all(fl_priv);

        schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

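/* TC_CLSMATCHALL_STATS handler: report the delta between the cached
 * current and previous counters to the flow stats API.
 */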
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                              struct tc_cls_matchall_offload *flow,
                              struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_stat_pair *curr_stats;
        struct nfp_stat_pair *prev_stats;
        u64 diff_bytes, diff_pkts;
        struct nfp_repr *repr;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);

        repr_priv = repr->app_priv;
        if (!repr_priv->qos_table.netdev_port_id) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
                return -EOPNOTSUPP;
        }

        spin_lock_bh(&fl_priv->qos_stats_lock);
        curr_stats = &repr_priv->qos_table.curr_stats;
        prev_stats = &repr_priv->qos_table.prev_stats;
        diff_pkts = curr_stats->pkts - prev_stats->pkts;
        diff_bytes = curr_stats->bytes - prev_stats->bytes;
        prev_stats->pkts = curr_stats->pkts;
        prev_stats->bytes = curr_stats->bytes;
        spin_unlock_bh(&fl_priv->qos_stats_lock);

        flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
                          repr_priv->qos_table.last_update,
                          FLOW_ACTION_HW_STATS_DELAYED);
        return 0;
}

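/* Set up the qos state for this app: stats locks, the meter table and
 * the delayed stats-refresh work.
 */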
void nfp_flower_qos_init(struct nfp_app *app)
{
        struct nfp_flower_priv *fl_priv = app->priv;

        spin_lock_init(&fl_priv->qos_stats_lock);
        mutex_init(&fl_priv->meter_stats_lock);
        nfp_init_meter_table(app);

        INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
        struct nfp_flower_priv *fl_priv = app->priv;

        cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

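/* Entry point for matchall (VF rate limit) offload requests. Requires
 * the NFP_FL_FEATS_VF_RLIM firmware feature and runs under nfp_fl_lock.
 */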
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                 struct tc_cls_matchall_offload *flow)
{
        struct netlink_ext_ack *extack = flow->common.extack;
        struct nfp_flower_priv *fl_priv = app->priv;
        int ret;

        if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
                return -EOPNOTSUPP;
        }

        mutex_lock(&fl_priv->nfp_fl_lock);
        switch (flow->command) {
        case TC_CLSMATCHALL_REPLACE:
                ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
                break;
        case TC_CLSMATCHALL_DESTROY:
                ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
                break;
        case TC_CLSMATCHALL_STATS:
                ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&fl_priv->nfp_fl_lock);

        return ret;
}

/* Offload tc action, currently only for tc police */

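/* Meter entries (one per offloaded police action) live in an rhashtable
 * keyed by the 32-bit meter id, taken from the police action's hw index.
 */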
static const struct rhashtable_params stats_meter_table_params = {
        .key_offset	= offsetof(struct nfp_meter_entry, meter_id),
        .head_offset	= offsetof(struct nfp_meter_entry, ht_node),
        .key_len	= sizeof(u32),
};

struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
{
        struct nfp_flower_priv *priv = app->priv;

        return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
                                      stats_meter_table_params);
}

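/* Look up an existing meter entry or allocate and insert a new one. The
 * first rate limiter added also kicks off the periodic stats work.
 */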
static struct nfp_meter_entry *
nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
{
        struct nfp_meter_entry *meter_entry = NULL;
        struct nfp_flower_priv *priv = app->priv;

        meter_entry = rhashtable_lookup_fast(&priv->meter_table,
                                             &meter_id,
                                             stats_meter_table_params);
        if (meter_entry)
                return meter_entry;

        meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
        if (!meter_entry)
                return NULL;

        meter_entry->meter_id = meter_id;
        meter_entry->used = jiffies;
        if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
                                   stats_meter_table_params)) {
                kfree(meter_entry);
                return NULL;
        }

        priv->qos_rate_limiters++;
        if (priv->qos_rate_limiters == 1)
                schedule_delayed_work(&priv->qos_stats_work,
                                      NFP_FL_QOS_UPDATE);

        return meter_entry;
}

static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
{
        struct nfp_meter_entry *meter_entry = NULL;
        struct nfp_flower_priv *priv = app->priv;

        meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
                                             stats_meter_table_params);
        if (!meter_entry)
                return;

        rhashtable_remove_fast(&priv->meter_table,
                               &meter_entry->ht_node,
                               stats_meter_table_params);
        kfree(meter_entry);
        priv->qos_rate_limiters--;
        if (!priv->qos_rate_limiters)
                cancel_delayed_work_sync(&priv->qos_stats_work);
}

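/* Add or delete the software meter entry for a police action and record
 * its rate/burst (bps or pps), under meter_stats_lock.
 */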
int nfp_flower_setup_meter_entry(struct nfp_app *app,
                                 const struct flow_action_entry *action,
                                 enum nfp_meter_op op,
                                 u32 meter_id)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_meter_entry *meter_entry = NULL;
        int err = 0;

        mutex_lock(&fl_priv->meter_stats_lock);

        switch (op) {
        case NFP_METER_DEL:
                nfp_flower_del_meter_entry(app, meter_id);
                goto exit_unlock;
        case NFP_METER_ADD:
                meter_entry = nfp_flower_add_meter_entry(app, meter_id);
                break;
        default:
                err = -EOPNOTSUPP;
                goto exit_unlock;
        }

        if (!meter_entry) {
                err = -ENOMEM;
                goto exit_unlock;
        }

        if (action->police.rate_bytes_ps > 0) {
                meter_entry->bps = true;
                meter_entry->rate = action->police.rate_bytes_ps;
                meter_entry->burst = action->police.burst;
        } else {
                meter_entry->bps = false;
                meter_entry->rate = action->police.rate_pkt_ps;
                meter_entry->burst = action->police.burst_pkt;
        }

exit_unlock:
        mutex_unlock(&fl_priv->meter_stats_lock);
        return err;
}

int nfp_init_meter_table(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
}

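/* Walk the meter table and request updated stats for every installed
 * meter.
 */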
void
nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
{
        struct nfp_meter_entry *meter_entry = NULL;
        struct rhashtable_iter iter;

        mutex_lock(&fl_priv->meter_stats_lock);
        rhashtable_walk_enter(&fl_priv->meter_table, &iter);
        rhashtable_walk_start(&iter);

        while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(meter_entry))
                        continue;
                nfp_flower_stats_rlim_request(fl_priv,
                                              meter_entry->meter_id, false);
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
        mutex_unlock(&fl_priv->meter_stats_lock);
}

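/* FLOW_ACT_REPLACE handler for standalone tc police actions: create a
 * meter entry per action and program it in hardware, addressed by meter
 * id rather than by port.
 */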
static int
nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
                        struct netlink_ext_ack *extack)
{
        struct flow_action_entry *paction = &fl_act->action.entries[0];
        u32 action_num = fl_act->action.num_entries;
        struct nfp_flower_priv *fl_priv = app->priv;
        struct flow_action_entry *action = NULL;
        u32 burst, i, meter_id;
        bool pps_support, pps;
        bool add = false;
        u64 rate;
        int err;

        pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

        for (i = 0; i < action_num; i++) {
                /* Set qos associated data for this interface */
                action = paction + i;
                if (action->id != FLOW_ACTION_POLICE) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: qos rate limit offload requires police action");
                        continue;
                }

                err = nfp_policer_validate(&fl_act->action, action, extack, false);
                if (err)
                        return err;

                if (action->police.rate_bytes_ps > 0) {
                        rate = action->police.rate_bytes_ps;
                        burst = action->police.burst;
                } else if (action->police.rate_pkt_ps > 0 && pps_support) {
                        rate = action->police.rate_pkt_ps;
                        burst = action->police.burst_pkt;
                } else {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: unsupported qos rate limit");
                        continue;
                }

                if (rate != 0) {
                        meter_id = action->hw_index;
                        if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
                                continue;

                        pps = false;
                        if (action->police.rate_pkt_ps > 0)
                                pps = true;
                        nfp_flower_offload_one_police(app, false, pps, meter_id,
                                                      rate, burst);
                        add = true;
                }
        }

        return add ? 0 : -EOPNOTSUPP;
}

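/* FLOW_ACT_DESTROY handler: send a QOS_DEL cmsg for the meter and drop
 * the corresponding software entry.
 */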
static int
nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
                       struct netlink_ext_ack *extack)
{
        struct nfp_meter_entry *meter_entry = NULL;
        struct nfp_police_config *config;
        struct sk_buff *skb;
        u32 meter_id;
        bool pps;

        /* Delete qos associated data for this interface */
        if (fl_act->id != FLOW_ACTION_POLICE) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported offload: qos rate limit offload requires police action");
                return -EOPNOTSUPP;
        }

        meter_id = fl_act->index;
        meter_entry = nfp_flower_search_meter_entry(app, meter_id);
        if (!meter_entry) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "no meter entry found when deleting the action index");
                return -ENOENT;
        }
        pps = !meter_entry->bps;

        skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
                                    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        config = nfp_flower_cmsg_get_data(skb);
        memset(config, 0, sizeof(struct nfp_police_config));
        config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
        config->head.meter_id = cpu_to_be32(meter_id);
        if (pps)
                config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);

        nfp_ctrl_tx(app->ctrl, skb);
        nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);

        return 0;
}

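/* Record a meter stats reply in the matching meter entry; pass + drop
 * totals become the current packet/byte counters.
 */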
void
nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_meter_entry *meter_entry = NULL;
        struct nfp_police_stats_reply *msg = pmsg;
        u32 meter_id;

        meter_id = be32_to_cpu(msg->head.meter_id);
        mutex_lock(&fl_priv->meter_stats_lock);

        meter_entry = nfp_flower_search_meter_entry(app, meter_id);
        if (!meter_entry)
                goto exit_unlock;

        meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
                                       be64_to_cpu(msg->drop_pkts);
        meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
                                        be64_to_cpu(msg->drop_bytes);
        meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
        if (!meter_entry->stats.update) {
                meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
                meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
                meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
        }

        meter_entry->stats.update = jiffies;

exit_unlock:
        mutex_unlock(&fl_priv->meter_stats_lock);
}

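/* FLOW_ACT_STATS handler: report the delta since the previous poll and
 * roll the previous counters forward.
 */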
static int
nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
                      struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_meter_entry *meter_entry = NULL;
        u64 diff_bytes, diff_pkts, diff_drops;
        int err = 0;

        if (fl_act->id != FLOW_ACTION_POLICE) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported offload: qos rate limit offload requires police action");
                return -EOPNOTSUPP;
        }

        mutex_lock(&fl_priv->meter_stats_lock);
        meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
        if (!meter_entry) {
                err = -ENOENT;
                goto exit_unlock;
        }
        diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
                    meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
        diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
                     meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
        diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
                     meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;

        flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
                          meter_entry->stats.update,
                          FLOW_ACTION_HW_STATS_DELAYED);

        meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
        meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
        meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;

exit_unlock:
        mutex_unlock(&fl_priv->meter_stats_lock);
        return err;
}

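/* Entry point for tc action (police/meter) offload requests; requires
 * the NFP_FL_FEATS_QOS_METER firmware feature.
 */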
int nfp_setup_tc_act_offload(struct nfp_app *app,
                             struct flow_offload_action *fl_act)
{
        struct netlink_ext_ack *extack = fl_act->extack;
        struct nfp_flower_priv *fl_priv = app->priv;

        if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
                return -EOPNOTSUPP;

        switch (fl_act->command) {
        case FLOW_ACT_REPLACE:
                return nfp_act_install_actions(app, fl_act, extack);
        case FLOW_ACT_DESTROY:
                return nfp_act_remove_actions(app, fl_act, extack);
        case FLOW_ACT_STATS:
                return nfp_act_stats_actions(app, fl_act, extack);
        default:
                return -EOPNOTSUPP;
        }
}
