// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/health.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/netdev_lock.h>

struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};

struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};

struct mlx5e_ptp_port_ts_cqe_tracker {
	u8 metadata_id;
	bool inuse : 1;
	struct list_head entry;
};

struct mlx5e_ptp_port_ts_cqe_list {
	struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
	struct list_head tracker_list_head;
	/* Sync list operations in xmit and napi_poll contexts */
	spinlock_t tracker_list_lock;
};

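/* Each tracker corresponds to one hardware metadata index. The xmit path
 * marks a tracker in use and queues it on the pending list; napi_poll
 * removes it when the matching port timestamp CQE arrives or when the
 * entry is written off as undelivered.
 */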
static inline void
mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(tracker->inuse);
	tracker->inuse = true;
	spin_lock_bh(&list->tracker_list_lock);
	list_add_tail(&tracker->entry, &list->tracker_list_head);
	spin_unlock_bh(&list->tracker_list_lock);
}

static void
mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(!tracker->inuse);
	tracker->inuse = false;
	spin_lock_bh(&list->tracker_list_lock);
	list_del(&tracker->entry);
	spin_unlock_bh(&list->tracker_list_lock);
}

void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
{
	mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
}

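/* A PTP skb completes twice: once on the SQ's regular CQ (providing the
 * CQE timestamp) and once on the port timestamp CQ. Both timestamps are
 * staged in skb->cb until the second one arrives.
 */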
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};

void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}

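/* Report the port timestamp to the socket only when it is consistent with
 * the CQE timestamp. A divergence beyond 1/128 second is counted as an
 * abort; beyond 1/2 second the SQ is additionally scheduled for recovery.
 */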
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptpsq *ptpsq)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		struct mlx5e_txqsq *sq = &ptpsq->txqsq;

		ptpsq->cq_stats->abort++;
		ptpsq->cq_stats->abort_abs_diff_ns += diff;
		if (diff > (NSEC_PER_SEC >> 1) &&
		    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
			netdev_warn(sq->netdev,
				    "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n",
				    (s64)diff, sq->sqn);
			queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work);
		}
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}

void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptpsq *ptpsq)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* Once both CQEs have arrived, check and report the port tstamp, and
	 * clear skb cb as the skb is soon to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

static struct sk_buff *
mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	return map->data[metadata];
}

static struct sk_buff *
mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	struct sk_buff *skb;

	skb = map->data[metadata];
	map->data[metadata] = NULL;

	return skb;
}

static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
	/* Consider the map unhealthy once more than 15/16 of its capacity
	 * cannot be reclaimed.
	 */
	return map->undelivered_counter > (map->capacity >> 4) * 15;
}

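/* Walk the pending list in submission order and count as lost every entry
 * whose CQE timestamp trails @port_tstamp by more than the undelivered
 * timeout. The map slot itself is reclaimed only if the late CQE ever
 * arrives.
 */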
static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
						 ktime_t port_tstamp)
{
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;

	spin_lock_bh(&cqe_list->tracker_list_lock);
	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
		struct sk_buff *skb =
			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;

		if (!dma_tstamp ||
		    ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
			break;

		metadata_map->undelivered_counter++;
		WARN_ON_ONCE(!pos->inuse);
		pos->inuse = false;
		list_del(&pos->entry);
		ptpsq->cq_stats->lost_cqe++;
	}
	spin_unlock_bh(&cqe_list->tracker_list_lock);
}

#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)

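/* Complete one port timestamp CQE: detach its tracker (or, for a CQE that
 * was already written off as lost, give the map slot back), report the
 * timestamp to the stack, and stash the metadata id in @md_buff so it is
 * recycled to the freelist only after the CQ doorbell is updated.
 */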
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    u8 *md_buff,
				    u8 *md_buff_sz,
				    int budget)
{
	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
		mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
	} else {
		/* Reclaim space in the unlikely event the CQE was delivered
		 * after being marked late.
		 */
		ptpsq->metadata_map.undelivered_counter--;
		ptpsq->cq_stats->late_cqe++;
	}

	skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);

	if (unlikely(is_err_cqe)) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq);
	ptpsq->cq_stats->cqe++;

	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
	napi_consume_skb(skb, budget);
	md_buff[(*md_buff_sz)++] = metadata_id;
	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
	    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}

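/* Poll the port timestamp CQ. Freed metadata ids are buffered on the
 * stack and only pushed back to the freelist after the doorbell record
 * has been updated, keeping id reuse ordered behind CQ space release.
 */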
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
	u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
	u8 metadata_buff_sz = 0;
	struct mlx5_cqwq *cqwq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	cqwq = &cq->wq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
					metadata_buff, &metadata_buff_sz, napi_budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	while (metadata_buff_sz > 0)
		mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
					     metadata_buff[--metadata_buff_sz]);

	mlx5e_txqsq_wake(&ptpsq->txqsq);

	return work_done == budget;
}

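/* NAPI handler shared by all PTP queues of the channel: drain the per-TC
 * TX and port timestamp CQs, then the RX CQ, and re-arm everything only
 * once no queue reports outstanding work.
 */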
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}

static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev = c->pdev;
	sq->clock = mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->netdev = c->netdev;
	sq->priv = c->priv;
	sq->mdev = mdev;
	sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix = txq_ix;
	sq->uar_map = c->bfreg->map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

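/* Size the metadata database to the smaller of the SQ size and the
 * hardware's metadata counter range (both powers of two), so the result
 * can double as the wqe_counter-to-metadata mask.
 */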
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
	int db_sz;
	int md;

	cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
	if (!cqe_list)
		return -ENOMEM;
	ptpsq->ts_cqe_pending_list = cqe_list;

	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
					  ts_cqe_metadata_size2wqe_counter));
	ptpsq->ts_cqe_ctr_mask = db_sz - 1;

	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
					GFP_KERNEL, numa);
	if (!cqe_list->nodes)
		goto free_cqe_list;
	INIT_LIST_HEAD(&cqe_list->tracker_list_head);
	spin_lock_init(&cqe_list->tracker_list_lock);

	metadata_freelist->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
			      GFP_KERNEL, numa);
	if (!metadata_freelist->data)
		goto free_cqe_list_nodes;
	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;

	for (md = 0; md < db_sz; ++md) {
		cqe_list->nodes[md].metadata_id = md;
		metadata_freelist->data[md] = md;
	}
	metadata_freelist->pc = db_sz;

	metadata_map->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
			      GFP_KERNEL, numa);
	if (!metadata_map->data)
		goto free_metadata_freelist;
	metadata_map->capacity = db_sz;

	return 0;

free_metadata_freelist:
	kvfree(metadata_freelist->data);
free_cqe_list_nodes:
	kvfree(cqe_list->nodes);
free_cqe_list:
	kvfree(cqe_list);
	return -ENOMEM;
}

static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
	int idx;

	for (idx = 0; idx < map->capacity; ++idx) {
		struct sk_buff *skb = map->data[idx];

		dev_kfree_skb_any(skb);
	}
}

static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
	kvfree(ptpsq->metadata_map.data);
	kvfree(ptpsq->metadata_freelist.data);
	kvfree(ptpsq->ts_cqe_pending_list->nodes);
	kvfree(ptpsq->ts_cqe_pending_list);
}

static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
	struct mlx5e_ptpsq *ptpsq =
		container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);

	mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}

static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
	csp.uar_page = c->bfreg->index;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}

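/* Tear down in reverse of open. When called from the unhealthy reporter's
 * own work item, skip cancel_work_sync() on it to avoid self-deadlock.
 */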
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	if (current_work() != &ptpsq->report_unhealthy_work)
		cancel_work_sync(&ptpsq->report_unhealthy_work);
	mlx5e_ptp_free_traffic_db(ptpsq);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

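/* The PTP txqs occupy the netdev txq indices right after the regular
 * channels' num_tc * num_channels queues, one per TC.
 */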
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;
		u32 tisn;

		tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
					      c->lag_port, tc);
		err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}

static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}

static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.netdev = c->netdev;
	ccp.wq = c->priv->wq;
	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;
	ccp.uar = c->bfreg->up;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}

static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.netdev = c->netdev;
	ccp.wq = c->priv->wq;
	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;
	ccp.uar = c->bfreg->up;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
}

static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}

static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}

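/* Derive the PTP channel's parameters from the active channel parameters,
 * capping the SQ size to what the hardware can track through the
 * wqe_counter based metadata (ts_cqe_metadata_size2wqe_counter).
 */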
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size =
			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
	}
}

static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = mdev->clock;
	rq->hwtstamp_config = &priv->hwtstamp_config;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}

static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	int err, sd_ix;
	u16 q_counter;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
	q_counter = c->priv->q_counter[sd_ix];
	return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}

static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}

static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}

static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}

static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}

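/* Steer PTP traffic to the PTP RQ's TIR: event messages arriving over UDP
 * (port PTP_EV_PORT) on IPv4/IPv6, and over L2 (ethertype ETH_P_1588).
 */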
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
	return err;
}

int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv = priv;
	c->mdev = priv->mdev;
	c->pdev = mlx5_core_dma_dev(priv->mdev);
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc = mlx5e_get_dcb_num_tc(params);
	c->stats = &priv->ptp_stats.ch;
	c->lag_port = lag_port;
	c->bfreg = &mdev->priv.bfreg;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del_locked(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}

void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del_locked(&c->napi);

	kvfree(c);
}

void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable_locked(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
		netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
	}
	mlx5e_trigger_napi_sched(&c->napi);
}

void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
		mlx5e_deactivate_rq(&c->rq);
	}

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable_locked(&c->napi);
}

int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}

int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;
	mlx5e_fs_set_ptp(fs, ptp_fs);

	return 0;
}

void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(fs);
	kfree(ptp_fs);
}

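/* Install or remove the PTP RX steering rules on demand. Installation
 * requires an active PTP-RX channel; removal expects it to be gone, so a
 * mismatch indicates a caller bug and is flagged with a one-shot WARN.
 */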
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}