1 /*
2 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/dim.h>
34 #include <net/tc_act/tc_gact.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <net/geneve.h>
38 #include <linux/bpf.h>
39 #include <linux/debugfs.h>
40 #include <linux/if_bridge.h>
41 #include <linux/filter.h>
42 #include <net/netdev_lock.h>
43 #include <net/netdev_queues.h>
44 #include <net/netdev_rx_queue.h>
45 #include <net/page_pool/types.h>
46 #include <net/pkt_sched.h>
47 #include <net/xdp_sock_drv.h>
48 #include "eswitch.h"
49 #include "en.h"
50 #include "en/dim.h"
51 #include "en/txrx.h"
52 #include "en_tc.h"
53 #include "en_rep.h"
54 #include "en_accel/ipsec.h"
55 #include "en_accel/psp.h"
56 #include "en_accel/macsec.h"
57 #include "en_accel/en_accel.h"
58 #include "en_accel/ktls.h"
59 #include "lib/vxlan.h"
60 #include "lib/clock.h"
61 #include "en/port.h"
62 #include "en/xdp.h"
63 #include "lib/eq.h"
64 #include "en/monitor_stats.h"
65 #include "en/health.h"
66 #include "en/params.h"
67 #include "en/xsk/pool.h"
68 #include "en/xsk/setup.h"
69 #include "en/xsk/rx.h"
70 #include "en/xsk/tx.h"
71 #include "en/hv_vhca_stats.h"
72 #include "en/devlink.h"
73 #include "lib/mlx5.h"
74 #include "en/ptp.h"
75 #include "en/htb.h"
76 #include "qos.h"
77 #include "en/trap.h"
78 #include "lib/devcom.h"
79 #include "lib/sd.h"
80 #include "en/pcie_cong_event.h"
81
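/* HW-GRO (SHAMPO) is usable only when the device supports header/data
 * split with data merge and a fixed-entry-size KSM mkey whose minimum
 * entry size fits the SHAMPO header entry size (see the checks below).
 */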
82 static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
83 {
84 if (!MLX5_CAP_GEN(mdev, shampo) ||
85 !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
86 return false;
87
88 /* Our HW-GRO implementation relies on "KSM Mkey" for
89 * SHAMPO headers buffer mapping
90 */
91 if (!MLX5_CAP_GEN(mdev, fixed_buffer_size))
92 return false;
93
94 if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid))
95 return false;
96
97 if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) >
98 MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
99 return false;
100
101 return true;
102 }
103
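/* Striding RQ with UMR is usable only when the device advertises the
 * striding_rq, umr_ptr_rlky and reg_umr_sq capabilities, and a single UMR
 * WQE for the given page_shift/umr_mode fits within the maximum SQ WQE.
 */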
104 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
105 enum mlx5e_mpwrq_umr_mode umr_mode)
106 {
107 u16 umr_wqebbs, max_wqebbs;
108 bool striding_rq_umr;
109
110 striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
111 MLX5_CAP_ETH(mdev, reg_umr_sq);
112 if (!striding_rq_umr)
113 return false;
114
115 umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
116 max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
117 /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
118 * calculated from mlx5e_get_max_sq_aligned_wqebbs.
119 */
120 if (WARN_ON(umr_wqebbs > max_wqebbs))
121 return false;
122
123 return true;
124 }
125
126 void mlx5e_update_carrier(struct mlx5e_priv *priv)
127 {
128 struct mlx5_core_dev *mdev = priv->mdev;
129 u8 port_state;
130 bool up;
131
132 port_state = mlx5_query_vport_state(mdev,
133 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
134 0);
135
136 up = port_state == VPORT_STATE_UP;
137 if (up == netif_carrier_ok(priv->netdev))
138 netif_carrier_event(priv->netdev);
139 if (up) {
140 netdev_info(priv->netdev, "Link up\n");
141 netif_carrier_on(priv->netdev);
142 } else {
143 netdev_info(priv->netdev, "Link down\n");
144 netif_carrier_off(priv->netdev);
145 }
146 }
147
148 static void mlx5e_update_carrier_work(struct work_struct *work)
149 {
150 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
151 update_carrier_work);
152
153 mutex_lock(&priv->state_lock);
154 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
155 if (priv->profile->update_carrier)
156 priv->profile->update_carrier(priv);
157 mutex_unlock(&priv->state_lock);
158 }
159
160 static void mlx5e_update_stats_work(struct work_struct *work)
161 {
162 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
163 update_stats_work);
164
165 mutex_lock(&priv->state_lock);
166 priv->profile->update_stats(priv);
167 mutex_unlock(&priv->state_lock);
168 }
169
170 void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
171 {
172 if (!priv->profile->update_stats)
173 return;
174
175 if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
176 return;
177
178 queue_work(priv->wq, &priv->update_stats_work);
179 }
180
181 static int async_event(struct notifier_block *nb, unsigned long event, void *data)
182 {
183 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
184 struct mlx5_eqe *eqe = data;
185
186 if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
187 return NOTIFY_DONE;
188
189 switch (eqe->sub_type) {
190 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
191 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
192 queue_work(priv->wq, &priv->update_carrier_work);
193 break;
194 default:
195 return NOTIFY_DONE;
196 }
197
198 return NOTIFY_OK;
199 }
200
201 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
202 {
203 priv->events_nb.notifier_call = async_event;
204 mlx5_notifier_register(priv->mdev, &priv->events_nb);
205 }
206
207 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
208 {
209 mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
210 }
211
212 static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
213 {
214 struct mlx5e_priv *slave_priv = my_data;
215
216 switch (event) {
217 case MPV_DEVCOM_MASTER_UP:
218 mlx5_devcom_comp_set_ready(slave_priv->devcom, true);
219 break;
220 case MPV_DEVCOM_MASTER_DOWN:
221 /* No need to set comp ready to false here, since we unregister right
222 * after, and clearing it would hurt the cleanup flow.
223 */
224 break;
225 case MPV_DEVCOM_IPSEC_MASTER_UP:
226 case MPV_DEVCOM_IPSEC_MASTER_DOWN:
227 mlx5e_ipsec_handle_mpv_event(event, my_data, event_data);
228 break;
229 }
230
231 return 0;
232 }
233
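/* Register the MPV devcom component keyed by *data. If this device is the
 * multi-port master, immediately notify the peer that the master (and its
 * IPsec offload) is up.
 */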
234 static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
235 {
236 struct mlx5_devcom_match_attr attr = {
237 .key.val = *data,
238 };
239
240 priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
241 MLX5_DEVCOM_MPV,
242 &attr,
243 mlx5e_devcom_event_mpv,
244 priv);
245 if (!priv->devcom)
246 return -EINVAL;
247
248 if (mlx5_core_is_mp_master(priv->mdev)) {
249 mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
250 MPV_DEVCOM_MASTER_UP, priv);
251 mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP);
252 }
253
254 return 0;
255 }
256
257 static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
258 {
259 if (!priv->devcom)
260 return;
261
262 if (mlx5_core_is_mp_master(priv->mdev)) {
263 mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN,
264 MPV_DEVCOM_MASTER_DOWN, priv);
265 mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN);
266 }
267
268 mlx5_devcom_unregister_component(priv->devcom);
269 priv->devcom = NULL;
270 }
271
272 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
273 {
274 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
275 struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
276 int err;
277
278 switch (event) {
279 case MLX5_DRIVER_EVENT_TYPE_TRAP:
280 err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
281 if (err) {
282 trap_event_ctx->err = err;
283 return NOTIFY_BAD;
284 }
285 break;
286 case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
287 if (mlx5e_devcom_init_mpv(priv, data))
288 return NOTIFY_BAD;
289 break;
290 case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
291 mlx5e_devcom_cleanup_mpv(priv);
292 break;
293 default:
294 return NOTIFY_DONE;
295 }
296 return NOTIFY_OK;
297 }
298
299 static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
300 {
301 priv->blocking_events_nb.notifier_call = blocking_event;
302 mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
303 }
304
305 static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
306 {
307 mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
308 }
309
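/* Convert a number of UMR translation entries to octowords (16-byte
 * units), padding up to MLX5_UMR_FLEX_ALIGNMENT.
 */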
310 static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
311 {
312 u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
313 u32 sz;
314
315 sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
316
317 return sz / MLX5_OCTWORD;
318 }
319
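/* Pre-build the constant part of the UMR WQE (control and UMR-control
 * segments) that is reused for every MPWQE of this RQ.
 */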
320 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
321 struct mlx5e_icosq *sq,
322 struct mlx5e_umr_wqe *wqe)
323 {
324 struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
325 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
326 u16 octowords;
327 u8 ds_cnt;
328
329 ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
330 rq->mpwqe.umr_mode),
331 MLX5_SEND_WQE_DS);
332
333 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
334 ds_cnt);
335 cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
336
337 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
338 octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
339 ucseg->xlt_octowords = cpu_to_be16(octowords);
340 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
341 }
342
343 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
344 {
345 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
346 size_t alloc_size;
347
348 alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info,
349 alloc_units.frag_pages,
350 rq->mpwqe.pages_per_wqe));
351
352 rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
353 if (!rq->mpwqe.info)
354 return -ENOMEM;
355
356 /* For deferred page release (release right before alloc), make sure
357 * that release is not called on the first round.
358 */
359 for (int i = 0; i < wq_sz; i++) {
360 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
361
362 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
363 }
364
365 mlx5e_build_umr_wqe(rq, rq->icosq,
366 container_of(&rq->mpwqe.umr_wqe,
367 struct mlx5e_umr_wqe, hdr));
368
369 return 0;
370 }
371
372 static int mlx5e_rq_alloc_mpwqe_linear_info(struct mlx5e_rq *rq, int node,
373 struct mlx5e_params *params,
374 struct mlx5e_rq_opt_param *rqo)
375 {
376 struct mlx5_core_dev *mdev = rq->mdev;
377 struct mlx5e_mpw_linear_info *li;
378 u32 linear_frag_count;
379
380 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, rqo) ||
381 !params->xdp_prog)
382 return 0;
383
384 li = kvzalloc_node(sizeof(*li), GFP_KERNEL, node);
385 if (!li)
386 return -ENOMEM;
387
388 linear_frag_count =
389 BIT(rq->mpwqe.page_shift - MLX5E_XDP_LOG_MAX_LINEAR_SZ);
390 if (linear_frag_count > U16_MAX) {
391 netdev_warn(rq->netdev,
392 "rq %d: linear_frag_count (%u) larger than expected (%u), page_shift: %u, log_max_linear_sz: %u\n",
393 rq->ix, linear_frag_count, U16_MAX,
394 rq->mpwqe.page_shift, MLX5E_XDP_LOG_MAX_LINEAR_SZ);
395 kvfree(li);
396 return -EINVAL;
397 }
398
399 li->max_frags = linear_frag_count;
400 rq->mpwqe.linear_info = li;
401
402 /* Set to max to force allocation on first run. */
403 li->frag_page.frags = li->max_frags;
404
405 return 0;
406 }
407
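/* Map an MPWRQ UMR mode to the mkey access mode used for its translation
 * entries (MTT, KSM or KLM).
 */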
408 static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
409 {
410 switch (umr_mode) {
411 case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
412 return MLX5_MKC_ACCESS_MODE_MTT;
413 case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
414 return MLX5_MKC_ACCESS_MODE_KSM;
415 case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
416 return MLX5_MKC_ACCESS_MODE_KLMS;
417 case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
418 return MLX5_MKC_ACCESS_MODE_KSM;
419 }
420 WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
421 return 0;
422 }
423
424 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
425 u32 npages, u8 page_shift, u32 *umr_mkey,
426 dma_addr_t filler_addr,
427 enum mlx5e_mpwrq_umr_mode umr_mode,
428 u32 xsk_chunk_size)
429 {
430 struct mlx5_mtt *mtt;
431 struct mlx5_ksm *ksm;
432 struct mlx5_klm *klm;
433 u32 octwords;
434 int inlen;
435 void *mkc;
436 u32 *in;
437 int err;
438 int i;
439
440 if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
441 umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
442 !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
443 mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
444 return -EINVAL;
445 }
446
447 octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
448
449 inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
450 MLX5_OCTWORD, octwords);
451 if (inlen < 0)
452 return inlen;
453
454 in = kvzalloc(inlen, GFP_KERNEL);
455 if (!in)
456 return -ENOMEM;
457
458 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
459
460 MLX5_SET(mkc, mkc, free, 1);
461 MLX5_SET(mkc, mkc, umr_en, 1);
462 MLX5_SET(mkc, mkc, lw, 1);
463 MLX5_SET(mkc, mkc, lr, 1);
464 MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
465 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
466 MLX5_SET(mkc, mkc, qpn, 0xffffff);
467 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
468 MLX5_SET64(mkc, mkc, len, npages << page_shift);
469 MLX5_SET(mkc, mkc, translations_octword_size, octwords);
470 if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
471 MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
472 else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
473 MLX5_SET(mkc, mkc, log_page_size, page_shift);
474 MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
475
476 /* Initialize the mkey with all MTTs pointing to a default
477 * page (filler_addr). When the channels are activated, UMR
478 * WQEs will redirect the RX WQEs to the actual memory from
479 * the RQ's pool, while the gaps (wqe_overflow) remain mapped
480 * to the default page.
481 */
482 switch (umr_mode) {
483 case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
484 klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
485 for (i = 0; i < npages; i++) {
486 klm[i << 1] = (struct mlx5_klm) {
487 .va = cpu_to_be64(filler_addr),
488 .bcount = cpu_to_be32(xsk_chunk_size),
489 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
490 };
491 klm[(i << 1) + 1] = (struct mlx5_klm) {
492 .va = cpu_to_be64(filler_addr),
493 .bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
494 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
495 };
496 }
497 break;
498 case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
499 ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
500 for (i = 0; i < npages; i++)
501 ksm[i] = (struct mlx5_ksm) {
502 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
503 .va = cpu_to_be64(filler_addr),
504 };
505 break;
506 case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
507 mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
508 for (i = 0; i < npages; i++)
509 mtt[i] = (struct mlx5_mtt) {
510 .ptag = cpu_to_be64(filler_addr),
511 };
512 break;
513 case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
514 ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
515 for (i = 0; i < npages * 4; i++) {
516 ksm[i] = (struct mlx5_ksm) {
517 .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
518 .va = cpu_to_be64(filler_addr),
519 };
520 }
521 break;
522 }
523
524 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
525
526 kvfree(in);
527 return err;
528 }
529
530 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
531 {
532 u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
533 u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
534 u32 num_entries, max_num_entries;
535 u32 umr_mkey;
536 int err;
537
538 max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
539
540 /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
541 if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
542 &num_entries) ||
543 num_entries > max_num_entries))
544 mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
545 __func__, wq_size, rq->mpwqe.mtts_per_wqe,
546 max_num_entries);
547
548 err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
549 &umr_mkey, rq->wqe_overflow.addr,
550 rq->mpwqe.umr_mode, xsk_chunk_size);
551 rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
552 return err;
553 }
554
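/* Pre-compute the page/offset layout of the WQE fragments for the legacy
 * (cyclic) RQ and mark the last fragment in each page for the RX path's
 * page-release logic.
 */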
555 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
556 {
557 struct mlx5e_wqe_frag_info next_frag = {};
558 struct mlx5e_wqe_frag_info *prev = NULL;
559 int i;
560
561 WARN_ON(rq->xsk_pool);
562
563 next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];
564
565 /* Skip first release due to deferred release. */
566 next_frag.flags = BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
567
568 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
569 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
570 struct mlx5e_wqe_frag_info *frag =
571 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
572 int f;
573
574 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
575 if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
576 /* Pages are assigned at runtime. */
577 next_frag.frag_page++;
578 next_frag.offset = 0;
579 if (prev)
580 prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
581 }
582 *frag = next_frag;
583
584 /* prepare next */
585 next_frag.offset += frag_info[f].frag_stride;
586 prev = frag;
587 }
588 }
589
590 if (prev)
591 prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
592 }
593
594 static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
595 {
596 int i;
597
598 /* Assumptions used by XSK batched allocator. */
599 WARN_ON(rq->wqe.info.num_frags != 1);
600 WARN_ON(rq->wqe.info.log_num_frags != 0);
601 WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
602
603 /* Given the above assumptions, a fragment maps to a single
604 * xsk_buff.
605 */
606 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
607 rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];
608
609 /* Skip the first release due to deferred release, as WQEs are
610 * not allocated yet.
611 */
612 rq->wqe.frags[i].flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
613 }
614 }
615
616 static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
617 {
618 int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
619 int len = wq_sz << rq->wqe.info.log_num_frags;
620 struct mlx5e_wqe_frag_info *frags;
621 union mlx5e_alloc_units *aus;
622 int aus_sz;
623
624 if (rq->xsk_pool)
625 aus_sz = sizeof(*aus->xsk_buffs);
626 else
627 aus_sz = sizeof(*aus->frag_pages);
628
629 aus = kvzalloc_node(array_size(len, aus_sz), GFP_KERNEL, node);
630 if (!aus)
631 return -ENOMEM;
632
633 frags = kvzalloc_node(array_size(len, sizeof(*frags)), GFP_KERNEL, node);
634 if (!frags) {
635 kvfree(aus);
636 return -ENOMEM;
637 }
638
639 rq->wqe.alloc_units = aus;
640 rq->wqe.frags = frags;
641
642 if (rq->xsk_pool)
643 mlx5e_init_xsk_buffs(rq);
644 else
645 mlx5e_init_frags_partition(rq);
646
647 return 0;
648 }
649
650 static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
651 {
652 kvfree(rq->wqe.frags);
653 kvfree(rq->wqe.alloc_units);
654 }
655
656 static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
657 {
658 struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
659
660 mlx5e_reporter_rq_cqe_err(rq);
661 }
662
663 static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
664 {
665 struct mlx5e_rq *rq = container_of(timeout_work,
666 struct mlx5e_rq,
667 rx_timeout_work);
668
669 mlx5e_reporter_rx_timeout(rq);
670 }
671
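/* Allocate and DMA-map the overflow ("drop") page that unused UMR mkey
 * entries point to (see the filler_addr handling in mlx5e_create_umr_mkey).
 */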
672 static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
673 {
674 /* xsk can have page_shift < PAGE_SHIFT */
675 u16 page_order = max_t(s16, rq->mpwqe.page_shift - PAGE_SHIFT, 0);
676 u32 page_size = BIT(PAGE_SHIFT + page_order);
677
678 rq->wqe_overflow.page = alloc_pages(GFP_KERNEL, page_order);
679 if (!rq->wqe_overflow.page)
680 return -ENOMEM;
681
682 rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
683 page_size, rq->buff.map_dir);
684 if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
685 __free_pages(rq->wqe_overflow.page, page_order);
686 return -ENOMEM;
687 }
688 return 0;
689 }
690
691 static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
692 {
693 u16 page_order = max_t(s16, rq->mpwqe.page_shift - PAGE_SHIFT, 0);
694 u32 page_size = BIT(PAGE_SHIFT + page_order);
695
696 dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, page_size,
697 rq->buff.map_dir);
698 __free_pages(rq->wqe_overflow.page, page_order);
699 }
700
701 static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
702 u32 xdp_frag_size, struct mlx5e_rq *rq)
703 {
704 struct mlx5_core_dev *mdev = c->mdev;
705 int err;
706
707 rq->wq_type = params->rq_wq_type;
708 rq->pdev = c->pdev;
709 rq->netdev = c->netdev;
710 rq->priv = c->priv;
711 rq->hwtstamp_config = &c->priv->hwtstamp_config;
712 rq->clock = mdev->clock;
713 rq->icosq = &c->icosq;
714 rq->ix = c->ix;
715 rq->channel = c;
716 rq->mdev = mdev;
717 rq->hw_mtu =
718 MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
719 rq->xdpsq = &c->rq_xdpsq;
720 rq->stats = &c->priv->channel_stats[c->ix]->rq;
721 rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
722 err = mlx5e_rq_set_handlers(rq, params, NULL);
723 if (err)
724 return err;
725
726 return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
727 xdp_frag_size);
728 }
729
730 static void mlx5e_release_rq_hd_pages(struct mlx5e_rq *rq,
731 struct mlx5e_shampo_hd *shampo)
732
733 {
734 for (int i = 0; i < shampo->nentries; i++) {
735 struct mlx5e_dma_info *info = &shampo->hd_buf_pages[i];
736
737 if (!info->page)
738 continue;
739
740 dma_unmap_page(rq->pdev, info->addr, PAGE_SIZE,
741 rq->buff.map_dir);
742 __free_page(info->page);
743 }
744 }
745
746 static int mlx5e_alloc_rq_hd_pages(struct mlx5e_rq *rq, int node,
747 struct mlx5e_shampo_hd *shampo)
748 {
749 int err, i;
750
751 for (i = 0; i < shampo->nentries; i++) {
752 struct page *page = alloc_pages_node(node, GFP_KERNEL, 0);
753 dma_addr_t addr;
754
755 if (!page) {
756 err = -ENOMEM;
757 goto err_free_pages;
758 }
759
760 addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
761 rq->buff.map_dir);
762 err = dma_mapping_error(rq->pdev, addr);
763 if (err) {
764 __free_page(page);
765 goto err_free_pages;
766 }
767
768 shampo->hd_buf_pages[i].page = page;
769 shampo->hd_buf_pages[i].addr = addr;
770 }
771
772 return 0;
773
774 err_free_pages:
775 mlx5e_release_rq_hd_pages(rq, shampo);
776
777 return err;
778 }
779
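/* Create an MTT mkey covering the SHAMPO header buffer pages; it is later
 * programmed as the WQ's headers_mkey in mlx5e_create_rq().
 */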
780 static int mlx5e_create_rq_hd_mkey(struct mlx5_core_dev *mdev,
781 struct mlx5e_shampo_hd *shampo)
782 {
783 enum mlx5e_mpwrq_umr_mode umr_mode = MLX5E_MPWRQ_UMR_MODE_ALIGNED;
784 struct mlx5_mtt *mtt;
785 void *mkc, *in;
786 int inlen, err;
787 u32 octwords;
788
789 octwords = mlx5e_mpwrq_umr_octowords(shampo->nentries, umr_mode);
790 inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
791 MLX5_OCTWORD, octwords);
792 if (inlen < 0)
793 return inlen;
794
795 in = kvzalloc(inlen, GFP_KERNEL);
796 if (!in)
797 return -ENOMEM;
798
799 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
800
801 MLX5_SET(mkc, mkc, lw, 1);
802 MLX5_SET(mkc, mkc, lr, 1);
803 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
804 mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
805 MLX5_SET(mkc, mkc, qpn, 0xffffff);
806 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
807 MLX5_SET64(mkc, mkc, len, shampo->hd_buf_size);
808 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
809 MLX5_SET(mkc, mkc, translations_octword_size, octwords);
810 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
811 octwords);
812
813 mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
814 for (int i = 0; i < shampo->nentries; i++)
815 mtt[i].ptag = cpu_to_be64(shampo->hd_buf_pages[i].addr);
816
817 err = mlx5_core_create_mkey(mdev, &shampo->mkey, in, inlen);
818
819 kvfree(in);
820 return err;
821 }
822
823 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
824 struct mlx5e_params *params,
825 struct mlx5e_rq_param *rq_param,
826 struct mlx5e_rq *rq,
827 int node)
828 {
829 struct mlx5e_shampo_hd *shampo;
830 int nentries, err, shampo_sz;
831 u32 hd_per_wq, hd_buf_size;
832
833 if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
834 return 0;
835
836 hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rq_param);
837 hd_buf_size = hd_per_wq * BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
838 nentries = hd_buf_size / PAGE_SIZE;
839 if (!nentries) {
840 mlx5_core_err(mdev, "SHAMPO header buffer size %u < %lu\n",
841 hd_buf_size, PAGE_SIZE);
842 return -EINVAL;
843 }
844
845 shampo_sz = struct_size(shampo, hd_buf_pages, nentries);
846 shampo = kvzalloc_node(shampo_sz, GFP_KERNEL, node);
847 if (!shampo)
848 return -ENOMEM;
849
850 shampo->hd_per_wq = hd_per_wq;
851 shampo->hd_buf_size = hd_buf_size;
852 shampo->nentries = nentries;
853 err = mlx5e_alloc_rq_hd_pages(rq, node, shampo);
854 if (err)
855 goto err_free;
856
857 err = mlx5e_create_rq_hd_mkey(mdev, shampo);
858 if (err)
859 goto err_release_pages;
860
861 /* GRO-only data structures */
862 rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
863 if (!rq->hw_gro_data) {
864 err = -ENOMEM;
865 goto err_destroy_mkey;
866 }
867
868 rq->mpwqe.shampo = shampo;
869
870 return 0;
871
872 err_destroy_mkey:
873 mlx5_core_destroy_mkey(mdev, shampo->mkey);
874 err_release_pages:
875 mlx5e_release_rq_hd_pages(rq, shampo);
876 err_free:
877 kvfree(shampo);
878
879 return err;
880 }
881
882 static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
883 {
884 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
885
886 if (!shampo)
887 return;
888
889 kvfree(rq->hw_gro_data);
890 mlx5_core_destroy_mkey(rq->mdev, shampo->mkey);
891 mlx5e_release_rq_hd_pages(rq, shampo);
892 kvfree(shampo);
893 }
894
895 static int mlx5e_alloc_rq(struct mlx5e_params *params,
896 struct mlx5e_rq_param *rq_param,
897 struct mlx5e_rq_opt_param *rqo,
898 int node, struct mlx5e_rq *rq)
899 {
900 void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
901 struct mlx5_core_dev *mdev = rq->mdev;
902 u32 pool_order = 0;
903 u32 pool_size;
904 int wq_sz;
905 int err;
906 int i;
907
908 rq_param->wq.db_numa_node = node;
909 INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
910 INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
911
912 if (params->xdp_prog)
913 bpf_prog_inc(params->xdp_prog);
914 RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
915
916 rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
917 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, rqo);
918 pool_size = 1 << params->log_rq_mtu_frames;
919
920 rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
921
922 switch (rq->wq_type) {
923 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
924 err = mlx5_wq_ll_create(mdev, &rq_param->wq, rqc_wq,
925 &rq->mpwqe.wq, &rq->wq_ctrl);
926 if (err)
927 goto err_rq_xdp_prog;
928
929 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
930
931 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
932
933 rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, rqo);
934 err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
935 if (err)
936 goto err_rq_wq_destroy;
937
938 rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, rqo);
939 rq->mpwqe.pages_per_wqe =
940 mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
941 rq->mpwqe.umr_mode);
942 rq->mpwqe.umr_wqebbs =
943 mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
944 rq->mpwqe.umr_mode);
945 rq->mpwqe.mtts_per_wqe =
946 mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
947 rq->mpwqe.umr_mode);
948
949 pool_size = rq->mpwqe.pages_per_wqe <<
950 mlx5e_mpwqe_get_log_rq_size(mdev, params, rqo);
951 pool_order = rq->mpwqe.page_shift - PAGE_SHIFT;
952
953 rq->mpwqe.log_stride_sz =
954 mlx5e_mpwqe_get_log_stride_size(mdev, params,
955 rqo);
956 rq->mpwqe.num_strides =
957 BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, rqo));
958 rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
959
960 rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
961
962 err = mlx5e_create_rq_umr_mkey(mdev, rq);
963 if (err)
964 goto err_rq_drop_page;
965
966 err = mlx5e_rq_alloc_mpwqe_info(rq, node);
967 if (err)
968 goto err_rq_mkey;
969
970 err = mlx5e_rq_alloc_mpwqe_linear_info(rq, node, params, rqo);
971 if (err)
972 goto err_free_mpwqe_info;
973
974 err = mlx5_rq_shampo_alloc(mdev, params, rq_param, rq, node);
975 if (err)
976 goto err_free_mpwqe_linear_info;
977
978 break;
979 default: /* MLX5_WQ_TYPE_CYCLIC */
980 err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq,
981 &rq->wqe.wq, &rq->wq_ctrl);
982 if (err)
983 goto err_rq_xdp_prog;
984
985 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
986
987 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
988
989 rq->wqe.info = rq_param->frags_info;
990 rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
991
992 err = mlx5e_init_wqe_alloc_info(rq, node);
993 if (err)
994 goto err_rq_wq_destroy;
995 }
996
997 if (mlx5e_rqo_xsk_param(rqo)) {
998 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
999 MEM_TYPE_XSK_BUFF_POOL, NULL);
1000 if (err)
1001 goto err_free_by_rq_type;
1002 xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
1003 } else {
1004 /* Create a page_pool and register it with rxq */
1005 struct page_pool_params pp_params = { 0 };
1006
1007 if (WARN_ON(BIT(PAGE_SHIFT + pool_order) / 64 >
1008 MLX5E_PAGECNT_BIAS_MAX)) {
1009 err = -E2BIG;
1010 goto err_free_by_rq_type;
1011 }
1012
1013 pp_params.order = pool_order;
1014 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1015 pp_params.pool_size = pool_size;
1016 pp_params.nid = node;
1017 pp_params.dev = rq->pdev;
1018 pp_params.napi = rq->cq.napi;
1019 pp_params.netdev = rq->netdev;
1020 pp_params.dma_dir = rq->buff.map_dir;
1021 pp_params.max_len = BIT(PAGE_SHIFT + pool_order);
1022 pp_params.queue_idx = rq->ix;
1023
1024 /* SHAMPO header/data split allows for unreadable netmem */
1025 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1026 pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
1027
1028 /* page_pool can be used even when there is no rq->xdp_prog:
1029 * given that page_pool does not handle DMA mapping, there is
1030 * no required state to clear. And page_pool gracefully handles
1031 * elevated refcnt.
1032 */
1033 rq->page_pool = page_pool_create(&pp_params);
1034 if (IS_ERR(rq->page_pool)) {
1035 err = PTR_ERR(rq->page_pool);
1036 rq->page_pool = NULL;
1037 goto err_free_by_rq_type;
1038 }
1039 if (!rq->hd_page_pool)
1040 rq->hd_page_pool = rq->page_pool;
1041 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
1042 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1043 MEM_TYPE_PAGE_POOL, rq->page_pool);
1044 if (err)
1045 goto err_destroy_page_pool;
1046 }
1047 }
1048
1049 for (i = 0; i < wq_sz; i++) {
1050 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
1051 struct mlx5e_rx_wqe_ll *wqe =
1052 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
1053 u32 byte_count =
1054 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
1055 u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
1056 rq->mpwqe.page_shift;
1057 u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
1058 0 : rq->buff.headroom;
1059
1060 wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
1061 wqe->data[0].byte_count = cpu_to_be32(byte_count);
1062 wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
1063 } else {
1064 struct mlx5e_rx_wqe_cyc *wqe =
1065 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
1066 int f;
1067
1068 for (f = 0; f < rq->wqe.info.num_frags; f++) {
1069 u32 frag_size = rq->wqe.info.arr[f].frag_size |
1070 MLX5_HW_START_PADDING;
1071
1072 wqe->data[f].byte_count = cpu_to_be32(frag_size);
1073 wqe->data[f].lkey = rq->mkey_be;
1074 }
1075 /* check if num_frags is not a power of two */
1076 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
1077 wqe->data[f].byte_count = 0;
1078 wqe->data[f].lkey = params->terminate_lkey_be;
1079 wqe->data[f].addr = 0;
1080 }
1081 }
1082 }
1083
1084 return 0;
1085
1086 err_destroy_page_pool:
1087 page_pool_destroy(rq->page_pool);
1088 err_free_by_rq_type:
1089 switch (rq->wq_type) {
1090 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1091 mlx5e_rq_free_shampo(rq);
1092 err_free_mpwqe_linear_info:
1093 kvfree(rq->mpwqe.linear_info);
1094 err_free_mpwqe_info:
1095 kvfree(rq->mpwqe.info);
1096 err_rq_mkey:
1097 mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
1098 err_rq_drop_page:
1099 mlx5e_free_mpwqe_rq_drop_page(rq);
1100 break;
1101 default: /* MLX5_WQ_TYPE_CYCLIC */
1102 mlx5e_free_wqe_alloc_info(rq);
1103 }
1104 err_rq_wq_destroy:
1105 mlx5_wq_destroy(&rq->wq_ctrl);
1106 err_rq_xdp_prog:
1107 if (params->xdp_prog)
1108 bpf_prog_put(params->xdp_prog);
1109
1110 return err;
1111 }
1112
1113 static void mlx5e_free_rq(struct mlx5e_rq *rq)
1114 {
1115 kvfree(rq->dim);
1116 page_pool_destroy(rq->page_pool);
1117
1118 switch (rq->wq_type) {
1119 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1120 mlx5e_rq_free_shampo(rq);
1121 kvfree(rq->mpwqe.linear_info);
1122 kvfree(rq->mpwqe.info);
1123 mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
1124 mlx5e_free_mpwqe_rq_drop_page(rq);
1125 break;
1126 default: /* MLX5_WQ_TYPE_CYCLIC */
1127 mlx5e_free_wqe_alloc_info(rq);
1128 }
1129
1130 mlx5_wq_destroy(&rq->wq_ctrl);
1131
1132 if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
1133 struct bpf_prog *old_prog;
1134
1135 old_prog = rcu_dereference_protected(rq->xdp_prog,
1136 lockdep_is_held(&rq->priv->state_lock));
1137 if (old_prog)
1138 bpf_prog_put(old_prog);
1139 }
1140 xdp_rxq_info_unreg(&rq->xdp_rxq);
1141 }
1142
1143 int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *rq_param,
1144 u16 q_counter)
1145 {
1146 struct mlx5_core_dev *mdev = rq->mdev;
1147 u8 ts_format;
1148 void *in;
1149 void *rqc;
1150 void *wq;
1151 int inlen;
1152 int err;
1153
1154 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
1155 sizeof(u64) * rq->wq_ctrl.buf.npages;
1156 in = kvzalloc(inlen, GFP_KERNEL);
1157 if (!in)
1158 return -ENOMEM;
1159
1160 ts_format = mlx5_is_real_time_rq(mdev) ?
1161 MLX5_TIMESTAMP_FORMAT_REAL_TIME :
1162 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
1163 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1164 wq = MLX5_ADDR_OF(rqc, rqc, wq);
1165
1166 memcpy(rqc, rq_param->rqc, sizeof(rq_param->rqc));
1167
1168 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
1169 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1170 MLX5_SET(rqc, rqc, ts_format, ts_format);
1171 MLX5_SET(rqc, rqc, counter_set_id, q_counter);
1172 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
1173 MLX5_ADAPTER_PAGE_SHIFT);
1174 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
1175
1176 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
1177 MLX5_SET(wq, wq, log_headers_buffer_entry_num,
1178 order_base_2(rq->mpwqe.shampo->hd_per_wq));
1179 MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
1180 }
1181
1182 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
1183 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1184
1185 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
1186
1187 kvfree(in);
1188
1189 return err;
1190 }
1191
1192 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
1193 {
1194 struct mlx5_core_dev *mdev = rq->mdev;
1195
1196 void *in;
1197 void *rqc;
1198 int inlen;
1199 int err;
1200
1201 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1202 in = kvzalloc(inlen, GFP_KERNEL);
1203 if (!in)
1204 return -ENOMEM;
1205
1206 if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
1207 mlx5e_rqwq_reset(rq);
1208
1209 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
1210
1211 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
1212 MLX5_SET(rqc, rqc, state, next_state);
1213
1214 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1215
1216 kvfree(in);
1217
1218 return err;
1219 }
1220
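/* Drop all pending completions from the RQ's CQ (regular or enhanced
 * mini-CQE layout) and update the doorbell record accordingly.
 */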
1221 static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
1222 {
1223 struct mlx5_cqwq *cqwq = &rq->cq.wq;
1224 struct mlx5_cqe64 *cqe;
1225
1226 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
1227 while ((cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)))
1228 mlx5_cqwq_pop(cqwq);
1229 } else {
1230 while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
1231 mlx5_cqwq_pop(cqwq);
1232 }
1233
1234 mlx5_cqwq_update_db_record(cqwq);
1235 }
1236
1237 int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
1238 {
1239 struct net_device *dev = rq->netdev;
1240 int err;
1241
1242 err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
1243 if (err) {
1244 netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
1245 return err;
1246 }
1247
1248 mlx5e_free_rx_descs(rq);
1249 mlx5e_flush_rq_cq(rq);
1250
1251 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1252 if (err) {
1253 netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
1254 return err;
1255 }
1256
1257 return 0;
1258 }
1259
1260 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
1261 {
1262 struct mlx5_core_dev *mdev = rq->mdev;
1263 void *in;
1264 void *rqc;
1265 int inlen;
1266 int err;
1267
1268 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
1269 in = kvzalloc(inlen, GFP_KERNEL);
1270 if (!in)
1271 return -ENOMEM;
1272
1273 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
1274
1275 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
1276 MLX5_SET64(modify_rq_in, in, modify_bitmask,
1277 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
1278 MLX5_SET(rqc, rqc, vsd, vsd);
1279 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
1280
1281 err = mlx5_core_modify_rq(mdev, rq->rqn, in);
1282
1283 kvfree(in);
1284
1285 return err;
1286 }
1287
1288 void mlx5e_destroy_rq(struct mlx5e_rq *rq)
1289 {
1290 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
1291 }
1292
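/* Poll until the RQ has been filled with at least the minimum number of RX
 * WQEs; on timeout, schedule the RX timeout recovery work and return
 * -ETIMEDOUT.
 */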
1293 int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
1294 {
1295 unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
1296
1297 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
1298
1299 do {
1300 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
1301 return 0;
1302
1303 msleep(20);
1304 } while (time_before(jiffies, exp_time));
1305
1306 netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
1307 rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
1308
1309 queue_work(rq->priv->wq, &rq->rx_timeout_work);
1310
1311 return -ETIMEDOUT;
1312 }
1313
1314 void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
1315 {
1316 struct mlx5_wq_ll *wq;
1317 u16 head;
1318 int i;
1319
1320 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
1321 return;
1322
1323 wq = &rq->mpwqe.wq;
1324 head = wq->head;
1325
1326 /* Release WQEs that are in missing state: they have been
1327 * popped from the list after completion but were not freed
1328 * due to deferred release.
1329 * Also free the linked-list reserved entry, hence the "+ 1".
1330 */
1331 for (i = 0; i < mlx5_wq_ll_missing(wq) + 1; i++) {
1332 rq->dealloc_wqe(rq, head);
1333 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
1334 }
1335
1336 rq->mpwqe.actual_wq_head = wq->head;
1337 rq->mpwqe.umr_in_progress = 0;
1338 rq->mpwqe.umr_completed = 0;
1339 }
1340
1341 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
1342 {
1343 __be16 wqe_ix_be;
1344 u16 wqe_ix;
1345
1346 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
1347 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1348
1349 mlx5e_free_rx_missing_descs(rq);
1350
1351 while (!mlx5_wq_ll_is_empty(wq)) {
1352 struct mlx5e_rx_wqe_ll *wqe;
1353
1354 wqe_ix_be = *wq->tail_next;
1355 wqe_ix = be16_to_cpu(wqe_ix_be);
1356 wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
1357 rq->dealloc_wqe(rq, wqe_ix);
1358 mlx5_wq_ll_pop(wq, wqe_ix_be,
1359 &wqe->next.next_wqe_index);
1360 }
1361
1362 mlx5e_mpwqe_dealloc_linear_page(rq);
1363 } else {
1364 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1365 u16 missing = mlx5_wq_cyc_missing(wq);
1366 u16 head = mlx5_wq_cyc_get_head(wq);
1367
1368 while (!mlx5_wq_cyc_is_empty(wq)) {
1369 wqe_ix = mlx5_wq_cyc_get_tail(wq);
1370 rq->dealloc_wqe(rq, wqe_ix);
1371 mlx5_wq_cyc_pop(wq);
1372 }
1373 /* Missing slots might also contain unreleased pages due to
1374 * deferred release.
1375 */
1376 while (missing--) {
1377 wqe_ix = mlx5_wq_cyc_ctr2ix(wq, head++);
1378 rq->dealloc_wqe(rq, wqe_ix);
1379 }
1380 }
1381
1382 }
1383
1384 int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *rq_param,
1385 struct mlx5e_rq_opt_param *rqo, int node, u16 q_counter,
1386 struct mlx5e_rq *rq)
1387 {
1388 struct mlx5_core_dev *mdev = rq->mdev;
1389 int err;
1390
1391 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
1392 __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
1393
1394 err = mlx5e_alloc_rq(params, rq_param, rqo, node, rq);
1395 if (err)
1396 return err;
1397
1398 err = mlx5e_create_rq(rq, rq_param, q_counter);
1399 if (err)
1400 goto err_free_rq;
1401
1402 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
1403 if (err)
1404 goto err_destroy_rq;
1405
1406 if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
1407 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
1408
1409 if (rq->channel && !params->rx_dim_enabled) {
1410 rq->channel->rx_cq_moder = params->rx_cq_moderation;
1411 } else if (rq->channel) {
1412 u8 cq_period_mode;
1413
1414 cq_period_mode = params->rx_moder_use_cqe_mode ?
1415 DIM_CQ_PERIOD_MODE_START_FROM_CQE :
1416 DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1417 mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
1418 params->rx_dim_enabled);
1419
1420 err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
1421 if (err)
1422 goto err_destroy_rq;
1423 }
1424
1425 /* We disable csum_complete when XDP is enabled since
1426 * XDP programs might manipulate packets which will render
1427 * skb->checksum incorrect.
1428 */
1429 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
1430 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
1431
1432 /* For CQE compression on striding RQ, use stride index provided by
1433 * HW if capability is supported.
1434 */
1435 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
1436 MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
1437 __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
1438
1439 /* For enhanced CQE compression packet processing, decompress the
1440 * session according to the enhanced layout.
1441 */
1442 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
1443 MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
1444 __set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
1445
1446 return 0;
1447
1448 err_destroy_rq:
1449 mlx5e_destroy_rq(rq);
1450 err_free_rq:
1451 mlx5e_free_rq(rq);
1452
1453 return err;
1454 }
1455
1456 void mlx5e_activate_rq(struct mlx5e_rq *rq)
1457 {
1458 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1459 }
1460
1461 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
1462 {
1463 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
1464 synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
1465 }
1466
1467 void mlx5e_close_rq(struct mlx5e_rq *rq)
1468 {
1469 if (rq->dim)
1470 cancel_work_sync(&rq->dim->work);
1471 cancel_work_sync(&rq->recover_work);
1472 cancel_work_sync(&rq->rx_timeout_work);
1473 mlx5e_destroy_rq(rq);
1474 mlx5e_free_rx_descs(rq);
1475 mlx5e_free_rq(rq);
1476 }
1477
1478 u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
1479 struct mlx5e_priv *priv,
1480 const struct mlx5e_profile *profile,
1481 u8 lag_port, u8 tc)
1482 {
1483 if (profile->get_tisn)
1484 return profile->get_tisn(mdev, priv, lag_port, tc);
1485
1486 return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc];
1487 }
1488
1489 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
1490 {
1491 kvfree(sq->db.xdpi_fifo.xi);
1492 kvfree(sq->db.wqe_info);
1493 }
1494
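/* The xdpi FIFO is sized to an upper bound of DS entries in the SQ, scaled
 * by the worst-case entries-to-DS ratio and rounded up to a power of two
 * so it can be indexed with a mask.
 */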
1495 static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
1496 {
1497 struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
1498 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1499 int entries;
1500 size_t size;
1501
1502 /* Upper bound for the maximum number of entries across all xmit modes. */
1503 entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS *
1504 MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO);
1505
1506 size = array_size(sizeof(*xdpi_fifo->xi), entries);
1507 xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
1508 if (!xdpi_fifo->xi)
1509 return -ENOMEM;
1510
1511 xdpi_fifo->pc = &sq->xdpi_fifo_pc;
1512 xdpi_fifo->cc = &sq->xdpi_fifo_cc;
1513 xdpi_fifo->mask = entries - 1;
1514
1515 return 0;
1516 }
1517
1518 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
1519 {
1520 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1521 size_t size;
1522 int err;
1523
1524 size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
1525 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1526 if (!sq->db.wqe_info)
1527 return -ENOMEM;
1528
1529 err = mlx5e_alloc_xdpsq_fifo(sq, numa);
1530 if (err) {
1531 mlx5e_free_xdpsq_db(sq);
1532 return err;
1533 }
1534
1535 return 0;
1536 }
1537
1538 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1539 struct mlx5e_params *params,
1540 struct xsk_buff_pool *xsk_pool,
1541 struct mlx5e_sq_param *param,
1542 struct mlx5e_xdpsq *sq,
1543 bool is_redirect)
1544 {
1545 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1546 struct mlx5_core_dev *mdev = c->mdev;
1547 struct mlx5_wq_cyc *wq = &sq->wq;
1548 int err;
1549
1550 sq->pdev = c->pdev;
1551 sq->mkey_be = c->mkey_be;
1552 sq->channel = c;
1553 sq->uar_map = c->bfreg->map;
1554 sq->min_inline_mode = params->tx_min_inline_mode;
1555 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
1556 sq->xsk_pool = xsk_pool;
1557
1558 sq->stats = sq->xsk_pool ?
1559 &c->priv->channel_stats[c->ix]->xsksq :
1560 is_redirect ?
1561 &c->priv->channel_stats[c->ix]->xdpsq :
1562 &c->priv->channel_stats[c->ix]->rq_xdpsq;
1563 sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
1564 mlx5e_stop_room_for_max_wqe(mdev);
1565 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1566
1567 param->wq.db_numa_node = cpu_to_node(c->cpu);
1568 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1569 if (err)
1570 return err;
1571 wq->db = &wq->db[MLX5_SND_DBR];
1572
1573 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1574 if (err)
1575 goto err_sq_wq_destroy;
1576
1577 return 0;
1578
1579 err_sq_wq_destroy:
1580 mlx5_wq_destroy(&sq->wq_ctrl);
1581
1582 return err;
1583 }
1584
1585 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1586 {
1587 mlx5e_free_xdpsq_db(sq);
1588 mlx5_wq_destroy(&sq->wq_ctrl);
1589 }
1590
1591 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1592 {
1593 kvfree(sq->db.wqe_info);
1594 }
1595
1596 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1597 {
1598 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1599 size_t size;
1600
1601 size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
1602 sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
1603 if (!sq->db.wqe_info)
1604 return -ENOMEM;
1605
1606 return 0;
1607 }
1608
1609 static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
1610 {
1611 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1612 recover_work);
1613
1614 mlx5e_reporter_icosq_cqe_err(sq);
1615 }
1616
1617 static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
1618 {
1619 struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
1620 recover_work);
1621
1622 /* Not implemented yet. */
1623
1624 netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
1625 }
1626
1627 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1628 struct mlx5e_sq_param *param,
1629 struct mlx5e_icosq *sq,
1630 work_func_t recover_work_func)
1631 {
1632 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1633 struct mlx5_core_dev *mdev = c->mdev;
1634 struct mlx5_wq_cyc *wq = &sq->wq;
1635 int err;
1636
1637 sq->channel = c;
1638 sq->uar_map = c->bfreg->map;
1639 sq->reserved_room = param->stop_room;
1640
1641 param->wq.db_numa_node = cpu_to_node(c->cpu);
1642 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1643 if (err)
1644 return err;
1645 wq->db = &wq->db[MLX5_SND_DBR];
1646
1647 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1648 if (err)
1649 goto err_sq_wq_destroy;
1650
1651 INIT_WORK(&sq->recover_work, recover_work_func);
1652
1653 return 0;
1654
1655 err_sq_wq_destroy:
1656 mlx5_wq_destroy(&sq->wq_ctrl);
1657
1658 return err;
1659 }
1660
1661 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1662 {
1663 mlx5e_free_icosq_db(sq);
1664 mlx5_wq_destroy(&sq->wq_ctrl);
1665 }
1666
1667 void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1668 {
1669 kvfree(sq->db.wqe_info);
1670 kvfree(sq->db.skb_fifo.fifo);
1671 kvfree(sq->db.dma_fifo);
1672 }
1673
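/* Allocate the TXQ SQ databases: the DMA and skb FIFOs are sized to the
 * number of DS slots in the WQ (a power of two, used as a mask), plus
 * per-WQE metadata.
 */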
1674 int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1675 {
1676 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1677 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1678
1679 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1680 sizeof(*sq->db.dma_fifo)),
1681 GFP_KERNEL, numa);
1682 sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
1683 sizeof(*sq->db.skb_fifo.fifo)),
1684 GFP_KERNEL, numa);
1685 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1686 sizeof(*sq->db.wqe_info)),
1687 GFP_KERNEL, numa);
1688 if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
1689 mlx5e_free_txqsq_db(sq);
1690 return -ENOMEM;
1691 }
1692
1693 sq->dma_fifo_mask = df_sz - 1;
1694
1695 sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
1696 sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
1697 sq->db.skb_fifo.mask = df_sz - 1;
1698
1699 return 0;
1700 }
1701
1702 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1703 int txq_ix,
1704 struct mlx5e_params *params,
1705 struct mlx5e_sq_param *param,
1706 struct mlx5e_txqsq *sq,
1707 int tc)
1708 {
1709 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1710 struct mlx5_core_dev *mdev = c->mdev;
1711 struct mlx5_wq_cyc *wq = &sq->wq;
1712 int err;
1713
1714 sq->pdev = c->pdev;
1715 sq->clock = mdev->clock;
1716 sq->mkey_be = c->mkey_be;
1717 sq->netdev = c->netdev;
1718 sq->mdev = c->mdev;
1719 sq->channel = c;
1720 sq->priv = c->priv;
1721 sq->ch_ix = c->ix;
1722 sq->txq_ix = txq_ix;
1723 sq->uar_map = c->bfreg->map;
1724 sq->min_inline_mode = params->tx_min_inline_mode;
1725 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1726 sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
1727 INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
1728 if (mlx5_ipsec_device_caps(c->priv->mdev))
1729 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1730 if (param->is_mpw)
1731 set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
1732 sq->stop_room = param->stop_room;
1733 sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
1734
1735 param->wq.db_numa_node = cpu_to_node(c->cpu);
1736 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1737 if (err)
1738 return err;
1739 wq->db = &wq->db[MLX5_SND_DBR];
1740
1741 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1742 if (err)
1743 goto err_sq_wq_destroy;
1744
1745 return 0;
1746
1747 err_sq_wq_destroy:
1748 mlx5_wq_destroy(&sq->wq_ctrl);
1749
1750 return err;
1751 }
1752
1753 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1754 {
1755 kvfree(sq->dim);
1756 mlx5e_free_txqsq_db(sq);
1757 mlx5_wq_destroy(&sq->wq_ctrl);
1758 }
1759
1760 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1761 struct mlx5e_sq_param *param,
1762 struct mlx5e_create_sq_param *csp,
1763 u32 *sqn)
1764 {
1765 u8 ts_format;
1766 void *in;
1767 void *sqc;
1768 void *wq;
1769 int inlen;
1770 int err;
1771
1772 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1773 sizeof(u64) * csp->wq_ctrl->buf.npages;
1774 in = kvzalloc(inlen, GFP_KERNEL);
1775 if (!in)
1776 return -ENOMEM;
1777
1778 ts_format = mlx5_is_real_time_sq(mdev) ?
1779 MLX5_TIMESTAMP_FORMAT_REAL_TIME :
1780 MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
1781 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1782 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1783
1784 memcpy(sqc, param->sqc, sizeof(param->sqc));
1785 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1786 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1787 MLX5_SET(sqc, sqc, cqn, csp->cqn);
1788 MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
1789 MLX5_SET(sqc, sqc, ts_format, ts_format);
1790
1791
1792 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1793 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
1794
1795 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1796 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1797
1798 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1799 MLX5_SET(wq, wq, uar_page, csp->uar_page);
1800 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
1801 MLX5_ADAPTER_PAGE_SHIFT);
1802 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
1803
1804 mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1805 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1806
1807 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1808
1809 kvfree(in);
1810
1811 return err;
1812 }
1813
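/* Issue MODIFY_SQ to move the SQ between states. The modify bitmask
 * selects the optional fields: bit 0 for the packet pacing rate limit
 * index, bit 2 for the QoS queue group id.
 */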
1814 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1815 struct mlx5e_modify_sq_param *p)
1816 {
1817 u64 bitmask = 0;
1818 void *in;
1819 void *sqc;
1820 int inlen;
1821 int err;
1822
1823 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1824 in = kvzalloc(inlen, GFP_KERNEL);
1825 if (!in)
1826 return -ENOMEM;
1827
1828 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1829
1830 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1831 MLX5_SET(sqc, sqc, state, p->next_state);
1832 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1833 bitmask |= 1;
1834 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
1835 }
1836 if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
1837 bitmask |= 1 << 2;
1838 MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
1839 }
1840 MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
1841
1842 err = mlx5_core_modify_sq(mdev, sqn, in);
1843
1844 kvfree(in);
1845
1846 return err;
1847 }
1848
1849 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1850 {
1851 mlx5_core_destroy_sq(mdev, sqn);
1852 }
1853
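/* Create an SQ and transition it from RST to RDY, optionally attaching it
 * to a QoS queue group. The SQ is destroyed if the modify step fails.
 */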
1854 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1855 struct mlx5e_sq_param *param,
1856 struct mlx5e_create_sq_param *csp,
1857 u16 qos_queue_group_id,
1858 u32 *sqn)
1859 {
1860 struct mlx5e_modify_sq_param msp = {0};
1861 int err;
1862
1863 err = mlx5e_create_sq(mdev, param, csp, sqn);
1864 if (err)
1865 return err;
1866
1867 msp.curr_state = MLX5_SQC_STATE_RST;
1868 msp.next_state = MLX5_SQC_STATE_RDY;
1869 if (qos_queue_group_id) {
1870 msp.qos_update = true;
1871 msp.qos_queue_group_id = qos_queue_group_id;
1872 }
1873 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1874 if (err)
1875 mlx5e_destroy_sq(mdev, *sqn);
1876
1877 return err;
1878 }
1879
1880 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1881 struct mlx5e_txqsq *sq, u32 rate);
1882
1883 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
1884 struct mlx5e_params *params, struct mlx5e_sq_param *param,
1885 struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
1886 struct mlx5e_sq_stats *sq_stats)
1887 {
1888 struct mlx5e_create_sq_param csp = {};
1889 u32 tx_rate;
1890 int err;
1891
1892 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1893 if (err)
1894 return err;
1895
1896 sq->stats = sq_stats;
1897
1898 csp.tisn = tisn;
1899 csp.tis_lst_sz = 1;
1900 csp.cqn = sq->cq.mcq.cqn;
1901 csp.wq_ctrl = &sq->wq_ctrl;
1902 csp.min_inline_mode = sq->min_inline_mode;
1903 csp.uar_page = c->bfreg->index;
1904 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
1905 if (err)
1906 goto err_free_txqsq;
1907
1908 tx_rate = c->priv->tx_rates[sq->txq_ix];
1909 if (tx_rate)
1910 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1911
1912 if (sq->channel && !params->tx_dim_enabled) {
1913 sq->channel->tx_cq_moder = params->tx_cq_moderation;
1914 } else if (sq->channel) {
1915 u8 cq_period_mode;
1916
1917 cq_period_mode = params->tx_moder_use_cqe_mode ?
1918 DIM_CQ_PERIOD_MODE_START_FROM_CQE :
1919 DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1920 mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
1921 cq_period_mode,
1922 params->tx_dim_enabled);
1923
1924 err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
1925 if (err)
1926 goto err_destroy_sq;
1927 }
1928
1929 return 0;
1930
1931 err_destroy_sq:
1932 mlx5e_destroy_sq(c->mdev, sq->sqn);
1933 err_free_txqsq:
1934 mlx5e_free_txqsq(sq);
1935
1936 return err;
1937 }
1938
1939 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1940 {
1941 sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
1942 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1943 netdev_tx_reset_queue(sq->txq);
1944 netif_tx_start_queue(sq->txq);
1945 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
1946 }
1947
1948 void mlx5e_tx_disable_queue(struct netdev_queue *txq)
1949 {
1950 __netif_tx_lock_bh(txq);
1951 netif_tx_stop_queue(txq);
1952 __netif_tx_unlock_bh(txq);
1953 }
1954
1955 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1956 {
1957 struct mlx5_wq_cyc *wq = &sq->wq;
1958
1959 netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
1960 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1961 synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
1962
1963 mlx5e_tx_disable_queue(sq->txq);
1964
1965 /* last doorbell out, godspeed .. */
1966 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1967 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1968 struct mlx5e_tx_wqe *nop;
1969
1970 sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
1971 .num_wqebbs = 1,
1972 };
1973
1974 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1975 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1976 }
1977 }
1978
1979 void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1980 {
1981 struct mlx5_core_dev *mdev = sq->mdev;
1982 struct mlx5_rate_limit rl = {0};
1983
1984 if (sq->dim)
1985 cancel_work_sync(&sq->dim->work);
1986 cancel_work_sync(&sq->recover_work);
1987 mlx5e_destroy_sq(mdev, sq->sqn);
1988 if (sq->rate_limit) {
1989 rl.rate = sq->rate_limit;
1990 mlx5_rl_remove_rate(mdev, &rl);
1991 }
1992 mlx5e_free_txqsq_descs(sq);
1993 mlx5e_free_txqsq(sq);
1994 }
1995
1996 void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
1997 {
1998 struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
1999 recover_work);
2000
2001 mlx5e_reporter_tx_err_cqe(sq);
2002 }
2003
2004 static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
2005 {
2006 return (struct dim_cq_moder) {
2007 .cq_period_mode = cq_period_mode,
2008 .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
2009 .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
2010 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
2011 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
2012 };
2013 }
2014
2015 bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
2016 bool dim_enabled)
2017 {
2018 bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
2019
2020 if (dim_enabled)
2021 *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
2022 else
2023 *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
2024
2025 return reset_needed;
2026 }
2027
2028 bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
2029 bool dim_enabled, bool keep_dim_state)
2030 {
2031 bool reset = false;
2032 int i, tc;
2033
2034 for (i = 0; i < chs->num; i++) {
2035 for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
2036 if (keep_dim_state)
2037 dim_enabled = !!chs->c[i]->sq[tc].dim;
2038
2039 reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
2040 cq_period_mode, dim_enabled);
2041 }
2042 }
2043
2044 return reset;
2045 }
2046
2047 static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
2048 struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
2049 work_func_t recover_work_func)
2050 {
2051 struct mlx5e_create_sq_param csp = {};
2052 int err;
2053
2054 err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
2055 if (err)
2056 return err;
2057
2058 csp.cqn = sq->cq.mcq.cqn;
2059 csp.wq_ctrl = &sq->wq_ctrl;
2060 csp.min_inline_mode = params->tx_min_inline_mode;
2061 csp.uar_page = c->bfreg->index;
2062 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
2063 if (err)
2064 goto err_free_icosq;
2065
2066 spin_lock_init(&sq->lock);
2067
2068 if (param->is_tls) {
2069 sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
2070 if (IS_ERR(sq->ktls_resync)) {
2071 err = PTR_ERR(sq->ktls_resync);
2072 goto err_destroy_icosq;
2073 }
2074 }
2075 return 0;
2076
2077 err_destroy_icosq:
2078 mlx5e_destroy_sq(c->mdev, sq->sqn);
2079 err_free_icosq:
2080 mlx5e_free_icosq(sq);
2081
2082 return err;
2083 }
2084
2085 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
2086 {
2087 set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
2088 }
2089
2090 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
2091 {
2092 clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
2093 synchronize_net(); /* Sync with NAPI. */
2094 }
2095
2096 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
2097 {
2098 struct mlx5e_channel *c = sq->channel;
2099
2100 if (sq->ktls_resync)
2101 mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
2102 mlx5e_destroy_sq(c->mdev, sq->sqn);
2103 mlx5e_free_icosq_descs(sq);
2104 mlx5e_free_icosq(sq);
2105 }
2106
2107 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
2108 struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
2109 struct mlx5e_xdpsq *sq, bool is_redirect)
2110 {
2111 struct mlx5e_create_sq_param csp = {};
2112 int err;
2113
2114 err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
2115 if (err)
2116 return err;
2117
2118 csp.tis_lst_sz = 1;
2119 csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
2120 c->lag_port, 0); /* tc = 0 */
2121 csp.cqn = sq->cq.mcq.cqn;
2122 csp.wq_ctrl = &sq->wq_ctrl;
2123 csp.min_inline_mode = sq->min_inline_mode;
2124 csp.uar_page = c->bfreg->index;
2125 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2126
2127 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
2128 if (err)
2129 goto err_free_xdpsq;
2130
2131 mlx5e_set_xmit_fp(sq, param->is_mpw);
2132
2133 return 0;
2134
2135 err_free_xdpsq:
2136 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2137 mlx5e_free_xdpsq(sq);
2138
2139 return err;
2140 }
2141
2142 void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
2143 {
2144 struct mlx5e_channel *c = sq->channel;
2145
2146 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
2147 synchronize_net(); /* Sync with NAPI. */
2148
2149 mlx5e_destroy_sq(c->mdev, sq->sqn);
2150 mlx5e_free_xdpsq_descs(sq);
2151 mlx5e_free_xdpsq(sq);
2152 }
2153
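/* Open a dedicated SQ (with its own CQ) for XDP_REDIRECT traffic.
 * Returns the new SQ or an ERR_PTR on failure.
 */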
2154 static struct mlx5e_xdpsq *mlx5e_open_xdpredirect_sq(struct mlx5e_channel *c,
2155 struct mlx5e_params *params,
2156 struct mlx5e_channel_param *cparam,
2157 struct mlx5e_create_cq_param *ccp)
2158 {
2159 struct mlx5e_xdpsq *xdpsq;
2160 int err;
2161
2162 xdpsq = kvzalloc_node(sizeof(*xdpsq), GFP_KERNEL, cpu_to_node(c->cpu));
2163 if (!xdpsq)
2164 return ERR_PTR(-ENOMEM);
2165
2166 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation,
2167 &cparam->xdp_sq.cqp, ccp, &xdpsq->cq);
2168 if (err)
2169 goto err_free_xdpsq;
2170
2171 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, xdpsq, true);
2172 if (err)
2173 goto err_close_xdpsq_cq;
2174
2175 return xdpsq;
2176
2177 err_close_xdpsq_cq:
2178 mlx5e_close_cq(&xdpsq->cq);
2179 err_free_xdpsq:
2180 kvfree(xdpsq);
2181
2182 return ERR_PTR(err);
2183 }
2184
2185 static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
2186 {
2187 mlx5e_close_xdpsq(xdpsq);
2188 mlx5e_close_cq(&xdpsq->cq);
2189 kvfree(xdpsq);
2190 }
2191
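/* Create the CQ work queue, set up the doorbell records and completion
 * handlers, and initialize the op_own/validity fields of every CQE before
 * the CQ is handed to hardware.
 */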
2192 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
2193 struct net_device *netdev,
2194 struct workqueue_struct *workqueue,
2195 struct mlx5_uars_page *uar,
2196 struct mlx5e_cq_param *param,
2197 struct mlx5e_cq *cq)
2198 {
2199 struct mlx5_core_cq *mcq = &cq->mcq;
2200 int err;
2201 u32 i;
2202
2203 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
2204 &cq->wq_ctrl);
2205 if (err)
2206 return err;
2207
2208 mcq->cqe_sz = 64;
2209 mcq->set_ci_db = cq->wq_ctrl.db.db;
2210 mcq->arm_db = cq->wq_ctrl.db.db + 1;
2211 *mcq->set_ci_db = 0;
2212 mcq->vector = param->eq_ix;
2213 mcq->comp = mlx5e_completion_event;
2214 mcq->event = mlx5e_cq_error_event;
2215
2216 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
2217 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
2218
2219 cqe->op_own = 0xf1;
2220 cqe->validity_iteration_count = 0xff;
2221 }
2222
2223 cq->mdev = mdev;
2224 cq->netdev = netdev;
2225 cq->workqueue = workqueue;
2226 cq->uar = uar;
2227
2228 return 0;
2229 }
2230
2231 static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
2232 struct mlx5e_cq_param *param,
2233 struct mlx5e_create_cq_param *ccp,
2234 struct mlx5e_cq *cq)
2235 {
2236 int err;
2237
2238 param->wq.buf_numa_node = ccp->node;
2239 param->wq.db_numa_node = ccp->node;
2240 param->eq_ix = ccp->ix;
2241
2242 err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq,
2243 ccp->uar, param, cq);
2244
2245 cq->napi = ccp->napi;
2246 cq->ch_stats = ccp->ch_stats;
2247
2248 return err;
2249 }
2250
2251 static void mlx5e_free_cq(struct mlx5e_cq *cq)
2252 {
2253 mlx5_wq_destroy(&cq->wq_ctrl);
2254 }
2255
2256 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
2257 {
2258 u32 out[MLX5_ST_SZ_DW(create_cq_out)];
2259 struct mlx5_core_dev *mdev = cq->mdev;
2260 struct mlx5_core_cq *mcq = &cq->mcq;
2261
2262 void *in;
2263 void *cqc;
2264 int inlen;
2265 int eqn;
2266 int err;
2267
2268 err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
2269 if (err)
2270 return err;
2271
2272 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
2273 sizeof(u64) * cq->wq_ctrl.buf.npages;
2274 in = kvzalloc(inlen, GFP_KERNEL);
2275 if (!in)
2276 return -ENOMEM;
2277
2278 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
2279
2280 memcpy(cqc, param->cqc, sizeof(param->cqc));
2281
2282 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
2283 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
2284
2285 MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
2286
2287 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
2288 MLX5_SET(cqc, cqc, uar_page, cq->uar->index);
2289 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
2290 MLX5_ADAPTER_PAGE_SHIFT);
2291 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
2292
2293 err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
2294
2295 kvfree(in);
2296
2297 if (err)
2298 return err;
2299
2300 mlx5e_cq_arm(cq);
2301
2302 return 0;
2303 }
2304
2305 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
2306 {
2307 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
2308 }
2309
2310 int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
2311 struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
2312 struct mlx5e_cq *cq)
2313 {
2314 int err;
2315
2316 err = mlx5e_alloc_cq(mdev, param, ccp, cq);
2317 if (err)
2318 return err;
2319
2320 err = mlx5e_create_cq(cq, param);
2321 if (err)
2322 goto err_free_cq;
2323
2324 if (MLX5_CAP_GEN(mdev, cq_moderation) &&
2325 MLX5_CAP_GEN(mdev, cq_period_mode_modify))
2326 mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
2327 mlx5e_cq_period_mode(moder.cq_period_mode));
2328 return 0;
2329
2330 err_free_cq:
2331 mlx5e_free_cq(cq);
2332
2333 return err;
2334 }
2335
2336 void mlx5e_close_cq(struct mlx5e_cq *cq)
2337 {
2338 mlx5e_destroy_cq(cq);
2339 mlx5e_free_cq(cq);
2340 }
2341
2342 int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
2343 u8 cq_period_mode)
2344 {
2345 u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
2346 void *cqc;
2347
2348 MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
2349 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
2350 MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
2351 MLX5_SET(modify_cq_in, in,
2352 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
2353 MLX5_CQ_MODIFY_PERIOD_MODE);
2354
2355 return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
2356 }
2357
2358 int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
2359 u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
2360 {
2361 u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
2362 void *cqc;
2363
2364 MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
2365 cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
2366 MLX5_SET(cqc, cqc, cq_period, cq_period);
2367 MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
2368 MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
2369 MLX5_SET(modify_cq_in, in,
2370 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
2371 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
2372
2373 return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
2374 }
2375
2376 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
2377 struct mlx5e_params *params,
2378 struct mlx5e_create_cq_param *ccp,
2379 struct mlx5e_channel_param *cparam)
2380 {
2381 int err;
2382 int tc;
2383
2384 for (tc = 0; tc < c->num_tc; tc++) {
2385 err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
2386 ccp, &c->sq[tc].cq);
2387 if (err)
2388 goto err_close_tx_cqs;
2389 }
2390
2391 return 0;
2392
2393 err_close_tx_cqs:
2394 for (tc--; tc >= 0; tc--)
2395 mlx5e_close_cq(&c->sq[tc].cq);
2396
2397 return err;
2398 }
2399
2400 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
2401 {
2402 int tc;
2403
2404 for (tc = 0; tc < c->num_tc; tc++)
2405 mlx5e_close_cq(&c->sq[tc].cq);
2406 }
2407
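/* Map a TXQ index back to its traffic class by finding the tc_to_txq
 * range that contains it.
 */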
2408 static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
2409 {
2410 int tc;
2411
2412 for (tc = 0; tc < TC_MAX_QUEUE; tc++)
2413 if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
2414 return tc;
2415
2416 WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
2417 return -ENOENT;
2418 }
2419
2420 static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
2421 u32 *hw_id)
2422 {
2423 int tc;
2424
2425 if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
2426 *hw_id = 0;
2427 return 0;
2428 }
2429
2430 tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
2431 if (tc < 0)
2432 return tc;
2433
2434 if (tc >= params->mqprio.num_tc) {
2435 WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
2436 tc, params->mqprio.num_tc);
2437 return -EINVAL;
2438 }
2439
2440 *hw_id = params->mqprio.channel.hw_id[tc];
2441 return 0;
2442 }
2443
2444 static int mlx5e_open_sqs(struct mlx5e_channel *c,
2445 struct mlx5e_params *params,
2446 struct mlx5e_channel_param *cparam)
2447 {
2448 int err, tc;
2449
2450 for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
2451 int txq_ix = c->ix + tc * params->num_channels;
2452 u32 qos_queue_group_id;
2453 u32 tisn;
2454
2455 tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
2456 c->lag_port, tc);
2457 err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
2458 if (err)
2459 goto err_close_sqs;
2460
2461 err = mlx5e_open_txqsq(c, tisn, txq_ix,
2462 params, &cparam->txq_sq, &c->sq[tc], tc,
2463 qos_queue_group_id,
2464 &c->priv->channel_stats[c->ix]->sq[tc]);
2465 if (err)
2466 goto err_close_sqs;
2467 }
2468
2469 return 0;
2470
2471 err_close_sqs:
2472 for (tc--; tc >= 0; tc--)
2473 mlx5e_close_txqsq(&c->sq[tc]);
2474
2475 return err;
2476 }
2477
2478 static void mlx5e_close_sqs(struct mlx5e_channel *c)
2479 {
2480 int tc;
2481
2482 for (tc = 0; tc < c->num_tc; tc++)
2483 mlx5e_close_txqsq(&c->sq[tc]);
2484 }
2485
2486 static int mlx5e_set_sq_maxrate(struct net_device *dev,
2487 struct mlx5e_txqsq *sq, u32 rate)
2488 {
2489 struct mlx5e_priv *priv = netdev_priv(dev);
2490 struct mlx5_core_dev *mdev = priv->mdev;
2491 struct mlx5e_modify_sq_param msp = {0};
2492 struct mlx5_rate_limit rl = {0};
2493 u16 rl_index = 0;
2494 int err;
2495
2496 if (rate == sq->rate_limit)
2497 /* nothing to do */
2498 return 0;
2499
2500 if (sq->rate_limit) {
2501 rl.rate = sq->rate_limit;
2502 /* remove current rl index to free space to next ones */
2503 mlx5_rl_remove_rate(mdev, &rl);
2504 }
2505
2506 sq->rate_limit = 0;
2507
2508 if (rate) {
2509 rl.rate = rate;
2510 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
2511 if (err) {
2512 netdev_err(dev, "Failed configuring rate %u: %d\n",
2513 rate, err);
2514 return err;
2515 }
2516 }
2517
2518 msp.curr_state = MLX5_SQC_STATE_RDY;
2519 msp.next_state = MLX5_SQC_STATE_RDY;
2520 msp.rl_index = rl_index;
2521 msp.rl_update = true;
2522 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
2523 if (err) {
2524 netdev_err(dev, "Failed configuring rate %u: %d\n",
2525 rate, err);
2526 /* remove the rate from the table */
2527 if (rate)
2528 mlx5_rl_remove_rate(mdev, &rl);
2529 return err;
2530 }
2531
2532 sq->rate_limit = rate;
2533 return 0;
2534 }
2535
2536 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2537 {
2538 struct mlx5e_priv *priv = netdev_priv(dev);
2539 struct mlx5_core_dev *mdev = priv->mdev;
2540 struct mlx5e_txqsq *sq = priv->txq2sq[index];
2541 int err = 0;
2542
2543 if (!mlx5_rl_is_supported(mdev)) {
2544 netdev_err(dev, "Rate limiting is not supported on this device\n");
2545 return -EINVAL;
2546 }
2547
2548 /* rate is given in Mb/sec, HW config is in Kb/sec */
2549 rate = rate << 10;
2550
2551 /* Check whether rate in valid range, 0 is always valid */
2552 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
2553 netdev_err(dev, "TX rate %u, is not in range\n", rate);
2554 return -ERANGE;
2555 }
2556
2557 mutex_lock(&priv->state_lock);
2558 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2559 err = mlx5e_set_sq_maxrate(dev, sq, rate);
2560 if (!err)
2561 priv->tx_rates[index] = rate;
2562 mutex_unlock(&priv->state_lock);
2563
2564 return err;
2565 }
2566
2567 static int mlx5e_open_rxq_rq(struct mlx5e_channel *c,
2568 struct mlx5e_params *params,
2569 struct mlx5e_rq_param *rq_param,
2570 struct mlx5e_rq_opt_param *rqo)
2571 {
2572 u16 q_counter = c->priv->q_counter[c->sd_ix];
2573 int err;
2574
2575 err = mlx5e_init_rxq_rq(c, params, rq_param->xdp_frag_size, &c->rq);
2576 if (err)
2577 return err;
2578
2579 return mlx5e_open_rq(params, rq_param, rqo, cpu_to_node(c->cpu),
2580 q_counter, &c->rq);
2581 }
2582
2583 static struct mlx5e_icosq *
2584 mlx5e_open_async_icosq(struct mlx5e_channel *c,
2585 struct mlx5e_params *params,
2586 struct mlx5e_channel_param *cparam,
2587 struct mlx5e_create_cq_param *ccp)
2588 {
2589 struct dim_cq_moder icocq_moder = {0, 0};
2590 struct mlx5e_icosq *async_icosq;
2591 int err;
2592
2593 async_icosq = kvzalloc_node(sizeof(*async_icosq), GFP_KERNEL,
2594 cpu_to_node(c->cpu));
2595 if (!async_icosq)
2596 return ERR_PTR(-ENOMEM);
2597
2598 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, ccp,
2599 &async_icosq->cq);
2600 if (err)
2601 goto err_free_async_icosq;
2602
2603 err = mlx5e_open_icosq(c, params, &cparam->async_icosq, async_icosq,
2604 mlx5e_async_icosq_err_cqe_work);
2605 if (err)
2606 goto err_close_async_icosq_cq;
2607
2608 return async_icosq;
2609
2610 err_close_async_icosq_cq:
2611 mlx5e_close_cq(&async_icosq->cq);
2612 err_free_async_icosq:
2613 kvfree(async_icosq);
2614 return ERR_PTR(err);
2615 }
2616
2617 static void mlx5e_close_async_icosq(struct mlx5e_icosq *async_icosq)
2618 {
2619 mlx5e_close_icosq(async_icosq);
2620 mlx5e_close_cq(&async_icosq->cq);
2621 kvfree(async_icosq);
2622 }
2623
2624 static int mlx5e_open_queues(struct mlx5e_channel *c,
2625 struct mlx5e_params *params,
2626 struct mlx5e_channel_param *cparam,
2627 bool async_icosq_needed)
2628 {
2629 const struct net_device_ops *netdev_ops = c->netdev->netdev_ops;
2630 struct dim_cq_moder icocq_moder = {0, 0};
2631 struct mlx5e_create_cq_param ccp;
2632 int err;
2633
2634 mlx5e_build_create_cq_param(&ccp, c);
2635
2636 err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
2637 &c->icosq.cq);
2638 if (err)
2639 return err;
2640
2641 err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
2642 if (err)
2643 goto err_close_icosq_cq;
2644
2645 if (netdev_ops->ndo_xdp_xmit && c->xdp) {
2646 c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp);
2647 if (IS_ERR(c->xdpsq)) {
2648 err = PTR_ERR(c->xdpsq);
2649 goto err_close_tx_cqs;
2650 }
2651 }
2652
2653 err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
2654 &c->rq.cq);
2655 if (err)
2656 goto err_close_xdpredirect_sq;
2657
2658 err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
2659 &ccp, &c->rq_xdpsq.cq) : 0;
2660 if (err)
2661 goto err_close_rx_cq;
2662
2663 if (async_icosq_needed) {
2664 c->async_icosq = mlx5e_open_async_icosq(c, params, cparam,
2665 &ccp);
2666 if (IS_ERR(c->async_icosq)) {
2667 err = PTR_ERR(c->async_icosq);
2668 goto err_close_rq_xdpsq_cq;
2669 }
2670 }
2671
2672 mutex_init(&c->icosq_recovery_lock);
2673
2674 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
2675 mlx5e_icosq_err_cqe_work);
2676 if (err)
2677 goto err_close_async_icosq;
2678
2679 err = mlx5e_open_sqs(c, params, cparam);
2680 if (err)
2681 goto err_close_icosq;
2682
2683 err = mlx5e_open_rxq_rq(c, params, &cparam->rq, &cparam->rq_opt);
2684 if (err)
2685 goto err_close_sqs;
2686
2687 if (c->xdp) {
2688 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
2689 &c->rq_xdpsq, false);
2690 if (err)
2691 goto err_close_rq;
2692 }
2693
2694 return 0;
2695
2696 err_close_rq:
2697 mlx5e_close_rq(&c->rq);
2698
2699 err_close_sqs:
2700 mlx5e_close_sqs(c);
2701
2702 err_close_icosq:
2703 mlx5e_close_icosq(&c->icosq);
2704
2705 err_close_async_icosq:
2706 if (c->async_icosq)
2707 mlx5e_close_async_icosq(c->async_icosq);
2708
2709 err_close_rq_xdpsq_cq:
2710 if (c->xdp)
2711 mlx5e_close_cq(&c->rq_xdpsq.cq);
2712
2713 err_close_rx_cq:
2714 mlx5e_close_cq(&c->rq.cq);
2715
2716 err_close_xdpredirect_sq:
2717 if (c->xdpsq)
2718 mlx5e_close_xdpredirect_sq(c->xdpsq);
2719
2720 err_close_tx_cqs:
2721 mlx5e_close_tx_cqs(c);
2722
2723 err_close_icosq_cq:
2724 mlx5e_close_cq(&c->icosq.cq);
2725
2726 return err;
2727 }
2728
2729 static void mlx5e_close_queues(struct mlx5e_channel *c)
2730 {
2731 if (c->xdp)
2732 mlx5e_close_xdpsq(&c->rq_xdpsq);
2733 /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
2734 cancel_work_sync(&c->icosq.recover_work);
2735 mlx5e_close_rq(&c->rq);
2736 mlx5e_close_sqs(c);
2737 mlx5e_close_icosq(&c->icosq);
2738 mutex_destroy(&c->icosq_recovery_lock);
2739 if (c->async_icosq)
2740 mlx5e_close_async_icosq(c->async_icosq);
2741 if (c->xdp)
2742 mlx5e_close_cq(&c->rq_xdpsq.cq);
2743 mlx5e_close_cq(&c->rq.cq);
2744 if (c->xdpsq)
2745 mlx5e_close_xdpredirect_sq(c->xdpsq);
2746 mlx5e_close_tx_cqs(c);
2747 mlx5e_close_cq(&c->icosq.cq);
2748 }
2749
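/* Spread channels across LAG ports in a round-robin manner; non-PF
 * functions bias the starting port by their vhca_id.
 */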
2750 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
2751 {
2752 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
2753
2754 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
2755 }
2756
2757 static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
2758 {
2759 if (ix > priv->stats_nch) {
2760 netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
2761 priv->stats_nch);
2762 return -EINVAL;
2763 }
2764
2765 if (priv->channel_stats[ix])
2766 return 0;
2767
2768 /* Asymmetric dynamic memory allocation.
2769 * Freed in mlx5e_priv_arrays_free, not on channel closure.
2770 */
2771 netdev_dbg(priv->netdev, "Creating channel stats %d\n", ix);
2772 priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
2773 GFP_KERNEL, cpu_to_node(cpu));
2774 if (!priv->channel_stats[ix])
2775 return -ENOMEM;
2776 priv->stats_nch++;
2777
2778 return 0;
2779 }
2780
2781 void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
2782 {
2783 struct mlx5e_icosq *sq = &c->icosq;
2784 bool locked;
2785
2786 set_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state);
2787 synchronize_net();
2788
2789 locked = mlx5e_icosq_sync_lock(sq);
2790 mlx5e_trigger_irq(sq);
2791 mlx5e_icosq_sync_unlock(sq, locked);
2792
2793 clear_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state);
2794 }
2795
2796 void mlx5e_trigger_napi_async_icosq(struct mlx5e_channel *c)
2797 {
2798 struct mlx5e_icosq *sq = c->async_icosq;
2799
2800 spin_lock_bh(&sq->lock);
2801 mlx5e_trigger_irq(sq);
2802 spin_unlock_bh(&sq->lock);
2803 }
2804
2805 void mlx5e_trigger_napi_sched(struct napi_struct *napi)
2806 {
2807 local_bh_disable();
2808 napi_schedule(napi);
2809 local_bh_enable();
2810 }
2811
2812 static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
2813 {
2814 struct mlx5e_hw_objs *hw_objs = &c->mdev->mlx5e_res.hw_objs;
2815
2816 /* No dedicated Ethernet doorbells, use the global one. */
2817 if (hw_objs->num_bfregs == 0) {
2818 c->bfreg = &c->mdev->priv.bfreg;
2819 return;
2820 }
2821
2822 /* Round-robin between doorbells. */
2823 c->bfreg = hw_objs->bfregs + c->vec_ix % hw_objs->num_bfregs;
2824 }
2825
2826 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
2827 struct mlx5e_params *params,
2828 struct netdev_queue_config *qcfg,
2829 struct xsk_buff_pool *xsk_pool,
2830 struct mlx5e_channel **cp)
2831 {
2832 struct net_device *netdev = priv->netdev;
2833 struct mlx5e_channel_param *cparam;
2834 struct mlx5_core_dev *mdev;
2835 struct mlx5e_xsk_param xsk;
2836 bool async_icosq_needed;
2837 struct mlx5e_channel *c;
2838 unsigned int irq;
2839 int vec_ix;
2840 int cpu;
2841 int err;
2842
2843 mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
2844 vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
2845 cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
2846
2847 err = mlx5_comp_irqn_get(mdev, vec_ix, &irq);
2848 if (err)
2849 return err;
2850
2851 err = mlx5e_channel_stats_alloc(priv, ix, cpu);
2852 if (err)
2853 return err;
2854
2855 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
2856 cparam = kvzalloc_obj(*cparam);
2857 if (!c || !cparam) {
2858 err = -ENOMEM;
2859 goto err_free;
2860 }
2861
2862 err = mlx5e_build_channel_param(mdev, params, qcfg, cparam);
2863 if (err)
2864 goto err_free;
2865
2866 c->priv = priv;
2867 c->mdev = mdev;
2868 c->ix = ix;
2869 c->vec_ix = vec_ix;
2870 c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
2871 c->cpu = cpu;
2872 c->pdev = mlx5_core_dma_dev(mdev);
2873 c->netdev = priv->netdev;
2874 c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
2875 c->num_tc = mlx5e_get_dcb_num_tc(params);
2876 c->xdp = !!params->xdp_prog;
2877 c->stats = &priv->channel_stats[ix]->ch;
2878 c->aff_mask = irq_get_effective_affinity_mask(irq);
2879 c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
2880
2881 mlx5e_channel_pick_doorbell(c);
2882
2883 netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
2884 netif_napi_set_irq_locked(&c->napi, irq);
2885
2886 async_icosq_needed = !!params->xdp_prog || priv->ktls_rx_was_enabled;
2887 err = mlx5e_open_queues(c, params, cparam, async_icosq_needed);
2888 if (unlikely(err))
2889 goto err_napi_del;
2890
2891 if (xsk_pool) {
2892 mlx5e_build_xsk_param(xsk_pool, &xsk);
2893 mlx5e_build_xsk_channel_param(priv->mdev, params, &xsk, cparam);
2894 err = mlx5e_open_xsk(priv, params, cparam, xsk_pool, c);
2895 if (unlikely(err))
2896 goto err_close_queues;
2897 }
2898
2899 *cp = c;
2900
2901 kvfree(cparam);
2902 return 0;
2903
2904 err_close_queues:
2905 mlx5e_close_queues(c);
2906
2907 err_napi_del:
2908 netif_napi_del_locked(&c->napi);
2909
2910 err_free:
2911 kvfree(cparam);
2912 kvfree(c);
2913
2914 return err;
2915 }
2916
2917 static void mlx5e_activate_channel(struct mlx5e_channel *c)
2918 {
2919 int tc;
2920
2921 napi_enable_locked(&c->napi);
2922
2923 for (tc = 0; tc < c->num_tc; tc++)
2924 mlx5e_activate_txqsq(&c->sq[tc]);
2925 mlx5e_activate_icosq(&c->icosq);
2926 if (c->async_icosq)
2927 mlx5e_activate_icosq(c->async_icosq);
2928
2929 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2930 mlx5e_activate_xsk(c);
2931 else
2932 mlx5e_activate_rq(&c->rq);
2933
2934 netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
2935 }
2936
2937 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2938 {
2939 int tc;
2940
2941 netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
2942
2943 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2944 mlx5e_deactivate_xsk(c);
2945 else
2946 mlx5e_deactivate_rq(&c->rq);
2947
2948 if (c->async_icosq)
2949 mlx5e_deactivate_icosq(c->async_icosq);
2950 mlx5e_deactivate_icosq(&c->icosq);
2951 for (tc = 0; tc < c->num_tc; tc++)
2952 mlx5e_deactivate_txqsq(&c->sq[tc]);
2953 mlx5e_qos_deactivate_queues(c);
2954
2955 napi_disable_locked(&c->napi);
2956 }
2957
2958 static void mlx5e_close_channel(struct mlx5e_channel *c)
2959 {
2960 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
2961 mlx5e_close_xsk(c);
2962 mlx5e_close_queues(c);
2963 mlx5e_qos_close_queues(c);
2964 netif_napi_del_locked(&c->napi);
2965
2966 kvfree(c);
2967 }
2968
2969 int mlx5e_open_channels(struct mlx5e_priv *priv,
2970 struct mlx5e_channels *chs)
2971 {
2972 int err = -ENOMEM;
2973 int i;
2974
2975 chs->num = chs->params.num_channels;
2976
2977 chs->c = kzalloc_objs(struct mlx5e_channel *, chs->num);
2978 if (!chs->c)
2979 goto err_out;
2980
2981 for (i = 0; i < chs->num; i++) {
2982 struct xsk_buff_pool *xsk_pool = NULL;
2983
2984 if (chs->params.xdp_prog)
2985 xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
2986
2987 err = mlx5e_open_channel(priv, i, &chs->params, NULL,
2988 xsk_pool, &chs->c[i]);
2989 if (err)
2990 goto err_close_channels;
2991 }
2992
2993 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
2994 err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
2995 if (err)
2996 goto err_close_channels;
2997 }
2998
2999 if (priv->htb) {
3000 err = mlx5e_qos_open_queues(priv, chs);
3001 if (err)
3002 goto err_close_ptp;
3003 }
3004
3005 mlx5e_health_channels_update(priv);
3006 return 0;
3007
3008 err_close_ptp:
3009 if (chs->ptp)
3010 mlx5e_ptp_close(chs->ptp);
3011
3012 err_close_channels:
3013 for (i--; i >= 0; i--)
3014 mlx5e_close_channel(chs->c[i]);
3015
3016 kfree(chs->c);
3017 err_out:
3018 chs->num = 0;
3019 return err;
3020 }
3021
3022 static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
3023 {
3024 int i;
3025
3026 for (i = 0; i < chs->num; i++)
3027 mlx5e_activate_channel(chs->c[i]);
3028
3029 if (priv->htb)
3030 mlx5e_qos_activate_queues(priv);
3031
3032 for (i = 0; i < chs->num; i++)
3033 mlx5e_trigger_napi_icosq(chs->c[i]);
3034
3035 if (chs->ptp)
3036 mlx5e_ptp_activate_channel(chs->ptp);
3037 }
3038
3039 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
3040 {
3041 int err = 0;
3042 int i;
3043
3044 for (i = 0; i < chs->num; i++) {
3045 int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
3046 struct mlx5e_channel *c = chs->c[i];
3047
3048 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
3049 continue;
3050
3051 err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
3052
3053 /* Don't wait on the XSK RQ, because the newer xdpsock sample
3054 * doesn't provide any Fill Ring entries at the setup stage.
3055 */
3056 }
3057
3058 return err ? -ETIMEDOUT : 0;
3059 }
3060
3061 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
3062 {
3063 int i;
3064
3065 if (chs->ptp)
3066 mlx5e_ptp_deactivate_channel(chs->ptp);
3067
3068 for (i = 0; i < chs->num; i++)
3069 mlx5e_deactivate_channel(chs->c[i]);
3070 }
3071
3072 void mlx5e_close_channels(struct mlx5e_channels *chs)
3073 {
3074 int i;
3075
3076 ASSERT_RTNL();
3077 if (chs->ptp) {
3078 mlx5e_ptp_close(chs->ptp);
3079 chs->ptp = NULL;
3080 }
3081 for (i = 0; i < chs->num; i++)
3082 mlx5e_close_channel(chs->c[i]);
3083
3084 kfree(chs->c);
3085 chs->num = 0;
3086 }
3087
3088 static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
3089 {
3090 struct mlx5e_rx_res *res = priv->rx_res;
3091
3092 return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
3093 }
3094
3095 static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
3096
3097 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
3098 struct mlx5e_params *params, u16 mtu)
3099 {
3100 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
3101 int err;
3102
3103 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
3104 if (err)
3105 return err;
3106
3107 /* Update vport context MTU */
3108 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
3109 return 0;
3110 }
3111
3112 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
3113 struct mlx5e_params *params, u16 *mtu)
3114 {
3115 u16 hw_mtu = 0;
3116 int err;
3117
3118 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
3119 if (err || !hw_mtu) /* fallback to port oper mtu */
3120 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
3121
3122 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
3123 }
3124
3125 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
3126 {
3127 struct mlx5e_params *params = &priv->channels.params;
3128 struct net_device *netdev = priv->netdev;
3129 struct mlx5_core_dev *mdev = priv->mdev;
3130 u16 mtu;
3131 int err;
3132
3133 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
3134 if (err)
3135 return err;
3136
3137 mlx5e_query_mtu(mdev, params, &mtu);
3138 if (mtu != params->sw_mtu)
3139 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
3140 __func__, mtu, params->sw_mtu);
3141
3142 params->sw_mtu = mtu;
3143 return 0;
3144 }
3145
3146 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
3147
3148 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
3149 {
3150 struct mlx5e_params *params = &priv->channels.params;
3151 struct net_device *netdev = priv->netdev;
3152 struct mlx5_core_dev *mdev = priv->mdev;
3153 u16 max_mtu;
3154
3155 /* MTU range: 68 - hw-specific max */
3156 netdev->min_mtu = ETH_MIN_MTU;
3157
3158 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
3159 netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
3160 ETH_MAX_MTU);
3161 }
3162
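/* Program the netdev TC configuration: reset any previous mapping and,
 * when more than one TC is used, set the per-TC TXQ count and offset.
 */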
3163 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
3164 struct netdev_tc_txq *tc_to_txq)
3165 {
3166 int tc, err;
3167
3168 netdev_reset_tc(netdev);
3169
3170 if (ntc == 1)
3171 return 0;
3172
3173 err = netdev_set_num_tc(netdev, ntc);
3174 if (err) {
3175 netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
3176 return err;
3177 }
3178
3179 for (tc = 0; tc < ntc; tc++) {
3180 u16 count, offset;
3181
3182 count = tc_to_txq[tc].count;
3183 offset = tc_to_txq[tc].offset;
3184 netdev_set_tc_queue(netdev, tc, count, offset);
3185 }
3186
3187 return 0;
3188 }
3189
3190 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
3191 {
3192 int nch, ntc, num_txqs, err;
3193 int qos_queues = 0;
3194
3195 if (priv->htb)
3196 qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
3197
3198 nch = priv->channels.params.num_channels;
3199 ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
3200 num_txqs = nch * ntc + qos_queues;
3201 if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
3202 num_txqs += ntc;
3203
3204 netdev_dbg(priv->netdev, "Setting num_txqs %d\n", num_txqs);
3205 err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
3206 if (err)
3207 netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
3208
3209 return err;
3210 }
3211
3212 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
3213 struct mlx5e_params *params)
3214 {
3215 int ix;
3216
3217 for (ix = 0; ix < params->num_channels; ix++) {
3218 int num_comp_vectors, irq, vec_ix;
3219 struct mlx5_core_dev *mdev;
3220
3221 mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
3222 num_comp_vectors = mlx5_comp_vectors_max(mdev);
3223 cpumask_clear(priv->scratchpad.cpumask);
3224 vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
3225
3226 for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
3227 int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
3228
3229 cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
3230 }
3231
3232 netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
3233 }
3234 }
3235
3236 static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
3237 {
3238 struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
3239 struct net_device *netdev = priv->netdev;
3240 int old_num_txqs, old_ntc;
3241 int nch, ntc;
3242 int err;
3243 int i;
3244
3245 old_num_txqs = netdev->real_num_tx_queues;
3246 old_ntc = netdev->num_tc ? : 1;
3247 for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
3248 old_tc_to_txq[i] = netdev->tc_to_txq[i];
3249
3250 nch = priv->channels.params.num_channels;
3251 ntc = priv->channels.params.mqprio.num_tc;
3252 tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
3253
3254 err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
3255 if (err)
3256 goto err_out;
3257 err = mlx5e_update_tx_netdev_queues(priv);
3258 if (err)
3259 goto err_tcs;
3260 mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
3261
3262 return 0;
3263
3264 err_tcs:
3265 WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
3266 old_tc_to_txq));
3267 err_out:
3268 return err;
3269 }
3270
3271 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
3272
3273 static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
3274 {
3275 u16 count = priv->channels.params.num_channels;
3276 struct net_device *netdev = priv->netdev;
3277 int old_num_rxqs;
3278 int err;
3279
3280 old_num_rxqs = netdev->real_num_rx_queues;
3281 err = netif_set_real_num_rx_queues(netdev, count);
3282 if (err) {
3283 netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
3284 __func__, err);
3285 return err;
3286 }
3287 err = mlx5e_update_tc_and_tx_queues(priv);
3288 if (err) {
3289 /* mlx5e_update_tc_and_tx_queues can fail if channels or TCs number increases.
3290 * Since channel number changed, it increased. That means, the call to
3291 * netif_set_real_num_rx_queues below should not fail, because it
3292 * decreases the number of RX queues.
3293 */
3294 WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
3295 return err;
3296 }
3297
3298 /* This function may be called on attach, before priv->rx_res is created. */
3299 if (priv->rx_res) {
3300 mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count);
3301
3302 if (!netif_is_rxfh_configured(priv->netdev))
3303 mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
3304 }
3305
3306 return 0;
3307 }
3308
3309 MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
3310
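/* Rebuild the txq_ix -> SQ (and SQ stats) lookup tables for the regular
 * channels and, when enabled, the PTP SQs.
 */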
3311 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
3312 {
3313 int i, ch, tc, num_tc;
3314
3315 ch = priv->channels.num;
3316 num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
3317
3318 for (i = 0; i < ch; i++) {
3319 for (tc = 0; tc < num_tc; tc++) {
3320 struct mlx5e_channel *c = priv->channels.c[i];
3321 struct mlx5e_txqsq *sq = &c->sq[tc];
3322
3323 priv->txq2sq[sq->txq_ix] = sq;
3324 priv->txq2sq_stats[sq->txq_ix] = sq->stats;
3325 }
3326 }
3327
3328 if (!priv->channels.ptp)
3329 goto out;
3330
3331 if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
3332 goto out;
3333
3334 for (tc = 0; tc < num_tc; tc++) {
3335 struct mlx5e_ptp *c = priv->channels.ptp;
3336 struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
3337
3338 priv->txq2sq[sq->txq_ix] = sq;
3339 priv->txq2sq_stats[sq->txq_ix] = sq->stats;
3340 }
3341
3342 out:
3343 /* Make the change to txq2sq visible before the queue is started.
3344 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
3345 * which pairs with this barrier.
3346 */
3347 smp_wmb();
3348 }
3349
3350 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
3351 {
3352 mlx5e_build_txq_maps(priv);
3353 mlx5e_activate_channels(priv, &priv->channels);
3354 mlx5e_xdp_tx_enable(priv);
3355
3356 /* dev_watchdog() wants all TX queues to be started when the carrier is
3357 * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
3358 * Make it happy to avoid TX timeout false alarms.
3359 */
3360 netif_tx_start_all_queues(priv->netdev);
3361
3362 if (mlx5e_is_vport_rep(priv))
3363 mlx5e_rep_activate_channels(priv);
3364
3365 set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
3366
3367 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
3368
3369 if (priv->rx_res)
3370 mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
3371 }
3372
3373 static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv)
3374 {
3375 WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state));
3376 if (current_work() != &priv->tx_timeout_work)
3377 cancel_work_sync(&priv->tx_timeout_work);
3378 }
3379
3380 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
3381 {
3382 if (priv->rx_res)
3383 mlx5e_rx_res_channels_deactivate(priv->rx_res);
3384
3385 clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state);
3386 mlx5e_cancel_tx_timeout_work(priv);
3387
3388 if (mlx5e_is_vport_rep(priv))
3389 mlx5e_rep_deactivate_channels(priv);
3390
3391 /* The results of ndo_select_queue are unreliable, while netdev config
3392 * is being changed (real_num_tx_queues, num_tc). Stop all queues to
3393 * prevent ndo_start_xmit from being called, so that it can assume that
3394 * the selected queue is always valid.
3395 */
3396 netif_tx_disable(priv->netdev);
3397
3398 mlx5e_xdp_tx_disable(priv);
3399 mlx5e_deactivate_channels(&priv->channels);
3400 }
3401
3402 static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
3403 struct mlx5e_params *new_params,
3404 mlx5e_fp_preactivate preactivate,
3405 void *context)
3406 {
3407 struct mlx5e_params old_params;
3408
3409 old_params = priv->channels.params;
3410 priv->channels.params = *new_params;
3411
3412 if (preactivate) {
3413 int err;
3414
3415 err = preactivate(priv, context);
3416 if (err) {
3417 priv->channels.params = old_params;
3418 return err;
3419 }
3420 }
3421
3422 mlx5e_set_xdp_feature(priv);
3423 return 0;
3424 }
3425
3426 static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
3427 struct mlx5e_channels *old_chs,
3428 struct mlx5e_channels *new_chs,
3429 mlx5e_fp_preactivate preactivate,
3430 void *context)
3431 {
3432 struct net_device *netdev = priv->netdev;
3433 int carrier_ok;
3434 int err = 0;
3435
3436 carrier_ok = netif_carrier_ok(netdev);
3437 netif_carrier_off(netdev);
3438
3439 mlx5e_deactivate_priv_channels(priv);
3440
3441 priv->channels = *new_chs;
3442
3443 /* New channels are ready to roll, call the preactivate hook if needed
3444 * to modify HW settings or update kernel parameters.
3445 */
3446 if (preactivate) {
3447 err = preactivate(priv, context);
3448 if (err) {
3449 priv->channels = *old_chs;
3450 goto out;
3451 }
3452 }
3453
3454 mlx5e_set_xdp_feature(priv);
3455 if (!MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
3456 mlx5e_close_channels(old_chs);
3457 priv->profile->update_rx(priv);
3458
3459 mlx5e_selq_apply(&priv->selq);
3460 out:
3461 mlx5e_activate_priv_channels(priv);
3462
3463 /* return carrier back if needed */
3464 if (carrier_ok)
3465 netif_carrier_on(netdev);
3466
3467 return err;
3468 }
3469
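/* Safely apply new channel parameters. Without a reset only the params
 * are swapped; with a reset the new channels are opened first, traffic is
 * switched over with the carrier off, and the old channels are closed
 * (before or after the RX update depending on tis_tir_td_order).
 */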
3470 int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
3471 struct mlx5e_params *params,
3472 mlx5e_fp_preactivate preactivate,
3473 void *context, bool reset)
3474 {
3475 struct mlx5e_channels *old_chs, *new_chs;
3476 int err;
3477
3478 reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
3479 if (!reset)
3480 return mlx5e_switch_priv_params(priv, params, preactivate, context);
3481
3482 old_chs = kzalloc_obj(*old_chs);
3483 new_chs = kzalloc_obj(*new_chs);
3484 if (!old_chs || !new_chs) {
3485 err = -ENOMEM;
3486 goto err_free_chs;
3487 }
3488
3489 new_chs->params = *params;
3490
3491 mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
3492
3493 err = mlx5e_open_channels(priv, new_chs);
3494 if (err)
3495 goto err_cancel_selq;
3496
3497 *old_chs = priv->channels;
3498
3499 err = mlx5e_switch_priv_channels(priv, old_chs, new_chs,
3500 preactivate, context);
3501 if (err)
3502 goto err_close;
3503
3504 if (MLX5_CAP_GEN(priv->mdev, tis_tir_td_order))
3505 mlx5e_close_channels(old_chs);
3506
3507 kfree(new_chs);
3508 kfree(old_chs);
3509 return 0;
3510
3511 err_close:
3512 mlx5e_close_channels(new_chs);
3513
3514 err_cancel_selq:
3515 mlx5e_selq_cancel(&priv->selq);
3516 err_free_chs:
3517 kfree(new_chs);
3518 kfree(old_chs);
3519 return err;
3520 }
3521
3522 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
3523 {
3524 return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
3525 }
3526
3527 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
3528 {
3529 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3530 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3531 }
3532
3533 static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
3534 enum mlx5_port_status state)
3535 {
3536 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3537 int vport_admin_state;
3538
3539 mlx5_set_port_admin_status(mdev, state);
3540
3541 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
3542 !MLX5_CAP_GEN(mdev, uplink_follow))
3543 return;
3544
3545 if (state == MLX5_PORT_UP)
3546 vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
3547 else
3548 vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3549
3550 mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
3551 }
3552
3553 int mlx5e_open_locked(struct net_device *netdev)
3554 {
3555 struct mlx5e_priv *priv = netdev_priv(netdev);
3556 int err;
3557
3558 mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
3559
3560 set_bit(MLX5E_STATE_OPENED, &priv->state);
3561
3562 err = mlx5e_open_channels(priv, &priv->channels);
3563 if (err)
3564 goto err_clear_state_opened_flag;
3565
3566 err = priv->profile->update_rx(priv);
3567 if (err)
3568 goto err_close_channels;
3569
3570 mlx5e_selq_apply(&priv->selq);
3571 mlx5e_activate_priv_channels(priv);
3572 mlx5e_apply_traps(priv, true);
3573 if (priv->profile->update_carrier)
3574 priv->profile->update_carrier(priv);
3575
3576 mlx5e_queue_update_stats(priv);
3577 return 0;
3578
3579 err_close_channels:
3580 mlx5e_close_channels(&priv->channels);
3581 err_clear_state_opened_flag:
3582 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3583 mlx5e_selq_cancel(&priv->selq);
3584 return err;
3585 }
3586
mlx5e_open(struct net_device * netdev)3587 int mlx5e_open(struct net_device *netdev)
3588 {
3589 struct mlx5e_priv *priv = netdev_priv(netdev);
3590 int err;
3591
3592 mutex_lock(&priv->state_lock);
3593 err = mlx5e_open_locked(netdev);
3594 if (!err)
3595 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
3596 mutex_unlock(&priv->state_lock);
3597
3598 return err;
3599 }
3600
mlx5e_close_locked(struct net_device * netdev)3601 int mlx5e_close_locked(struct net_device *netdev)
3602 {
3603 struct mlx5e_priv *priv = netdev_priv(netdev);
3604
3605 	/* May already be CLOSED if a previous configuration operation
3606 	 * (e.g. an RX/TX queue size change) that involves close & open failed.
3607 	 */
3608 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3609 return 0;
3610
3611 mlx5e_apply_traps(priv, false);
3612 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3613
3614 netif_carrier_off(priv->netdev);
3615 mlx5e_deactivate_priv_channels(priv);
3616 mlx5e_close_channels(&priv->channels);
3617
3618 return 0;
3619 }
3620
mlx5e_close(struct net_device * netdev)3621 int mlx5e_close(struct net_device *netdev)
3622 {
3623 struct mlx5e_priv *priv = netdev_priv(netdev);
3624 int err;
3625
3626 if (!netif_device_present(netdev))
3627 return -ENODEV;
3628
3629 mutex_lock(&priv->state_lock);
3630 mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
3631 err = mlx5e_close_locked(netdev);
3632 mutex_unlock(&priv->state_lock);
3633
3634 return err;
3635 }
3636
mlx5e_free_drop_rq(struct mlx5e_rq * rq)3637 static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
3638 {
3639 mlx5_wq_destroy(&rq->wq_ctrl);
3640 }
3641
mlx5e_alloc_drop_rq(struct mlx5_core_dev * mdev,struct mlx5e_rq * rq,struct mlx5e_rq_param * rq_param)3642 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3643 struct mlx5e_rq *rq,
3644 struct mlx5e_rq_param *rq_param)
3645 {
3646 void *rqc_wq = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
3647 int err;
3648
3649 rq_param->wq.db_numa_node = rq_param->wq.buf_numa_node;
3650
3651 err = mlx5_wq_cyc_create(mdev, &rq_param->wq, rqc_wq, &rq->wqe.wq,
3652 &rq->wq_ctrl);
3653 if (err)
3654 return err;
3655
3656 /* Mark as unused given "Drop-RQ" packets never reach XDP */
3657 xdp_rxq_info_unused(&rq->xdp_rxq);
3658
3659 rq->mdev = mdev;
3660
3661 return 0;
3662 }
3663
mlx5e_alloc_drop_cq(struct mlx5e_priv * priv,struct mlx5e_cq * cq,struct mlx5e_cq_param * param)3664 static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
3665 struct mlx5e_cq *cq,
3666 struct mlx5e_cq_param *param)
3667 {
3668 struct mlx5_core_dev *mdev = priv->mdev;
3669
3670 param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3671 param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
3672
3673 return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq,
3674 mdev->priv.bfreg.up, param, cq);
3675 }
3676
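/* Open the drop RQ: a minimal RQ with its own CQ that is not part of any
 * channel. Packets steered to it are dropped and, once the RQ is moved to the
 * ready state, counted via priv->drop_rq_q_counter (rx_if_down_packets).
 */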
mlx5e_open_drop_rq(struct mlx5e_priv * priv,struct mlx5e_rq * drop_rq)3677 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3678 struct mlx5e_rq *drop_rq)
3679 {
3680 struct mlx5_core_dev *mdev = priv->mdev;
3681 struct mlx5e_cq_param cq_param = {};
3682 struct mlx5e_rq_param rq_param = {};
3683 struct mlx5e_cq *cq = &drop_rq->cq;
3684 int err;
3685
3686 mlx5e_build_drop_rq_param(mdev, &rq_param);
3687
3688 err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
3689 if (err)
3690 return err;
3691
3692 err = mlx5e_create_cq(cq, &cq_param);
3693 if (err)
3694 goto err_free_cq;
3695
3696 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3697 if (err)
3698 goto err_destroy_cq;
3699
3700 err = mlx5e_create_rq(drop_rq, &rq_param, priv->drop_rq_q_counter);
3701 if (err)
3702 goto err_free_rq;
3703
3704 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3705 if (err)
3706 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3707
3708 return 0;
3709
3710 err_free_rq:
3711 mlx5e_free_drop_rq(drop_rq);
3712
3713 err_destroy_cq:
3714 mlx5e_destroy_cq(cq);
3715
3716 err_free_cq:
3717 mlx5e_free_cq(cq);
3718
3719 return err;
3720 }
3721
mlx5e_close_drop_rq(struct mlx5e_rq * drop_rq)3722 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3723 {
3724 mlx5e_destroy_rq(drop_rq);
3725 mlx5e_free_drop_rq(drop_rq);
3726 mlx5e_destroy_cq(&drop_rq->cq);
3727 mlx5e_free_cq(&drop_rq->cq);
3728 }
3729
mlx5e_cleanup_nic_tx(struct mlx5e_priv * priv)3730 static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3731 {
3732 if (priv->mqprio_rl) {
3733 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3734 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3735 priv->mqprio_rl = NULL;
3736 }
3737 mlx5e_accel_cleanup_tx(priv);
3738 }
3739
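/* Apply the VLAN-stripping-disable (VSD) setting to the RQ of every channel,
 * and to the PTP RQ when PTP RX is active.
 */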
mlx5e_modify_channels_vsd(struct mlx5e_channels * chs,bool vsd)3740 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3741 {
3742 int err;
3743 int i;
3744
3745 for (i = 0; i < chs->num; i++) {
3746 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3747 if (err)
3748 return err;
3749 }
3750 if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
3751 return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
3752
3753 return 0;
3754 }
3755
mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq * tc_to_txq,int ntc,int nch)3756 static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
3757 int ntc, int nch)
3758 {
3759 int tc;
3760
3761 memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
3762
3763 	/* Map all netdev TCs to offset 0.
3764 	 * We have our own UP-to-TXQ mapping for the DCB QoS mode.
3765 	 */
3766 for (tc = 0; tc < ntc; tc++) {
3767 tc_to_txq[tc] = (struct netdev_tc_txq) {
3768 .count = nch,
3769 .offset = 0,
3770 };
3771 }
3772 }
3773
mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq * tc_to_txq,struct tc_mqprio_qopt * qopt)3774 static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
3775 struct tc_mqprio_qopt *qopt)
3776 {
3777 int tc;
3778
3779 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
3780 tc_to_txq[tc] = (struct netdev_tc_txq) {
3781 .count = qopt->count[tc],
3782 .offset = qopt->offset[tc],
3783 };
3784 }
3785 }
3786
mlx5e_params_mqprio_dcb_set(struct mlx5e_params * params,u8 num_tc)3787 static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
3788 {
3789 params->mqprio.mode = TC_MQPRIO_MODE_DCB;
3790 params->mqprio.num_tc = num_tc;
3791 mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
3792 params->num_channels);
3793 }
3794
mlx5e_mqprio_rl_update_params(struct mlx5e_params * params,struct mlx5e_mqprio_rl * rl)3795 static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
3796 struct mlx5e_mqprio_rl *rl)
3797 {
3798 int tc;
3799
3800 for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
3801 u32 hw_id = 0;
3802
3803 if (rl)
3804 mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
3805 params->mqprio.channel.hw_id[tc] = hw_id;
3806 }
3807 }
3808
mlx5e_params_mqprio_channel_set(struct mlx5e_params * params,struct tc_mqprio_qopt_offload * mqprio,struct mlx5e_mqprio_rl * rl)3809 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
3810 struct tc_mqprio_qopt_offload *mqprio,
3811 struct mlx5e_mqprio_rl *rl)
3812 {
3813 int tc;
3814
3815 params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
3816 params->mqprio.num_tc = mqprio->qopt.num_tc;
3817
3818 for (tc = 0; tc < TC_MAX_QUEUE; tc++)
3819 params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
3820
3821 mlx5e_mqprio_rl_update_params(params, rl);
3822 mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
3823 }
3824
mlx5e_params_mqprio_reset(struct mlx5e_params * params)3825 static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
3826 {
3827 mlx5e_params_mqprio_dcb_set(params, 1);
3828 }
3829
mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv * priv,struct tc_mqprio_qopt * mqprio)3830 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
3831 struct tc_mqprio_qopt *mqprio)
3832 {
3833 struct mlx5e_params new_params;
3834 u8 tc = mqprio->num_tc;
3835 int err;
3836
3837 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3838
3839 if (tc && tc != MLX5_MAX_NUM_TC)
3840 return -EINVAL;
3841
3842 new_params = priv->channels.params;
3843 mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
3844
3845 err = mlx5e_safe_switch_params(priv, &new_params,
3846 mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
3847
3848 if (!err && priv->mqprio_rl) {
3849 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3850 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3851 priv->mqprio_rl = NULL;
3852 }
3853
3854 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3855 mlx5e_get_dcb_num_tc(&priv->channels.params));
3856 return err;
3857 }
3858
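/* Validate a channel-mode MQPRIO config: it must not coexist with TX port
 * timestamping, every TC needs a non-empty, contiguous queue group starting at
 * offset 0, min rates are not supported, max rates must pass the device rate
 * check, and the groups must cover exactly the opened channels.
 */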
mlx5e_mqprio_channel_validate(struct mlx5e_priv * priv,struct tc_mqprio_qopt_offload * mqprio)3859 static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
3860 struct tc_mqprio_qopt_offload *mqprio)
3861 {
3862 struct net_device *netdev = priv->netdev;
3863 struct mlx5e_ptp *ptp_channel;
3864 int agg_count = 0;
3865 int i;
3866
3867 ptp_channel = priv->channels.ptp;
3868 if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
3869 netdev_err(netdev,
3870 "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
3871 return -EINVAL;
3872 }
3873
3874 if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
3875 mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
3876 return -EINVAL;
3877
3878 for (i = 0; i < mqprio->qopt.num_tc; i++) {
3879 if (!mqprio->qopt.count[i]) {
3880 netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i);
3881 return -EINVAL;
3882 }
3883 if (mqprio->min_rate[i]) {
3884 netdev_err(netdev, "Min tx rate is not supported\n");
3885 return -EINVAL;
3886 }
3887
3888 if (mqprio->max_rate[i]) {
3889 int err;
3890
3891 err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
3892 if (err)
3893 return err;
3894 }
3895
3896 if (mqprio->qopt.offset[i] != agg_count) {
3897 netdev_err(netdev, "Discontinuous queues config is not supported\n");
3898 return -EINVAL;
3899 }
3900 agg_count += mqprio->qopt.count[i];
3901 }
3902
3903 if (priv->channels.params.num_channels != agg_count) {
3904 netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
3905 agg_count, priv->channels.params.num_channels);
3906 return -EINVAL;
3907 }
3908
3909 return 0;
3910 }
3911
mlx5e_mqprio_rate_limit(u8 num_tc,u64 max_rate[])3912 static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
3913 {
3914 int tc;
3915
3916 for (tc = 0; tc < num_tc; tc++)
3917 if (max_rate[tc])
3918 return true;
3919 return false;
3920 }
3921
mlx5e_mqprio_rl_create(struct mlx5_core_dev * mdev,u8 num_tc,u64 max_rate[])3922 static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
3923 u8 num_tc, u64 max_rate[])
3924 {
3925 struct mlx5e_mqprio_rl *rl;
3926 int err;
3927
3928 if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
3929 return NULL;
3930
3931 rl = mlx5e_mqprio_rl_alloc();
3932 if (!rl)
3933 return ERR_PTR(-ENOMEM);
3934
3935 err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
3936 if (err) {
3937 mlx5e_mqprio_rl_free(rl);
3938 return ERR_PTR(err);
3939 }
3940
3941 return rl;
3942 }
3943
mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv * priv,struct tc_mqprio_qopt_offload * mqprio)3944 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
3945 struct tc_mqprio_qopt_offload *mqprio)
3946 {
3947 struct mlx5e_params new_params;
3948 struct mlx5e_mqprio_rl *rl;
3949 int err;
3950
3951 err = mlx5e_mqprio_channel_validate(priv, mqprio);
3952 if (err)
3953 return err;
3954
3955 rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
3956 if (IS_ERR(rl))
3957 return PTR_ERR(rl);
3958
3959 new_params = priv->channels.params;
3960 mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
3961
3962 err = mlx5e_safe_switch_params(priv, &new_params,
3963 mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
3964 if (err) {
3965 if (rl) {
3966 mlx5e_mqprio_rl_cleanup(rl);
3967 mlx5e_mqprio_rl_free(rl);
3968 }
3969 return err;
3970 }
3971
3972 if (priv->mqprio_rl) {
3973 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
3974 mlx5e_mqprio_rl_free(priv->mqprio_rl);
3975 }
3976 priv->mqprio_rl = rl;
3977
3978 return 0;
3979 }
3980
mlx5e_setup_tc_mqprio(struct mlx5e_priv * priv,struct tc_mqprio_qopt_offload * mqprio)3981 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
3982 struct tc_mqprio_qopt_offload *mqprio)
3983 {
3984 	/* MQPRIO is another top-level qdisc that can't be attached
3985 	 * simultaneously with the offloaded HTB.
3986 	 */
3987 if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
3988 NL_SET_ERR_MSG_MOD(mqprio->extack,
3989 "MQPRIO cannot be configured when HTB offload is enabled.");
3990 return -EOPNOTSUPP;
3991 }
3992
3993 switch (mqprio->mode) {
3994 case TC_MQPRIO_MODE_DCB:
3995 return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt);
3996 case TC_MQPRIO_MODE_CHANNEL:
3997 return mlx5e_setup_tc_mqprio_channel(priv, mqprio);
3998 default:
3999 return -EOPNOTSUPP;
4000 }
4001 }
4002
4003 static LIST_HEAD(mlx5e_block_cb_list);
4004
mlx5e_setup_tc(struct net_device * dev,enum tc_setup_type type,void * type_data)4005 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
4006 void *type_data)
4007 {
4008 struct mlx5e_priv *priv = netdev_priv(dev);
4009 bool tc_unbind = false;
4010 int err;
4011
4012 if (type == TC_SETUP_BLOCK &&
4013 ((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
4014 tc_unbind = true;
4015
4016 if (!netif_device_present(dev) && !tc_unbind)
4017 return -ENODEV;
4018
4019 switch (type) {
4020 case TC_SETUP_BLOCK: {
4021 struct flow_block_offload *f = type_data;
4022
4023 f->unlocked_driver_cb = true;
4024 return flow_block_cb_setup_simple(type_data,
4025 &mlx5e_block_cb_list,
4026 mlx5e_setup_tc_block_cb,
4027 priv, priv, true);
4028 }
4029 case TC_SETUP_QDISC_MQPRIO:
4030 mutex_lock(&priv->state_lock);
4031 err = mlx5e_setup_tc_mqprio(priv, type_data);
4032 mutex_unlock(&priv->state_lock);
4033 return err;
4034 case TC_SETUP_QDISC_HTB:
4035 mutex_lock(&priv->state_lock);
4036 err = mlx5e_htb_setup_tc(priv, type_data);
4037 mutex_unlock(&priv->state_lock);
4038 return err;
4039 default:
4040 return -EOPNOTSUPP;
4041 }
4042 }
4043
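/* Fold the software counters of all channels (regular and XSK RQs plus all
 * per-TC SQs), and of the PTP queues when opened, into rtnl_link_stats64.
 */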
mlx5e_fold_sw_stats64(struct mlx5e_priv * priv,struct rtnl_link_stats64 * s)4044 void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
4045 {
4046 int i;
4047
4048 for (i = 0; i < priv->stats_nch; i++) {
4049 struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
4050 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
4051 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
4052 int j;
4053
4054 s->rx_packets += rq_stats->packets + xskrq_stats->packets;
4055 s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
4056 s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
4057
4058 for (j = 0; j < priv->max_opened_tc; j++) {
4059 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
4060
4061 s->tx_packets += sq_stats->packets;
4062 s->tx_bytes += sq_stats->bytes;
4063 s->tx_dropped += sq_stats->dropped;
4064 }
4065 }
4066 if (priv->tx_ptp_opened) {
4067 for (i = 0; i < priv->max_opened_tc; i++) {
4068 struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
4069
4070 s->tx_packets += sq_stats->packets;
4071 s->tx_bytes += sq_stats->bytes;
4072 s->tx_dropped += sq_stats->dropped;
4073 }
4074 }
4075 if (priv->rx_ptp_opened) {
4076 struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
4077
4078 s->rx_packets += rq_stats->packets;
4079 s->rx_bytes += rq_stats->bytes;
4080 s->multicast += rq_stats->mcast_packets;
4081 }
4082
4083 #ifdef CONFIG_MLX5_EN_PSP
4084 if (priv->psp)
4085 s->tx_dropped += atomic_read(&priv->psp->tx_drop);
4086 #endif
4087 }
4088
4089 void
mlx5e_get_stats(struct net_device * dev,struct rtnl_link_stats64 * stats)4090 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
4091 {
4092 struct mlx5e_priv *priv = netdev_priv(dev);
4093 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
4094
4095 if (!netif_device_present(dev))
4096 return;
4097
4098 	/* In switchdev mode, the monitor counters don't track the
4099 	 * 802_3 rx/tx stats. The update-stats mechanism should keep
4100 	 * the 802_3 layout counters updated.
4101 	 */
4102 if (!mlx5e_monitor_counter_supported(priv) ||
4103 mlx5e_is_uplink_rep(priv)) {
4104 /* update HW stats in background for next time */
4105 mlx5e_queue_update_stats(priv);
4106 }
4107
4108 netdev_stats_to_stats64(stats, &dev->stats);
4109
4110 if (mlx5e_is_uplink_rep(priv)) {
4111 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
4112
4113 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
4114 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
4115 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
4116 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
4117
4118 /* vport multicast also counts packets that are dropped due to steering
4119 * or rx out of buffer
4120 */
4121 stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
4122 } else {
4123 mlx5e_fold_sw_stats64(priv, stats);
4124 }
4125
4126 stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer;
4127 stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards);
4128
4129 stats->rx_length_errors +=
4130 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
4131 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
4132 PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
4133 VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
4134 stats->rx_crc_errors +=
4135 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
4136 stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors);
4137 stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards);
4138 stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors +
4139 stats->rx_frame_errors;
4140 stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors;
4141 }
4142
mlx5e_nic_set_rx_mode(struct mlx5e_priv * priv)4143 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
4144 {
4145 queue_work(priv->wq, &priv->set_rx_mode_work);
4146 }
4147
mlx5e_set_rx_mode(struct net_device * dev,struct netdev_hw_addr_list * uc,struct netdev_hw_addr_list * mc)4148 static void mlx5e_set_rx_mode(struct net_device *dev,
4149 struct netdev_hw_addr_list *uc,
4150 struct netdev_hw_addr_list *mc)
4151 {
4152 struct mlx5e_priv *priv = netdev_priv(dev);
4153
4154 mlx5e_fs_set_rx_mode_work(priv->fs, dev, uc, mc);
4155 }
4156
mlx5e_set_mac(struct net_device * netdev,void * addr)4157 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
4158 {
4159 struct mlx5e_priv *priv = netdev_priv(netdev);
4160 struct sockaddr *saddr = addr;
4161
4162 if (!is_valid_ether_addr(saddr->sa_data))
4163 return -EADDRNOTAVAIL;
4164
4165 netif_addr_lock_bh(netdev);
4166 eth_hw_addr_set(netdev, saddr->sa_data);
4167 netif_addr_unlock_bh(netdev);
4168
4169 mlx5e_nic_set_rx_mode(priv);
4170
4171 return 0;
4172 }
4173
4174 #define MLX5E_SET_FEATURE(features, feature, enable) \
4175 do { \
4176 if (enable) \
4177 *features |= feature; \
4178 else \
4179 *features &= ~feature; \
4180 } while (0)
4181
4182 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
4183
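/* Toggle LRO by switching the packet-merge type. A full channel reset can be
 * skipped when striding RQ is in use, the transition is not SHAMPO->LRO, and
 * the linear-SKB decision is unchanged by the new parameters.
 */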
set_feature_lro(struct net_device * netdev,bool enable)4184 static int set_feature_lro(struct net_device *netdev, bool enable)
4185 {
4186 struct mlx5e_priv *priv = netdev_priv(netdev);
4187 struct mlx5_core_dev *mdev = priv->mdev;
4188 struct mlx5e_params *cur_params;
4189 struct mlx5e_params new_params;
4190 bool reset = true;
4191 int err = 0;
4192
4193 mutex_lock(&priv->state_lock);
4194
4195 cur_params = &priv->channels.params;
4196 new_params = *cur_params;
4197
4198 if (enable)
4199 new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
4200 else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
4201 new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
4202 else
4203 goto out;
4204
4205 if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
4206 new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
4207 if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
4208 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
4209 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
4210 reset = false;
4211 }
4212 }
4213
4214 err = mlx5e_safe_switch_params(priv, &new_params,
4215 mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
4216 out:
4217 mutex_unlock(&priv->state_lock);
4218 return err;
4219 }
4220
set_feature_hw_gro(struct net_device * netdev,bool enable)4221 static int set_feature_hw_gro(struct net_device *netdev, bool enable)
4222 {
4223 struct mlx5e_priv *priv = netdev_priv(netdev);
4224 struct mlx5e_params new_params;
4225 bool reset = true;
4226 int err = 0;
4227
4228 mutex_lock(&priv->state_lock);
4229 new_params = priv->channels.params;
4230
4231 if (enable) {
4232 new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
4233 } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
4234 new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
4235 } else {
4236 goto out;
4237 }
4238
4239 err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
4240 out:
4241 mutex_unlock(&priv->state_lock);
4242 return err;
4243 }
4244
set_feature_cvlan_filter(struct net_device * netdev,bool enable)4245 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
4246 {
4247 struct mlx5e_priv *priv = netdev_priv(netdev);
4248
4249 if (enable)
4250 mlx5e_enable_cvlan_filter(priv->fs,
4251 !!(priv->netdev->flags & IFF_PROMISC));
4252 else
4253 mlx5e_disable_cvlan_filter(priv->fs,
4254 !!(priv->netdev->flags & IFF_PROMISC));
4255
4256 return 0;
4257 }
4258
set_feature_hw_tc(struct net_device * netdev,bool enable)4259 static int set_feature_hw_tc(struct net_device *netdev, bool enable)
4260 {
4261 struct mlx5e_priv *priv = netdev_priv(netdev);
4262 int err = 0;
4263
4264 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
4265 int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
4266 MLX5_TC_FLAG(NIC_OFFLOAD);
4267 if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
4268 netdev_err(netdev,
4269 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
4270 return -EINVAL;
4271 }
4272 #endif
4273
4274 mutex_lock(&priv->state_lock);
4275 if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
4276 netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
4277 err = -EINVAL;
4278 }
4279 mutex_unlock(&priv->state_lock);
4280
4281 return err;
4282 }
4283
set_feature_rx_all(struct net_device * netdev,bool enable)4284 static int set_feature_rx_all(struct net_device *netdev, bool enable)
4285 {
4286 struct mlx5e_priv *priv = netdev_priv(netdev);
4287 struct mlx5_core_dev *mdev = priv->mdev;
4288
4289 return mlx5_set_port_fcs(mdev, !enable);
4290 }
4291
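/* Default (non-adaptive) RX CQ moderation profile for the given CQ period
 * mode.
 */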
mlx5e_get_def_rx_moderation(u8 cq_period_mode)4292 static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4293 {
4294 return (struct dim_cq_moder) {
4295 .cq_period_mode = cq_period_mode,
4296 .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
4297 .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
4298 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
4299 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
4300 };
4301 }
4302
mlx5e_reset_rx_moderation(struct dim_cq_moder * cq_moder,u8 cq_period_mode,bool dim_enabled)4303 bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
4304 bool dim_enabled)
4305 {
4306 bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
4307
4308 if (dim_enabled)
4309 *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
4310 else
4311 *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
4312
4313 return reset_needed;
4314 }
4315
mlx5e_reset_rx_channels_moderation(struct mlx5e_channels * chs,u8 cq_period_mode,bool dim_enabled,bool keep_dim_state)4316 bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
4317 bool dim_enabled, bool keep_dim_state)
4318 {
4319 bool reset = false;
4320 int i;
4321
4322 for (i = 0; i < chs->num; i++) {
4323 if (keep_dim_state)
4324 dim_enabled = !!chs->c[i]->rq.dim;
4325
4326 reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
4327 cq_period_mode, dim_enabled);
4328 }
4329
4330 return reset;
4331 }
4332
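/* Configure the PCMR rx_ts_over_crc setting (RX timestamp delivered in place
 * of the CRC). A no-op when ports_check is unsupported, the capability is
 * missing, or the requested state is already in effect.
 */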
mlx5e_set_rx_port_ts(struct mlx5_core_dev * mdev,bool enable)4333 static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
4334 {
4335 u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
4336 bool supported, curr_state;
4337 int err;
4338
4339 if (!MLX5_CAP_GEN(mdev, ports_check))
4340 return 0;
4341
4342 err = mlx5_query_ports_check(mdev, in, sizeof(in));
4343 if (err)
4344 return err;
4345
4346 supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
4347 curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
4348
4349 if (!supported || enable == curr_state)
4350 return 0;
4351
4352 MLX5_SET(pcmr_reg, in, local_port, 1);
4353 MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
4354
4355 return mlx5_set_ports_check(mdev, in, sizeof(in));
4356 }
4357
mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv * priv,void * ctx)4358 static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
4359 {
4360 struct mlx5_core_dev *mdev = priv->mdev;
4361 bool enable = *(bool *)ctx;
4362
4363 return mlx5e_set_rx_port_ts(mdev, enable);
4364 }
4365
set_feature_rx_fcs(struct net_device * netdev,bool enable)4366 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
4367 {
4368 struct mlx5e_priv *priv = netdev_priv(netdev);
4369 struct mlx5e_channels *chs = &priv->channels;
4370 struct mlx5e_params new_params;
4371 int err;
4372 bool rx_ts_over_crc = !enable;
4373
4374 mutex_lock(&priv->state_lock);
4375
4376 new_params = chs->params;
4377 new_params.scatter_fcs_en = enable;
4378 err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
4379 &rx_ts_over_crc, true);
4380 mutex_unlock(&priv->state_lock);
4381 return err;
4382 }
4383
set_feature_rx_vlan(struct net_device * netdev,bool enable)4384 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
4385 {
4386 struct mlx5e_priv *priv = netdev_priv(netdev);
4387 int err = 0;
4388
4389 mutex_lock(&priv->state_lock);
4390
4391 mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
4392 priv->channels.params.vlan_strip_disable = !enable;
4393
4394 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4395 goto unlock;
4396
4397 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
4398 if (err) {
4399 mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
4400 priv->channels.params.vlan_strip_disable = enable;
4401 }
4402 unlock:
4403 mutex_unlock(&priv->state_lock);
4404
4405 return err;
4406 }
4407
mlx5e_vlan_rx_add_vid(struct net_device * dev,__be16 proto,u16 vid)4408 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
4409 {
4410 struct mlx5e_priv *priv = netdev_priv(dev);
4411 struct mlx5e_flow_steering *fs = priv->fs;
4412
4413 if (mlx5e_is_uplink_rep(priv))
4414 return 0; /* no vlan table for uplink rep */
4415
4416 return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
4417 }
4418
mlx5e_vlan_rx_kill_vid(struct net_device * dev,__be16 proto,u16 vid)4419 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
4420 {
4421 struct mlx5e_priv *priv = netdev_priv(dev);
4422 struct mlx5e_flow_steering *fs = priv->fs;
4423
4424 if (mlx5e_is_uplink_rep(priv))
4425 return 0; /* no vlan table for uplink rep */
4426
4427 return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
4428 }
4429
4430 #ifdef CONFIG_MLX5_EN_ARFS
set_feature_arfs(struct net_device * netdev,bool enable)4431 static int set_feature_arfs(struct net_device *netdev, bool enable)
4432 {
4433 struct mlx5e_priv *priv = netdev_priv(netdev);
4434 int err;
4435
4436 if (enable)
4437 err = mlx5e_arfs_enable(priv->fs);
4438 else
4439 err = mlx5e_arfs_disable(priv->fs);
4440
4441 return err;
4442 }
4443 #endif
4444
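/* Run a feature handler only if the corresponding bit actually changed. On
 * failure, revert the bit in the requested feature mask and log the error.
 */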
mlx5e_handle_feature(struct net_device * netdev,netdev_features_t * features,netdev_features_t feature,mlx5e_feature_handler feature_handler)4445 static int mlx5e_handle_feature(struct net_device *netdev,
4446 netdev_features_t *features,
4447 netdev_features_t feature,
4448 mlx5e_feature_handler feature_handler)
4449 {
4450 netdev_features_t changes = *features ^ netdev->features;
4451 bool enable = !!(*features & feature);
4452 int err;
4453
4454 if (!(changes & feature))
4455 return 0;
4456
4457 err = feature_handler(netdev, enable);
4458 if (err) {
4459 MLX5E_SET_FEATURE(features, feature, !enable);
4460 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
4461 enable ? "Enable" : "Disable", &feature, err);
4462 return err;
4463 }
4464
4465 return 0;
4466 }
4467
mlx5e_set_xdp_feature(struct mlx5e_priv * priv)4468 void mlx5e_set_xdp_feature(struct mlx5e_priv *priv)
4469 {
4470 struct mlx5e_params *params = &priv->channels.params;
4471 struct net_device *netdev = priv->netdev;
4472 xdp_features_t val = 0;
4473
4474 if (netdev->netdev_ops->ndo_bpf &&
4475 params->packet_merge.type == MLX5E_PACKET_MERGE_NONE)
4476 val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4477 NETDEV_XDP_ACT_XSK_ZEROCOPY |
4478 NETDEV_XDP_ACT_RX_SG;
4479
4480 if (netdev->netdev_ops->ndo_xdp_xmit && params->xdp_prog)
4481 val |= NETDEV_XDP_ACT_NDO_XMIT |
4482 NETDEV_XDP_ACT_NDO_XMIT_SG;
4483
4484 xdp_set_features_flag_locked(netdev, val);
4485 }
4486
mlx5e_set_features(struct net_device * netdev,netdev_features_t features)4487 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
4488 {
4489 netdev_features_t oper_features = features;
4490 int err = 0;
4491
4492 #define MLX5E_HANDLE_FEATURE(feature, handler) \
4493 mlx5e_handle_feature(netdev, &oper_features, feature, handler)
4494
4495 if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) {
4496 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
4497 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
4498 err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
4499 } else {
4500 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
4501 err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
4502 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
4503 }
4504 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
4505 set_feature_cvlan_filter);
4506 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
4507 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
4508 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
4509 #ifdef CONFIG_MLX5_EN_ARFS
4510 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
4511 #endif
4512 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
4513
4514 if (err) {
4515 netdev->features = oper_features;
4516 return -EINVAL;
4517 }
4518
4519 return 0;
4520 }
4521
mlx5e_fix_uplink_rep_features(struct net_device * netdev,netdev_features_t features)4522 static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
4523 netdev_features_t features)
4524 {
4525 features &= ~NETIF_F_HW_TLS_RX;
4526 if (netdev->features & NETIF_F_HW_TLS_RX)
4527 netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
4528
4529 features &= ~NETIF_F_HW_TLS_TX;
4530 if (netdev->features & NETIF_F_HW_TLS_TX)
4531 netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
4532
4533 features &= ~NETIF_F_NTUPLE;
4534 if (netdev->features & NETIF_F_NTUPLE)
4535 netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
4536
4537 features &= ~NETIF_F_GRO_HW;
4538 if (netdev->features & NETIF_F_GRO_HW)
4539 netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
4540
4541 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4542 if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
4543 netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
4544
4545 features &= ~NETIF_F_HW_MACSEC;
4546 if (netdev->features & NETIF_F_HW_MACSEC)
4547 netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
4548
4549 return features;
4550 }
4551
mlx5e_fix_features(struct net_device * netdev,netdev_features_t features)4552 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
4553 netdev_features_t features)
4554 {
4555 struct netdev_config *cfg = netdev->cfg_pending;
4556 struct mlx5e_priv *priv = netdev_priv(netdev);
4557 struct mlx5e_vlan_table *vlan;
4558 struct mlx5e_params *params;
4559
4560 if (!netif_device_present(netdev))
4561 return features;
4562
4563 vlan = mlx5e_fs_get_vlan(priv->fs);
4564 mutex_lock(&priv->state_lock);
4565 params = &priv->channels.params;
4566 if (!vlan ||
4567 !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
4568 		/* HW strips the outer C-tag header; this is a problem
4569 		 * for S-tag traffic.
4570 		 */
4571 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4572 if (!params->vlan_strip_disable)
4573 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
4574 }
4575
4576 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
4577 if (features & NETIF_F_LRO) {
4578 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
4579 features &= ~NETIF_F_LRO;
4580 }
4581 if (features & NETIF_F_GRO_HW) {
4582 netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
4583 features &= ~NETIF_F_GRO_HW;
4584 }
4585 }
4586
4587 if (params->xdp_prog) {
4588 if (features & NETIF_F_LRO) {
4589 netdev_warn(netdev, "LRO is incompatible with XDP\n");
4590 features &= ~NETIF_F_LRO;
4591 }
4592 if (features & NETIF_F_GRO_HW) {
4593 netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
4594 features &= ~NETIF_F_GRO_HW;
4595 }
4596 }
4597
4598 if (priv->xsk.refcnt) {
4599 if (features & NETIF_F_LRO) {
4600 netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
4601 priv->xsk.refcnt);
4602 features &= ~NETIF_F_LRO;
4603 }
4604 if (features & NETIF_F_GRO_HW) {
4605 netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
4606 priv->xsk.refcnt);
4607 features &= ~NETIF_F_GRO_HW;
4608 }
4609 }
4610
4611 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
4612 features &= ~NETIF_F_RXHASH;
4613 if (netdev->features & NETIF_F_RXHASH)
4614 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
4615
4616 if (features & NETIF_F_GRO_HW) {
4617 netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
4618 features &= ~NETIF_F_GRO_HW;
4619 }
4620 }
4621
4622 /* The header-data split ring param requires HW GRO to stay enabled. */
4623 if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
4624 !(features & NETIF_F_GRO_HW)) {
4625 netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n");
4626 features |= NETIF_F_GRO_HW;
4627 }
4628
4629 if (mlx5e_is_uplink_rep(priv)) {
4630 features = mlx5e_fix_uplink_rep_features(netdev, features);
4631 netdev->netns_immutable = true;
4632 } else {
4633 netdev->netns_immutable = false;
4634 }
4635
4636 mutex_unlock(&priv->state_lock);
4637
4638 return features;
4639 }
4640
mlx5e_xsk_validate_mtu(struct net_device * netdev,struct mlx5e_channels * chs,struct mlx5e_params * new_params,struct mlx5_core_dev * mdev)4641 static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
4642 struct mlx5e_channels *chs,
4643 struct mlx5e_params *new_params,
4644 struct mlx5_core_dev *mdev)
4645 {
4646 u16 ix;
4647
4648 for (ix = 0; ix < chs->params.num_channels; ix++) {
4649 struct xsk_buff_pool *xsk_pool =
4650 mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
4651 struct mlx5e_rq_opt_param rqo = {0};
4652 struct mlx5e_xsk_param xsk;
4653 int max_xdp_mtu;
4654
4655 if (!xsk_pool)
4656 continue;
4657
4658 mlx5e_build_xsk_param(xsk_pool, &xsk);
4659 rqo.xsk = &xsk;
4660 max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &rqo);
4661
4662 /* Validate XSK params and XDP MTU in advance */
4663 if (!mlx5e_validate_xsk_param(new_params, &rqo, mdev) ||
4664 new_params->sw_mtu > max_xdp_mtu) {
4665 u32 hr = mlx5e_get_linear_rq_headroom(new_params, &rqo);
4666 int max_mtu_frame, max_mtu_page, max_mtu;
4667
4668 /* Two criteria must be met:
4669 * 1. HW MTU + all headrooms <= XSK frame size.
4670 * 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
4671 */
4672 max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
4673 max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
4674 max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
4675
4676 netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
4677 new_params->sw_mtu, ix, max_mtu);
4678 return false;
4679 }
4680 }
4681
4682 return true;
4683 }
4684
mlx5e_params_validate_xdp(struct net_device * netdev,struct mlx5_core_dev * mdev,struct mlx5e_params * params)4685 static bool mlx5e_params_validate_xdp(struct net_device *netdev,
4686 struct mlx5_core_dev *mdev,
4687 struct mlx5e_params *params)
4688 {
4689 bool is_linear;
4690
4691 /* No XSK params: AF_XDP can't be enabled yet at the point of setting
4692 * the XDP program.
4693 */
4694 is_linear = params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC ?
4695 mlx5e_rx_is_linear_skb(mdev, params, NULL) :
4696 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
4697
4698 if (!is_linear) {
4699 if (!params->xdp_prog->aux->xdp_has_frags) {
4700 netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
4701 params->sw_mtu,
4702 mlx5e_xdp_max_mtu(params, NULL));
4703 return false;
4704 }
4705 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
4706 !mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
4707 netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
4708 params->sw_mtu,
4709 mlx5e_xdp_max_mtu(params, NULL));
4710 return false;
4711 }
4712 }
4713
4714 return true;
4715 }
4716
mlx5e_change_mtu(struct net_device * netdev,int new_mtu,mlx5e_fp_preactivate preactivate)4717 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
4718 mlx5e_fp_preactivate preactivate)
4719 {
4720 struct mlx5e_priv *priv = netdev_priv(netdev);
4721 struct mlx5e_params new_params;
4722 struct mlx5e_params *params;
4723 int err = 0;
4724
4725 mutex_lock(&priv->state_lock);
4726
4727 params = &priv->channels.params;
4728
4729 new_params = *params;
4730 new_params.sw_mtu = new_mtu;
4731 err = mlx5e_validate_params(priv->mdev, &new_params);
4732 if (err)
4733 goto out;
4734
4735 if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
4736 &new_params)) {
4737 err = -EINVAL;
4738 goto out;
4739 }
4740
4741 if (priv->xsk.refcnt &&
4742 !mlx5e_xsk_validate_mtu(netdev, &priv->channels,
4743 &new_params, priv->mdev)) {
4744 err = -EINVAL;
4745 goto out;
4746 }
4747
4748 err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL,
4749 true);
4750
4751 out:
4752 WRITE_ONCE(netdev->mtu, params->sw_mtu);
4753 mutex_unlock(&priv->state_lock);
4754
4755 if (!err)
4756 netdev_update_features(netdev);
4757
4758 return err;
4759 }
4760
mlx5e_change_nic_mtu(struct net_device * netdev,int new_mtu)4761 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
4762 {
4763 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
4764 }
4765
mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv * priv,void * ctx)4766 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
4767 {
4768 bool set = *(bool *)ctx;
4769
4770 return mlx5e_ptp_rx_manage_fs(priv, set);
4771 }
4772
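/* Without a dedicated PTP RQ, RX timestamping conflicts with CQE compression:
 * enabling an RX filter forces compression off, and clearing the filter
 * restores the admin default.
 */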
mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv * priv,bool rx_filter)4773 static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
4774 {
4775 bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
4776 int err;
4777
4778 if (!rx_filter)
4779 /* Reset CQE compression to Admin default */
4780 return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
4781
4782 if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
4783 return 0;
4784
4785 /* Disable CQE compression */
4786 netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
4787 err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
4788 if (err)
4789 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
4790
4791 return err;
4792 }
4793
mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv * priv,bool ptp_rx)4794 static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
4795 {
4796 struct mlx5e_params new_params;
4797
4798 if (ptp_rx == priv->channels.params.ptp_rx)
4799 return 0;
4800
4801 new_params = priv->channels.params;
4802 new_params.ptp_rx = ptp_rx;
4803 return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
4804 &new_params.ptp_rx, true);
4805 }
4806
mlx5e_hwtstamp_set(struct mlx5e_priv * priv,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)4807 int mlx5e_hwtstamp_set(struct mlx5e_priv *priv,
4808 struct kernel_hwtstamp_config *config,
4809 struct netlink_ext_ack *extack)
4810 {
4811 bool rx_cqe_compress_def;
4812 bool ptp_rx;
4813 int err;
4814
4815 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
4816 (mlx5_clock_get_ptp_index(priv->mdev) == -1)) {
4817 NL_SET_ERR_MSG_MOD(extack,
4818 "Timestamps are not supported on this device");
4819 return -EOPNOTSUPP;
4820 }
4821
4822 /* TX HW timestamp */
4823 switch (config->tx_type) {
4824 case HWTSTAMP_TX_OFF:
4825 case HWTSTAMP_TX_ON:
4826 break;
4827 default:
4828 return -ERANGE;
4829 }
4830
4831 mutex_lock(&priv->state_lock);
4832 rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
4833
4834 /* RX HW timestamp */
4835 switch (config->rx_filter) {
4836 case HWTSTAMP_FILTER_NONE:
4837 ptp_rx = false;
4838 break;
4839 case HWTSTAMP_FILTER_ALL:
4840 case HWTSTAMP_FILTER_SOME:
4841 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4842 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4843 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4844 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4845 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4846 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4847 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4848 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4849 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4850 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4851 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4852 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4853 case HWTSTAMP_FILTER_NTP_ALL:
4854 config->rx_filter = HWTSTAMP_FILTER_ALL;
4855 		/* ptp_rx is enabled only when both HW timestamping and
4856 		 * CQE compression are set.
4857 		 */
4858 ptp_rx = rx_cqe_compress_def;
4859 break;
4860 default:
4861 err = -ERANGE;
4862 goto err_unlock;
4863 }
4864
4865 if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
4866 err = mlx5e_hwstamp_config_no_ptp_rx(priv,
4867 config->rx_filter != HWTSTAMP_FILTER_NONE);
4868 else
4869 err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
4870 if (err)
4871 goto err_unlock;
4872
4873 priv->hwtstamp_config = *config;
4874 mutex_unlock(&priv->state_lock);
4875
4876 /* might need to fix some features */
4877 netdev_update_features(priv->netdev);
4878
4879 return 0;
4880 err_unlock:
4881 mutex_unlock(&priv->state_lock);
4882 return err;
4883 }
4884
mlx5e_hwtstamp_set_ndo(struct net_device * netdev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)4885 static int mlx5e_hwtstamp_set_ndo(struct net_device *netdev,
4886 struct kernel_hwtstamp_config *config,
4887 struct netlink_ext_ack *extack)
4888 {
4889 struct mlx5e_priv *priv = netdev_priv(netdev);
4890
4891 return mlx5e_hwtstamp_set(priv, config, extack);
4892 }
4893
mlx5e_hwtstamp_get(struct mlx5e_priv * priv,struct kernel_hwtstamp_config * config)4894 int mlx5e_hwtstamp_get(struct mlx5e_priv *priv,
4895 struct kernel_hwtstamp_config *config)
4896 {
4897 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
4898 return -EOPNOTSUPP;
4899
4900 *config = priv->hwtstamp_config;
4901
4902 return 0;
4903 }
4904
mlx5e_hwtstamp_get_ndo(struct net_device * dev,struct kernel_hwtstamp_config * config)4905 static int mlx5e_hwtstamp_get_ndo(struct net_device *dev,
4906 struct kernel_hwtstamp_config *config)
4907 {
4908 struct mlx5e_priv *priv = netdev_priv(dev);
4909
4910 return mlx5e_hwtstamp_get(priv, config);
4911 }
4912
4913 #ifdef CONFIG_MLX5_ESWITCH
mlx5e_set_vf_mac(struct net_device * dev,int vf,u8 * mac)4914 int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
4915 {
4916 struct mlx5e_priv *priv = netdev_priv(dev);
4917 struct mlx5_core_dev *mdev = priv->mdev;
4918
4919 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
4920 }
4921
mlx5e_set_vf_vlan(struct net_device * dev,int vf,u16 vlan,u8 qos,__be16 vlan_proto)4922 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
4923 __be16 vlan_proto)
4924 {
4925 struct mlx5e_priv *priv = netdev_priv(dev);
4926 struct mlx5_core_dev *mdev = priv->mdev;
4927
4928 if (vlan_proto != htons(ETH_P_8021Q))
4929 return -EPROTONOSUPPORT;
4930
4931 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
4932 vlan, qos);
4933 }
4934
mlx5e_set_vf_spoofchk(struct net_device * dev,int vf,bool setting)4935 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
4936 {
4937 struct mlx5e_priv *priv = netdev_priv(dev);
4938 struct mlx5_core_dev *mdev = priv->mdev;
4939
4940 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
4941 }
4942
mlx5e_set_vf_trust(struct net_device * dev,int vf,bool setting)4943 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
4944 {
4945 struct mlx5e_priv *priv = netdev_priv(dev);
4946 struct mlx5_core_dev *mdev = priv->mdev;
4947
4948 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
4949 }
4950
mlx5e_set_vf_rate(struct net_device * dev,int vf,int min_tx_rate,int max_tx_rate)4951 int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
4952 int max_tx_rate)
4953 {
4954 struct mlx5e_priv *priv = netdev_priv(dev);
4955 struct mlx5_core_dev *mdev = priv->mdev;
4956
4957 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
4958 max_tx_rate, min_tx_rate);
4959 }
4960
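/* Translate between the eswitch vport admin state and the IFLA VF link state
 * values.
 */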
mlx5_vport_link2ifla(u8 esw_link)4961 static int mlx5_vport_link2ifla(u8 esw_link)
4962 {
4963 switch (esw_link) {
4964 case MLX5_VPORT_ADMIN_STATE_DOWN:
4965 return IFLA_VF_LINK_STATE_DISABLE;
4966 case MLX5_VPORT_ADMIN_STATE_UP:
4967 return IFLA_VF_LINK_STATE_ENABLE;
4968 }
4969 return IFLA_VF_LINK_STATE_AUTO;
4970 }
4971
mlx5_ifla_link2vport(u8 ifla_link)4972 static int mlx5_ifla_link2vport(u8 ifla_link)
4973 {
4974 switch (ifla_link) {
4975 case IFLA_VF_LINK_STATE_DISABLE:
4976 return MLX5_VPORT_ADMIN_STATE_DOWN;
4977 case IFLA_VF_LINK_STATE_ENABLE:
4978 return MLX5_VPORT_ADMIN_STATE_UP;
4979 }
4980 return MLX5_VPORT_ADMIN_STATE_AUTO;
4981 }
4982
mlx5e_set_vf_link_state(struct net_device * dev,int vf,int link_state)4983 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4984 int link_state)
4985 {
4986 struct mlx5e_priv *priv = netdev_priv(dev);
4987 struct mlx5_core_dev *mdev = priv->mdev;
4988
4989 if (mlx5e_is_uplink_rep(priv))
4990 return -EOPNOTSUPP;
4991
4992 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4993 mlx5_ifla_link2vport(link_state));
4994 }
4995
mlx5e_get_vf_config(struct net_device * dev,int vf,struct ifla_vf_info * ivi)4996 int mlx5e_get_vf_config(struct net_device *dev,
4997 int vf, struct ifla_vf_info *ivi)
4998 {
4999 struct mlx5e_priv *priv = netdev_priv(dev);
5000 struct mlx5_core_dev *mdev = priv->mdev;
5001 int err;
5002
5003 if (!netif_device_present(dev))
5004 return -EOPNOTSUPP;
5005
5006 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
5007 if (err)
5008 return err;
5009 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
5010 return 0;
5011 }
5012
mlx5e_get_vf_stats(struct net_device * dev,int vf,struct ifla_vf_stats * vf_stats)5013 int mlx5e_get_vf_stats(struct net_device *dev,
5014 int vf, struct ifla_vf_stats *vf_stats)
5015 {
5016 struct mlx5e_priv *priv = netdev_priv(dev);
5017 struct mlx5_core_dev *mdev = priv->mdev;
5018
5019 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
5020 vf_stats);
5021 }
5022
5023 static bool
mlx5e_has_offload_stats(const struct net_device * dev,int attr_id)5024 mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
5025 {
5026 struct mlx5e_priv *priv = netdev_priv(dev);
5027
5028 if (!netif_device_present(dev))
5029 return false;
5030
5031 if (!mlx5e_is_uplink_rep(priv))
5032 return false;
5033
5034 return mlx5e_rep_has_offload_stats(dev, attr_id);
5035 }
5036
5037 static int
mlx5e_get_offload_stats(int attr_id,const struct net_device * dev,void * sp)5038 mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
5039 void *sp)
5040 {
5041 struct mlx5e_priv *priv = netdev_priv(dev);
5042
5043 if (!mlx5e_is_uplink_rep(priv))
5044 return -EOPNOTSUPP;
5045
5046 return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
5047 }
5048 #endif
5049
mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev * mdev,u8 proto_type)5050 static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
5051 {
5052 switch (proto_type) {
5053 case IPPROTO_GRE:
5054 return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
5055 case IPPROTO_IPIP:
5056 case IPPROTO_IPV6:
5057 return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
5058 MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
5059 default:
5060 return false;
5061 }
5062 }
5063
mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev * mdev,struct sk_buff * skb)5064 static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
5065 struct sk_buff *skb)
5066 {
5067 switch (skb->inner_protocol) {
5068 case htons(ETH_P_IP):
5069 case htons(ETH_P_IPV6):
5070 case htons(ETH_P_TEB):
5071 return true;
5072 case htons(ETH_P_MPLS_UC):
5073 case htons(ETH_P_MPLS_MC):
5074 return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
5075 }
5076 return false;
5077 }
5078
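/* Keep checksum/GSO offloads for a tunneled skb only when the HW supports the
 * tunnel type: GRE with a supported inner protocol, IP-in-IP, UDP ports known
 * to be offloaded (VXLAN, default Geneve port), or ESP. Otherwise strip CSUM
 * and GSO.
 */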
mlx5e_tunnel_features_check(struct mlx5e_priv * priv,struct sk_buff * skb,netdev_features_t features)5079 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
5080 struct sk_buff *skb,
5081 netdev_features_t features)
5082 {
5083 unsigned int offset = 0;
5084 struct udphdr *udph;
5085 u8 proto;
5086 u16 port;
5087
5088 switch (vlan_get_protocol(skb)) {
5089 case htons(ETH_P_IP):
5090 proto = ip_hdr(skb)->protocol;
5091 break;
5092 case htons(ETH_P_IPV6):
5093 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
5094 break;
5095 default:
5096 goto out;
5097 }
5098
5099 switch (proto) {
5100 case IPPROTO_GRE:
5101 if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
5102 return features;
5103 break;
5104 case IPPROTO_IPIP:
5105 case IPPROTO_IPV6:
5106 if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
5107 return features;
5108 break;
5109 case IPPROTO_UDP:
5110 udph = udp_hdr(skb);
5111 port = be16_to_cpu(udph->dest);
5112
5113 /* Verify if UDP port is being offloaded by HW */
5114 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
5115 return vxlan_features_check(skb, features);
5116
5117 #if IS_ENABLED(CONFIG_GENEVE)
5118 /* Support Geneve offload for default UDP port */
5119 if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
5120 return features;
5121 #endif
5122 break;
5123 #ifdef CONFIG_MLX5_EN_IPSEC
5124 case IPPROTO_ESP:
5125 return mlx5e_ipsec_feature_check(skb, features);
5126 #endif
5127 }
5128
5129 out:
5130 /* Disable CSUM and GSO if skb cannot be offloaded by HW */
5131 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
5132 }
5133
mlx5e_features_check(struct sk_buff * skb,struct net_device * netdev,netdev_features_t features)5134 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
5135 struct net_device *netdev,
5136 netdev_features_t features)
5137 {
5138 struct mlx5e_priv *priv = netdev_priv(netdev);
5139
5140 features = vlan_features_check(skb, features);
5141
5142 	/* Check whether the tunneled packet can be offloaded by the HW */
5143 if (skb->encapsulation &&
5144 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
5145 return mlx5e_tunnel_features_check(priv, skb, features);
5146
5147 return features;
5148 }
5149
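/* Scan all TX queues and report every timed-out SQ to the TX health reporter;
 * stop early if the reporter already recovered by reopening the channels.
 */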
mlx5e_tx_timeout_work(struct work_struct * work)5150 static void mlx5e_tx_timeout_work(struct work_struct *work)
5151 {
5152 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
5153 tx_timeout_work);
5154 struct net_device *netdev = priv->netdev;
5155 int i;
5156
5157 for (i = 0; i < netdev->real_num_tx_queues; i++) {
5158 struct netdev_queue *dev_queue =
5159 netdev_get_tx_queue(netdev, i);
5160 struct mlx5e_txqsq *sq = priv->txq2sq[i];
5161
5162 if (!netif_xmit_timeout_ms(dev_queue))
5163 continue;
5164
5165 if (mlx5e_reporter_tx_timeout(sq))
5166 			/* break if we tried to recover by reopening the channels */
5167 break;
5168 }
5169 }
5170
mlx5e_tx_timeout(struct net_device * dev,unsigned int txqueue)5171 static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
5172 {
5173 struct mlx5e_priv *priv = netdev_priv(dev);
5174
5175 netdev_err(dev, "TX timeout detected\n");
5176 queue_work(priv->wq, &priv->tx_timeout_work);
5177 }
5178
mlx5e_xdp_allowed(struct net_device * netdev,struct mlx5_core_dev * mdev,struct mlx5e_params * params)5179 static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
5180 struct mlx5e_params *params)
5181 {
5182 if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
5183 netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
5184 return -EINVAL;
5185 }
5186
5187 if (!mlx5e_params_validate_xdp(netdev, mdev, params))
5188 return -EINVAL;
5189
5190 return 0;
5191 }
5192
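/* Swap the RQ's XDP program under priv->state_lock (RCU-protected) and drop
 * the reference to the old program, if any.
 */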
mlx5e_rq_replace_xdp_prog(struct mlx5e_rq * rq,struct bpf_prog * prog)5193 static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
5194 {
5195 struct bpf_prog *old_prog;
5196
5197 old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
5198 lockdep_is_held(&rq->priv->state_lock));
5199 if (old_prog)
5200 bpf_prog_put(old_prog);
5201 }
5202
mlx5e_xdp_set(struct net_device * netdev,struct bpf_prog * prog)5203 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
5204 {
5205 struct mlx5e_priv *priv = netdev_priv(netdev);
5206 struct mlx5e_params new_params;
5207 struct bpf_prog *old_prog;
5208 int err = 0;
5209 bool reset;
5210 int i;
5211
5212 mutex_lock(&priv->state_lock);
5213
5214 new_params = priv->channels.params;
5215 new_params.xdp_prog = prog;
5216
5217 if (prog) {
5218 err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
5219 if (err)
5220 goto unlock;
5221 }
5222
5223 /* no need for full reset when exchanging programs */
5224 reset = (!priv->channels.params.xdp_prog || !prog);
5225
5226 old_prog = priv->channels.params.xdp_prog;
5227
5228 err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
5229 if (err)
5230 goto unlock;
5231
5232 if (old_prog)
5233 bpf_prog_put(old_prog);
5234
5235 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
5236 goto unlock;
5237
5238 	/* When exchanging programs without a reset, we update the ref counts
5239 	 * on behalf of the channels' RQs here.
5240 	 */
5241 bpf_prog_add(prog, priv->channels.num);
5242 for (i = 0; i < priv->channels.num; i++) {
5243 struct mlx5e_channel *c = priv->channels.c[i];
5244
5245 mlx5e_rq_replace_xdp_prog(&c->rq, prog);
5246 if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
5247 bpf_prog_inc(prog);
5248 mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
5249 }
5250 }
5251
5252 unlock:
5253 mutex_unlock(&priv->state_lock);
5254
5255 /* Need to fix some features. */
5256 if (!err)
5257 netdev_update_features(netdev);
5258
5259 return err;
5260 }
5261
mlx5e_xdp(struct net_device * dev,struct netdev_bpf * xdp)5262 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5263 {
5264 switch (xdp->command) {
5265 case XDP_SETUP_PROG:
5266 return mlx5e_xdp_set(dev, xdp->prog);
5267 case XDP_SETUP_XSK_POOL:
5268 return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
5269 xdp->xsk.queue_id);
5270 default:
5271 return -EINVAL;
5272 }
5273 }
5274
5275 #ifdef CONFIG_MLX5_ESWITCH
mlx5e_bridge_getlink(struct sk_buff * skb,u32 pid,u32 seq,struct net_device * dev,u32 filter_mask,int nlflags)5276 static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5277 struct net_device *dev, u32 filter_mask,
5278 int nlflags)
5279 {
5280 struct mlx5e_priv *priv = netdev_priv(dev);
5281 struct mlx5_core_dev *mdev = priv->mdev;
5282 u8 mode, setting;
5283
5284 if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
5285 return -EOPNOTSUPP;
5286 mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
5287 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5288 mode,
5289 0, 0, nlflags, filter_mask, NULL);
5290 }
5291
5292 static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
5293 u16 flags, struct netlink_ext_ack *extack)
5294 {
5295 struct mlx5e_priv *priv = netdev_priv(dev);
5296 struct mlx5_core_dev *mdev = priv->mdev;
5297 struct nlattr *attr, *br_spec;
5298 u16 mode = BRIDGE_MODE_UNDEF;
5299 u8 setting;
5300 int rem;
5301
5302 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5303 if (!br_spec)
5304 return -EINVAL;
5305
5306 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
5307 mode = nla_get_u16(attr);
5308 if (mode > BRIDGE_MODE_VEPA)
5309 return -EINVAL;
5310
5311 break;
5312 }
5313
5314 if (mode == BRIDGE_MODE_UNDEF)
5315 return -EINVAL;
5316
5317 setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
5318 return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
5319 }
5320 #endif
5321
5322 const struct net_device_ops mlx5e_netdev_ops = {
5323 .ndo_open = mlx5e_open,
5324 .ndo_stop = mlx5e_close,
5325 .ndo_start_xmit = mlx5e_xmit,
5326 .ndo_setup_tc = mlx5e_setup_tc,
5327 .ndo_select_queue = mlx5e_select_queue,
5328 .ndo_get_stats64 = mlx5e_get_stats,
5329 .ndo_set_rx_mode_async = mlx5e_set_rx_mode,
5330 .ndo_set_mac_address = mlx5e_set_mac,
5331 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
5332 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
5333 .ndo_set_features = mlx5e_set_features,
5334 .ndo_fix_features = mlx5e_fix_features,
5335 .ndo_change_mtu = mlx5e_change_nic_mtu,
5336 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
5337 .ndo_features_check = mlx5e_features_check,
5338 .ndo_tx_timeout = mlx5e_tx_timeout,
5339 .ndo_bpf = mlx5e_xdp,
5340 .ndo_xdp_xmit = mlx5e_xdp_xmit,
5341 .ndo_xsk_wakeup = mlx5e_xsk_wakeup,
5342 .ndo_hwtstamp_get = mlx5e_hwtstamp_get_ndo,
5343 .ndo_hwtstamp_set = mlx5e_hwtstamp_set_ndo,
5344 #ifdef CONFIG_MLX5_EN_ARFS
5345 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
5346 #endif
5347 #ifdef CONFIG_MLX5_ESWITCH
5348 .ndo_bridge_setlink = mlx5e_bridge_setlink,
5349 .ndo_bridge_getlink = mlx5e_bridge_getlink,
5350
5351 /* SRIOV E-Switch NDOs */
5352 .ndo_set_vf_mac = mlx5e_set_vf_mac,
5353 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
5354 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
5355 .ndo_set_vf_trust = mlx5e_set_vf_trust,
5356 .ndo_set_vf_rate = mlx5e_set_vf_rate,
5357 .ndo_get_vf_config = mlx5e_get_vf_config,
5358 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
5359 .ndo_get_vf_stats = mlx5e_get_vf_stats,
5360 .ndo_has_offload_stats = mlx5e_has_offload_stats,
5361 .ndo_get_offload_stats = mlx5e_get_offload_stats,
5362 #endif
5363 };
5364
5365 void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
5366 {
5367 struct mlx5e_params *params = &priv->channels.params;
5368 struct mlx5_core_dev *mdev = priv->mdev;
5369
5370 params->sw_mtu = mtu;
5371 params->hard_mtu = MLX5E_ETH_HARD_MTU;
5372 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
5373 priv->max_nch);
5374 mlx5e_params_mqprio_reset(params);
5375
5376 /* SQ */
5377 params->log_sq_size = is_kdump_kernel() ?
5378 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
5379 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
5380 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
5381
5382 /* XDP SQ */
5383 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
5384
5385 /* set CQE compression */
5386 params->rx_cqe_compress_def = false;
5387 if (MLX5_CAP_GEN(mdev, cqe_compression) &&
5388 MLX5_CAP_GEN(mdev, vport_group_manager))
5389 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
5390
5391 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
5392 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
5393
5394 /* RQ */
5395 mlx5e_build_rq_params(mdev, params);
5396
5397 params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
5398
5399 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
5400
5401 /* CQ moderation params */
5402 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
5403 MLX5_CAP_GEN(mdev, cq_period_mode_modify);
5404 params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
5405 MLX5_CAP_GEN(mdev, cq_period_mode_modify);
5406 params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
5407 params->tx_moder_use_cqe_mode = false;
5408 mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
5409 params->rx_dim_enabled);
5410 mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
5411 params->tx_dim_enabled);
5412
5413 /* TX inline */
5414 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
5415
5416 /* AF_XDP */
5417 params->xsk = xsk;
5418
5419 /* Do not update netdev->features directly here;
5420 * mlx5e_update_features() is called from mlx5e_attach_netdev().
5421 * To change netdev->features, modify mlx5e_fix_features() instead.
5422 */
5423 }
5424
5425 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
5426 {
5427 struct mlx5e_priv *priv = netdev_priv(netdev);
5428 u8 addr[ETH_ALEN];
5429
5430 mlx5_query_mac_address(priv->mdev, addr);
5431 if (is_zero_ether_addr(addr) &&
5432 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
5433 eth_hw_addr_random(netdev);
5434 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
5435 return;
5436 }
5437
5438 eth_hw_addr_set(netdev, addr);
5439 }
5440
5441 static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
5442 unsigned int entry, struct udp_tunnel_info *ti)
5443 {
5444 struct mlx5e_priv *priv = netdev_priv(netdev);
5445
5446 return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
5447 }
5448
5449 static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
5450 unsigned int entry, struct udp_tunnel_info *ti)
5451 {
5452 struct mlx5e_priv *priv = netdev_priv(netdev);
5453
5454 return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
5455 }
5456
5457 void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
5458 {
5459 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
5460 return;
5461
5462 priv->nic_info.set_port = mlx5e_vxlan_set_port;
5463 priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
5464 priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
5465 priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
5466 /* Don't count the space hard-coded to the IANA port */
5467 priv->nic_info.tables[0].n_entries =
5468 mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
5469
5470 priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
5471 }
5472
5473 static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
5474 {
5475 int tt;
5476
5477 for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
5478 if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
5479 return true;
5480 }
5481 return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
5482 }
5483
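/* Per-queue statistics (netdev_stat_ops): RX stats aggregate the regular RQ
 * and the XSK RQ of the channel; alloc_fail reflects buffer allocation
 * errors on both.
 */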
5484 static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
5485 struct netdev_queue_stats_rx *stats)
5486 {
5487 struct mlx5e_priv *priv = netdev_priv(dev);
5488 struct mlx5e_channel_stats *channel_stats;
5489 struct mlx5e_rq_stats *xskrq_stats;
5490 struct mlx5e_rq_stats *rq_stats;
5491
5492 if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
5493 return;
5494
5495 channel_stats = priv->channel_stats[i];
5496 xskrq_stats = &channel_stats->xskrq;
5497 rq_stats = &channel_stats->rq;
5498
5499 stats->packets = rq_stats->packets + xskrq_stats->packets;
5500 stats->bytes = rq_stats->bytes + xskrq_stats->bytes;
5501 stats->alloc_fail = rq_stats->buff_alloc_err +
5502 xskrq_stats->buff_alloc_err;
5503 }
5504
5505 static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
5506 struct netdev_queue_stats_tx *stats)
5507 {
5508 struct mlx5e_priv *priv = netdev_priv(dev);
5509 struct mlx5e_sq_stats *sq_stats;
5510
5511 if (!priv->stats_nch)
5512 return;
5513
5514 /* No special case is needed for PTP, HTB, etc., since txq2sq_stats is
5515 * kept up to date for active SQs; get_base_stats takes care of inactive
5516 * SQs.
5517 */
5518 sq_stats = priv->txq2sq_stats[i];
5519 stats->packets = sq_stats->packets;
5520 stats->bytes = sq_stats->bytes;
5521 }
5522
5523 static void mlx5e_get_base_stats(struct net_device *dev,
5524 struct netdev_queue_stats_rx *rx,
5525 struct netdev_queue_stats_tx *tx)
5526 {
5527 struct mlx5e_priv *priv = netdev_priv(dev);
5528 struct mlx5e_ptp *ptp_channel;
5529 int i, tc;
5530
5531 if (!mlx5e_is_uplink_rep(priv)) {
5532 rx->packets = 0;
5533 rx->bytes = 0;
5534 rx->alloc_fail = 0;
5535
5536 for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) {
5537 struct netdev_queue_stats_rx rx_i = {0};
5538
5539 mlx5e_get_queue_stats_rx(dev, i, &rx_i);
5540
5541 rx->packets += rx_i.packets;
5542 rx->bytes += rx_i.bytes;
5543 rx->alloc_fail += rx_i.alloc_fail;
5544 }
5545
5546 /* always report PTP RX stats from base as there is no
5547 * corresponding channel to report them under in
5548 * mlx5e_get_queue_stats_rx.
5549 */
5550 if (priv->rx_ptp_opened) {
5551 struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
5552
5553 rx->packets += rq_stats->packets;
5554 rx->bytes += rq_stats->bytes;
5555 }
5556 }
5557
5558 tx->packets = 0;
5559 tx->bytes = 0;
5560
5561 for (i = 0; i < priv->stats_nch; i++) {
5562 struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
5563
5564 /* handle two cases:
5565 *
5566 * 1. channels which are active. In this case,
5567 * report only deactivated TCs on these channels.
5568 *
5569 * 2. channels which were deactivated
5570 * (i > priv->channels.params.num_channels)
5571 * must have all of their TCs [0 .. priv->max_opened_tc)
5572 * examined because deactivated channels will not be in the
5573 * range of [0..real_num_tx_queues) and will not have their
5574 * stats reported by mlx5e_get_queue_stats_tx.
5575 */
5576 if (i < priv->channels.params.num_channels)
5577 tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
5578 else
5579 tc = 0;
5580
5581 for (; tc < priv->max_opened_tc; tc++) {
5582 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc];
5583
5584 tx->packets += sq_stats->packets;
5585 tx->bytes += sq_stats->bytes;
5586 }
5587 }
5588
5589 /* if PTP TX was opened at some point and has since either:
5590 * - been shutdown and set to NULL, or
5591 * - simply disabled (bit unset)
5592 *
5593 * report stats directly from the ptp_stats structures as these queues
5594 * are now unavailable and there is no txq index to retrieve these
5595 * stats via calls to mlx5e_get_queue_stats_tx.
5596 */
5597 ptp_channel = priv->channels.ptp;
5598 if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) {
5599 for (tc = 0; tc < priv->max_opened_tc; tc++) {
5600 struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc];
5601
5602 tx->packets += sq_stats->packets;
5603 tx->bytes += sq_stats->bytes;
5604 }
5605 }
5606 }
5607
5608 static const struct netdev_stat_ops mlx5e_stat_ops = {
5609 .get_queue_stats_rx = mlx5e_get_queue_stats_rx,
5610 .get_queue_stats_tx = mlx5e_get_queue_stats_tx,
5611 .get_base_stats = mlx5e_get_base_stats,
5612 };
5613
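/* Queue management ops: per-queue restart is implemented by allocating a
 * complete replacement channel in ndo_queue_mem_alloc and swapping it in
 * for the old one in ndo_queue_start, under the priv state lock.
 */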
5614 struct mlx5_qmgmt_data {
5615 struct mlx5e_channel *c;
5616 };
5617
5618 static void mlx5e_queue_default_qcfg(struct net_device *dev,
5619 struct netdev_queue_config *qcfg)
5620 {
5621 qcfg->rx_page_size = PAGE_SIZE;
5622 }
5623
5624 static int mlx5e_queue_validate_qcfg(struct net_device *dev,
5625 struct netdev_queue_config *qcfg,
5626 struct netlink_ext_ack *extack)
5627 {
5628 struct mlx5e_priv *priv = netdev_priv(dev);
5629 struct mlx5_core_dev *mdev = priv->mdev;
5630 u32 max;
5631
5632 if (!is_power_of_2(qcfg->rx_page_size)) {
5633 netdev_err(priv->netdev, "rx_page_size not power of 2: %u",
5634 qcfg->rx_page_size);
5635 return -EINVAL;
5636 }
5637
5638 max = mlx5e_mpwrq_max_page_size(mdev);
5639 if (qcfg->rx_page_size < PAGE_SIZE || qcfg->rx_page_size > max) {
5640 netdev_err(priv->netdev,
5641 "Selected rx_page_size %u not in supported range [%lu, %u]\n",
5642 qcfg->rx_page_size, PAGE_SIZE, max);
5643 return -ERANGE;
5644 }
5645
5646 return 0;
5647 }
5648
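/* An rx_page_size larger than PAGE_SIZE is only allowed when the queue is
 * bound to an unreadable memory provider (zero-copy), as reported by
 * netif_rxq_has_unreadable_mp().
 */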
5649 static bool mlx5e_queue_validate_page_size(struct net_device *dev,
5650 struct netdev_queue_config *qcfg,
5651 int queue_index)
5652 {
5653 if (qcfg->rx_page_size == PAGE_SIZE)
5654 return true;
5655
5656 if (!netif_rxq_has_unreadable_mp(dev, queue_index))
5657 return false;
5658
5659 return true;
5660 }
5661
5662 static int mlx5e_queue_mem_alloc(struct net_device *dev,
5663 struct netdev_queue_config *qcfg,
5664 void *newq, int queue_index)
5665 {
5666 struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
5667 struct mlx5e_priv *priv = netdev_priv(dev);
5668 struct mlx5e_channels *chs = &priv->channels;
5669 struct mlx5e_params params = chs->params;
5670 int err;
5671
5672 mutex_lock(&priv->state_lock);
5673 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
5674 err = -ENODEV;
5675 goto unlock;
5676 }
5677
5678 if (queue_index >= chs->num) {
5679 err = -ERANGE;
5680 goto unlock;
5681 }
5682
5683 if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) ||
5684 chs->params.ptp_rx ||
5685 chs->params.xdp_prog ||
5686 priv->htb) {
5687 netdev_err(priv->netdev,
5688 "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n");
5689 err = -EOPNOTSUPP;
5690 goto unlock;
5691 }
5692
5693 if (!mlx5e_queue_validate_page_size(dev, qcfg, queue_index)) {
5694 netdev_err(priv->netdev, "High order pages are supported only in Zero-Copy mode\n");
5695 err = -EINVAL;
5696 goto unlock;
5697 }
5698
5699 err = mlx5e_open_channel(priv, queue_index, &params, qcfg, NULL,
5700 &new->c);
5701 unlock:
5702 mutex_unlock(&priv->state_lock);
5703 return err;
5704 }
5705
5706 static void mlx5e_queue_mem_free(struct net_device *dev, void *mem)
5707 {
5708 struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem;
5709
5710 /* Not supposed to happen since mlx5e_queue_start() never fails,
5711 * but close the channel here just in case.
5712 */
5713 if (data->c)
5714 mlx5e_close_channel(data->c);
5715 }
5716
5717 static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
5718 {
5719 /* In mlx5 a txq cannot simply be stopped in isolation, only restarted.
5720 * Since mlx5e_queue_start() does not fail, the old queue is stopped there.
5721 * TODO: Improve this.
5722 */
5723 return 0;
5724 }
5725
5726 static int mlx5e_queue_start(struct net_device *dev,
5727 struct netdev_queue_config *qcfg,
5728 void *newq, int queue_index)
5729 {
5730 struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
5731 struct mlx5e_priv *priv = netdev_priv(dev);
5732 struct mlx5e_channel *old;
5733
5734 mutex_lock(&priv->state_lock);
5735
5736 /* stop and close the old */
5737 old = priv->channels.c[queue_index];
5738 mlx5e_deactivate_priv_channels(priv);
5739 /* close old before activating new, to avoid napi conflict */
5740 mlx5e_close_channel(old);
5741
5742 /* start the new */
5743 priv->channels.c[queue_index] = new->c;
5744 mlx5e_activate_priv_channels(priv);
5745 mutex_unlock(&priv->state_lock);
5746 return 0;
5747 }
5748
5749 static struct device *mlx5e_queue_get_dma_dev(struct net_device *dev,
5750 int queue_index)
5751 {
5752 struct mlx5e_priv *priv = netdev_priv(dev);
5753 struct mlx5e_channels *channels;
5754 struct device *pdev = NULL;
5755 struct mlx5e_channel *ch;
5756
5757 channels = &priv->channels;
5758
5759 mutex_lock(&priv->state_lock);
5760
5761 if (queue_index >= channels->num)
5762 goto out;
5763
5764 ch = channels->c[queue_index];
5765 pdev = ch->pdev;
5766 out:
5767 mutex_unlock(&priv->state_lock);
5768
5769 return pdev;
5770 }
5771
5772 static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
5773 .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
5774 .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
5775 .ndo_queue_mem_free = mlx5e_queue_mem_free,
5776 .ndo_queue_start = mlx5e_queue_start,
5777 .ndo_queue_stop = mlx5e_queue_stop,
5778 .ndo_queue_get_dma_dev = mlx5e_queue_get_dma_dev,
5779 .ndo_default_qcfg = mlx5e_queue_default_qcfg,
5780 .ndo_validate_qcfg = mlx5e_queue_validate_qcfg,
5781 .supported_params = QCFG_RX_PAGE_SIZE,
5782 };
5783
5784 static void mlx5e_build_nic_netdev(struct net_device *netdev)
5785 {
5786 struct mlx5e_priv *priv = netdev_priv(netdev);
5787 struct mlx5_core_dev *mdev = priv->mdev;
5788 bool fcs_supported;
5789 bool fcs_enabled;
5790
5791 SET_NETDEV_DEV(netdev, mdev->device);
5792
5793 netdev->netdev_ops = &mlx5e_netdev_ops;
5794 netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops;
5795 netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
5796 netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
5797 netdev->request_ops_lock = true;
5798 netdev_lockdep_set_classes(netdev);
5799
5800 mlx5e_dcbnl_build_netdev(netdev);
5801
5802 netdev->watchdog_timeo = 15 * HZ;
5803
5804 netdev->stat_ops = &mlx5e_stat_ops;
5805 netdev->ethtool_ops = &mlx5e_ethtool_ops;
5806
5807 netdev->vlan_features |= NETIF_F_SG;
5808 netdev->vlan_features |= NETIF_F_HW_CSUM;
5809 netdev->vlan_features |= NETIF_F_HW_MACSEC;
5810 netdev->vlan_features |= NETIF_F_GRO;
5811 netdev->vlan_features |= NETIF_F_TSO;
5812 netdev->vlan_features |= NETIF_F_TSO6;
5813 netdev->vlan_features |= NETIF_F_RXCSUM;
5814 netdev->vlan_features |= NETIF_F_RXHASH;
5815 netdev->vlan_features |= NETIF_F_GSO_PARTIAL;
5816
5817 netdev->mpls_features |= NETIF_F_SG;
5818 netdev->mpls_features |= NETIF_F_HW_CSUM;
5819 netdev->mpls_features |= NETIF_F_TSO;
5820 netdev->mpls_features |= NETIF_F_TSO6;
5821
5822 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
5823 netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
5824
5825 /* Tunneled LRO is not supported in the driver, and the same RQs are
5826 * shared between inner and outer TIRs, so the driver can't disable LRO
5827 * for inner TIRs while having it enabled for outer TIRs. Due to this,
5828 * block LRO altogether if the firmware declares tunneled LRO support.
5829 */
5830 if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
5831 !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
5832 !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
5833 mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
5834 MLX5E_MPWRQ_UMR_MODE_ALIGNED))
5835 netdev->vlan_features |= NETIF_F_LRO;
5836
5837 if (mlx5e_hw_gro_supported(mdev) &&
5838 mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
5839 MLX5E_MPWRQ_UMR_MODE_ALIGNED))
5840 netdev->vlan_features |= NETIF_F_GRO_HW;
5841
5842 netdev->hw_features = netdev->vlan_features;
5843 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
5844 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
5845 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5846 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
5847
5848 if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
5849 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
5850 netdev->hw_enc_features |= NETIF_F_TSO;
5851 netdev->hw_enc_features |= NETIF_F_TSO6;
5852 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
5853 }
5854
5855 if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
5856 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
5857 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5858 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
5859 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5860 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
5861 netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
5862 NETIF_F_GSO_UDP_TUNNEL_CSUM;
5863 }
5864
5865 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
5866 netdev->hw_features |= NETIF_F_GSO_GRE |
5867 NETIF_F_GSO_GRE_CSUM;
5868 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
5869 NETIF_F_GSO_GRE_CSUM;
5870 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
5871 netdev->vlan_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
5872 }
5873
5874 if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
5875 netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
5876 NETIF_F_GSO_IPXIP6;
5877 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
5878 NETIF_F_GSO_IPXIP6;
5879 netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
5880 NETIF_F_GSO_IPXIP6;
5881 }
5882
5883 netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
5884 netdev->hw_features |= NETIF_F_GSO_UDP_L4;
5885 netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
5886
5887 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
5888
5889 if (fcs_supported)
5890 netdev->hw_features |= NETIF_F_RXALL;
5891
5892 if (MLX5_CAP_ETH(mdev, scatter_fcs))
5893 netdev->hw_features |= NETIF_F_RXFCS;
5894
5895 if (mlx5_qos_is_supported(mdev))
5896 netdev->hw_features |= NETIF_F_HW_TC;
5897
5898 netdev->features = netdev->hw_features;
5899
5900 /* Defaults */
5901 if (fcs_enabled)
5902 netdev->features &= ~NETIF_F_RXALL;
5903 netdev->features &= ~NETIF_F_LRO;
5904 netdev->features &= ~NETIF_F_GRO_HW;
5905 netdev->features &= ~NETIF_F_RXFCS;
5906
5907 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
5908 if (FT_CAP(flow_modify_en) &&
5909 FT_CAP(modify_root) &&
5910 FT_CAP(identified_miss_table_mode) &&
5911 FT_CAP(flow_table_modify)) {
5912 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
5913 netdev->hw_features |= NETIF_F_HW_TC;
5914 #endif
5915 #if IS_ENABLED(CONFIG_MLX5_EN_ARFS)
5916 netdev->hw_features |= NETIF_F_NTUPLE;
5917 #elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC)
5918 netdev->features |= NETIF_F_NTUPLE;
5919 #endif
5920 }
5921
5922 netdev->features |= NETIF_F_HIGHDMA;
5923 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5924
5925 netdev->priv_flags |= IFF_UNICAST_FLT;
5926
5927 netdev->netmem_tx = true;
5928
5929 netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
5930 mlx5e_set_xdp_feature(priv);
5931 mlx5e_set_netdev_dev_addr(netdev);
5932 mlx5e_macsec_build_netdev(priv);
5933 mlx5e_ipsec_build_netdev(priv);
5934 mlx5e_ktls_build_netdev(priv);
5935 }
5936
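/* Allocate one Q counter per device in the Socket-Direct group, plus a
 * separate counter for the drop RQ. Allocation failures are tolerated;
 * the corresponding counter IDs are simply left unset.
 */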
5937 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
5938 {
5939 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
5940 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
5941 struct mlx5_core_dev *mdev = priv->mdev;
5942 struct mlx5_core_dev *pos;
5943 int err, i;
5944
5945 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
5946
5947 mlx5_sd_for_each_dev(i, mdev, pos) {
5948 err = mlx5_cmd_exec_inout(pos, alloc_q_counter, in, out);
5949 if (!err)
5950 priv->q_counter[i] =
5951 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5952 }
5953
5954 err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
5955 if (!err)
5956 priv->drop_rq_q_counter =
5957 MLX5_GET(alloc_q_counter_out, out, counter_set_id);
5958 }
5959
5960 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
5961 {
5962 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
5963 struct mlx5_core_dev *pos;
5964 int i;
5965
5966 MLX5_SET(dealloc_q_counter_in, in, opcode,
5967 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
5968 mlx5_sd_for_each_dev(i, priv->mdev, pos) {
5969 if (priv->q_counter[i]) {
5970 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5971 priv->q_counter[i]);
5972 mlx5_cmd_exec_in(pos, dealloc_q_counter, in);
5973 }
5974 }
5975
5976 if (priv->drop_rq_q_counter) {
5977 MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
5978 priv->drop_rq_q_counter);
5979 mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
5980 }
5981 }
5982
5983 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
5984 struct net_device *netdev)
5985 {
5986 const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
5987 struct mlx5e_priv *priv = netdev_priv(netdev);
5988 struct mlx5e_flow_steering *fs;
5989 int err;
5990
5991 mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
5992 mlx5e_vxlan_set_netdev_info(priv);
5993
5994 mlx5e_timestamp_init(priv);
5995
5996 priv->dfs_root = debugfs_create_dir("nic",
5997 mlx5_debugfs_get_dev_root(mdev));
5998
5999 fs = mlx5e_fs_init(priv->profile, mdev,
6000 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
6001 priv->dfs_root);
6002 if (!fs) {
6003 err = -ENOMEM;
6004 mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
6005 debugfs_remove_recursive(priv->dfs_root);
6006 return err;
6007 }
6008 priv->fs = fs;
6009
6010 err = mlx5e_psp_init(priv);
6011 if (err)
6012 mlx5_core_err(mdev, "PSP initialization failed, %d\n", err);
6013
6014 err = mlx5e_ktls_init(priv);
6015 if (err)
6016 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
6017
6018 mlx5e_health_create_reporters(priv);
6019
6020 /* If netdev is already registered (e.g. move from uplink to nic profile),
6021 * RTNL lock must be held before triggering netdev notifiers.
6022 */
6023 if (take_rtnl)
6024 rtnl_lock();
6025
6026 /* update XDP supported features */
6027 mlx5e_set_xdp_feature(priv);
6028
6029 if (take_rtnl)
6030 rtnl_unlock();
6031
6032 return 0;
6033 }
6034
6035 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
6036 {
6037 mlx5e_health_destroy_reporters(priv);
6038 mlx5e_ktls_cleanup(priv);
6039 mlx5e_psp_cleanup(priv);
6040 mlx5e_fs_cleanup(priv->fs);
6041 debugfs_remove_recursive(priv->dfs_root);
6042 priv->fs = NULL;
6043 }
6044
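/* NIC profile RX init: Q counters, drop RQ, RX resources (RQTs/TIRs),
 * flow steering, TC offloads and RX accelerations are set up in that
 * order, and torn down in reverse on failure.
 */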
6045 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
6046 {
6047 struct mlx5_core_dev *mdev = priv->mdev;
6048 enum mlx5e_rx_res_features features;
6049 int err;
6050
6051 mlx5e_create_q_counters(priv);
6052
6053 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
6054 if (err) {
6055 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
6056 goto err_destroy_q_counters;
6057 }
6058
6059 features = MLX5E_RX_RES_FEATURE_PTP;
6060 if (mlx5_tunnel_inner_ft_supported(mdev))
6061 features |= MLX5E_RX_RES_FEATURE_INNER_FT;
6062 if (mlx5_get_sd(priv->mdev))
6063 features |= MLX5E_RX_RES_FEATURE_MULTI_VHCA;
6064
6065 priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
6066 &priv->channels.params.packet_merge,
6067 priv->channels.params.num_channels);
6068 if (IS_ERR(priv->rx_res)) {
6069 err = PTR_ERR(priv->rx_res);
6070 priv->rx_res = NULL;
6071 mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
6072 goto err_close_drop_rq;
6073 }
6074
6075 err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
6076 priv->netdev);
6077 if (err) {
6078 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
6079 goto err_destroy_rx_res;
6080 }
6081
6082 err = mlx5e_tc_nic_init(priv);
6083 if (err)
6084 goto err_destroy_flow_steering;
6085
6086 err = mlx5e_accel_init_rx(priv);
6087 if (err)
6088 goto err_tc_nic_cleanup;
6089
6090 #ifdef CONFIG_MLX5_EN_ARFS
6091 priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
6092 #endif
6093
6094 return 0;
6095
6096 err_tc_nic_cleanup:
6097 mlx5e_tc_nic_cleanup(priv);
6098 err_destroy_flow_steering:
6099 mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
6100 priv->profile);
6101 err_destroy_rx_res:
6102 mlx5e_rx_res_destroy(priv->rx_res);
6103 priv->rx_res = NULL;
6104 err_close_drop_rq:
6105 mlx5e_close_drop_rq(&priv->drop_rq);
6106 err_destroy_q_counters:
6107 mlx5e_destroy_q_counters(priv);
6108 return err;
6109 }
6110
6111 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
6112 {
6113 mlx5e_accel_cleanup_rx(priv);
6114 mlx5e_tc_nic_cleanup(priv);
6115 mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
6116 priv->profile);
6117 mlx5e_rx_res_destroy(priv->rx_res);
6118 priv->rx_res = NULL;
6119 mlx5e_close_drop_rq(&priv->drop_rq);
6120 mlx5e_destroy_q_counters(priv);
6121 }
6122
6123 static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
6124 {
6125 struct mlx5e_params *params;
6126 struct mlx5e_mqprio_rl *rl;
6127
6128 params = &priv->channels.params;
6129 if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
6130 return;
6131
6132 rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
6133 params->mqprio.channel.max_rate);
6134 if (IS_ERR(rl))
6135 rl = NULL;
6136 priv->mqprio_rl = rl;
6137 mlx5e_mqprio_rl_update_params(params, rl);
6138 }
6139
6140 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
6141 {
6142 int err;
6143
6144 err = mlx5e_accel_init_tx(priv);
6145 if (err)
6146 return err;
6147
6148 mlx5e_set_mqprio_rl(priv);
6149 mlx5e_dcbnl_initialize(priv);
6150 return 0;
6151 }
6152
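/* "enable" stage of the NIC profile: set up L2/IPsec/PSP/MACsec state, sync
 * MTU and admin link state with the device, register with LAG and the event
 * machinery, and finally re-open the netdev if it was running.
 */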
6153 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
6154 {
6155 struct net_device *netdev = priv->netdev;
6156 struct mlx5_core_dev *mdev = priv->mdev;
6157 int err;
6158
6159 mlx5e_fs_init_l2_addr(priv->fs, netdev);
6160 mlx5e_ipsec_init(priv);
6161 mlx5e_psp_register(priv);
6162
6163 err = mlx5e_macsec_init(priv);
6164 if (err)
6165 mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
6166
6167 /* Mark the link as currently not needed by the driver */
6168 if (!netif_running(netdev))
6169 mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
6170
6171 mlx5e_set_netdev_mtu_boundaries(priv);
6172 mlx5e_set_dev_port_mtu(priv);
6173
6174 mlx5_lag_add_netdev(mdev, netdev);
6175
6176 mlx5e_enable_async_events(priv);
6177 mlx5e_enable_blocking_events(priv);
6178 if (mlx5e_monitor_counter_supported(priv))
6179 mlx5e_monitor_counter_init(priv);
6180
6181 mlx5e_pcie_cong_event_init(priv);
6182 mlx5e_hv_vhca_stats_create(priv);
6183 if (netdev->reg_state != NETREG_REGISTERED)
6184 return;
6185 mlx5e_dcbnl_init_app(priv);
6186
6187 mlx5e_nic_set_rx_mode(priv);
6188
6189 rtnl_lock();
6190 netdev_lock(netdev);
6191 if (netif_running(netdev))
6192 mlx5e_open(netdev);
6193 udp_tunnel_nic_reset_ntf(priv->netdev);
6194 netdev_unlock(netdev);
6195 netif_device_attach(netdev);
6196 rtnl_unlock();
6197 }
6198
6199 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
6200 {
6201 struct mlx5_core_dev *mdev = priv->mdev;
6202
6203 if (priv->netdev->reg_state == NETREG_REGISTERED)
6204 mlx5e_dcbnl_delete_app(priv);
6205
6206 rtnl_lock();
6207 netdev_lock(priv->netdev);
6208 if (netif_running(priv->netdev))
6209 mlx5e_close(priv->netdev);
6210 netif_device_detach(priv->netdev);
6211 if (priv->en_trap) {
6212 mlx5e_deactivate_trap(priv);
6213 mlx5e_close_trap(priv->en_trap);
6214 priv->en_trap = NULL;
6215 }
6216 netdev_unlock(priv->netdev);
6217 rtnl_unlock();
6218
6219 mlx5e_nic_set_rx_mode(priv);
6220
6221 mlx5e_pcie_cong_event_cleanup(priv);
6222 mlx5e_hv_vhca_stats_destroy(priv);
6223 if (mlx5e_monitor_counter_supported(priv))
6224 mlx5e_monitor_counter_cleanup(priv);
6225
6226 mlx5e_ipsec_disable_events(priv);
6227 mlx5e_disable_blocking_events(priv);
6228 mlx5e_disable_async_events(priv);
6229 mlx5_lag_remove_netdev(mdev, priv->netdev);
6230 mlx5_vxlan_reset_to_default(mdev->vxlan);
6231 mlx5e_macsec_cleanup(priv);
6232 mlx5e_psp_unregister(priv);
6233 mlx5e_ipsec_cleanup(priv);
6234 }
6235
6236 static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
6237 {
6238 return mlx5e_refresh_tirs(priv->mdev, false, false);
6239 }
6240
6241 static const struct mlx5e_profile mlx5e_nic_profile = {
6242 .init = mlx5e_nic_init,
6243 .cleanup = mlx5e_nic_cleanup,
6244 .init_rx = mlx5e_init_nic_rx,
6245 .cleanup_rx = mlx5e_cleanup_nic_rx,
6246 .init_tx = mlx5e_init_nic_tx,
6247 .cleanup_tx = mlx5e_cleanup_nic_tx,
6248 .enable = mlx5e_nic_enable,
6249 .disable = mlx5e_nic_disable,
6250 .update_rx = mlx5e_update_nic_rx,
6251 .update_stats = mlx5e_stats_update_ndo_stats,
6252 .update_carrier = mlx5e_update_carrier,
6253 .rx_handlers = &mlx5e_rx_handlers_nic,
6254 .max_tc = MLX5_MAX_NUM_TC,
6255 .stats_grps = mlx5e_nic_stats_grps,
6256 .stats_grps_num = mlx5e_nic_stats_grps_num,
6257 .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
6258 BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
6259 BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
6260 BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
6261 BIT(MLX5E_PROFILE_FEATURE_FS_TC),
6262 };
6263
6264 static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
6265 const struct mlx5e_profile *profile)
6266 {
6267 int nch;
6268
6269 nch = mlx5e_get_max_num_channels(mdev);
6270
6271 if (profile->max_nch_limit)
6272 nch = min_t(int, nch, profile->max_nch_limit(mdev));
6273 return nch;
6274 }
6275
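/* The effective maximum number of channels is bounded by core resources,
 * by the number of netdev RX queues, and by the number of netdev TX queues
 * remaining after reserving slots for QoS (HTB) and PTP SQs.
 */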
6276 static unsigned int
6277 mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
6278 const struct mlx5e_profile *profile)
6279
6280 {
6281 unsigned int max_nch, tmp;
6282
6283 /* core resources */
6284 max_nch = mlx5e_profile_max_num_channels(mdev, profile);
6285
6286 /* netdev rx queues */
6287 max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
6288
6289 /* netdev tx queues */
6290 tmp = netdev->num_tx_queues;
6291 if (mlx5_qos_is_supported(mdev))
6292 tmp -= mlx5e_qos_max_leaf_nodes(mdev);
6293 if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
6294 tmp -= profile->max_tc;
6295 tmp = tmp / profile->max_tc;
6296 max_nch = min_t(unsigned int, max_nch, tmp);
6297
6298 return max_nch;
6299 }
6300
6301 int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
6302 {
6303 /* Indirect TIRS: 2 sets of TTCs (inner + outer steering)
6304 * and 1 set of direct TIRS
6305 */
6306 return 2 * MLX5E_NUM_INDIR_TIRS
6307 + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
6308 }
6309
6310 void mlx5e_set_rx_mode_work(struct work_struct *work)
6311 {
6312 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
6313 set_rx_mode_work);
6314 struct net_device *dev = priv->netdev;
6315
6316 netdev_lock_ops(dev);
6317 mlx5e_fs_set_rx_mode_work(priv->fs, dev, NULL, NULL);
6318 netdev_unlock_ops(dev);
6319 }
6320
6321 /* mlx5e generic netdev management API (move to en_common.c) */
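/* Initialize the parts of priv that do not depend on channels being open:
 * locks, the TX selector state, the workqueue and work items, per-TXQ
 * lookup tables and per-channel stats storage. Everything allocated here
 * is released in mlx5e_priv_cleanup().
 */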
6322 int mlx5e_priv_init(struct mlx5e_priv *priv,
6323 const struct mlx5e_profile *profile,
6324 struct net_device *netdev,
6325 struct mlx5_core_dev *mdev)
6326 {
6327 int nch, num_txqs, node;
6328 int err;
6329
6330 num_txqs = netdev->num_tx_queues;
6331 nch = mlx5e_calc_max_nch(mdev, netdev, profile);
6332 node = dev_to_node(mlx5_core_dma_dev(mdev));
6333
6334 /* priv init */
6335 priv->mdev = mdev;
6336 priv->netdev = netdev;
6337 priv->max_nch = nch;
6338 priv->max_opened_tc = 1;
6339
6340 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
6341 return -ENOMEM;
6342
6343 mutex_init(&priv->state_lock);
6344
6345 err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
6346 if (err)
6347 goto err_free_cpumask;
6348
6349 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
6350 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
6351 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
6352 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
6353
6354 priv->wq = create_singlethread_workqueue("mlx5e");
6355 if (!priv->wq)
6356 goto err_free_selq;
6357
6358 priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
6359 if (!priv->txq2sq)
6360 goto err_destroy_workqueue;
6361
6362 priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
6363 if (!priv->txq2sq_stats)
6364 goto err_free_txq2sq;
6365
6366 priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
6367 if (!priv->tx_rates)
6368 goto err_free_txq2sq_stats;
6369
6370 priv->channel_stats =
6371 kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
6372 if (!priv->channel_stats)
6373 goto err_free_tx_rates;
6374
6375 priv->fec_ranges = kzalloc_objs(*priv->fec_ranges, ETHTOOL_FEC_HIST_MAX);
6376 if (!priv->fec_ranges)
6377 goto err_free_channel_stats;
6378
6379 return 0;
6380
6381 err_free_channel_stats:
6382 kfree(priv->channel_stats);
6383 err_free_tx_rates:
6384 kfree(priv->tx_rates);
6385 err_free_txq2sq_stats:
6386 kfree(priv->txq2sq_stats);
6387 err_free_txq2sq:
6388 kfree(priv->txq2sq);
6389 err_destroy_workqueue:
6390 destroy_workqueue(priv->wq);
6391 err_free_selq:
6392 mlx5e_selq_cleanup(&priv->selq);
6393 err_free_cpumask:
6394 free_cpumask_var(priv->scratchpad.cpumask);
6395 return -ENOMEM;
6396 }
6397
6398 void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
6399 {
6400 bool destroying = test_bit(MLX5E_STATE_DESTROYING, &priv->state);
6401 int i;
6402
6403 /* bail if change profile failed and also rollback failed */
6404 if (!priv->mdev)
6405 return;
6406
6407 kfree(priv->fec_ranges);
6408 for (i = 0; i < priv->stats_nch; i++)
6409 kvfree(priv->channel_stats[i]);
6410 kfree(priv->channel_stats);
6411 kfree(priv->tx_rates);
6412 kfree(priv->txq2sq_stats);
6413 kfree(priv->txq2sq);
6414 destroy_workqueue(priv->wq);
6415 mlx5e_selq_cleanup(&priv->selq);
6416 free_cpumask_var(priv->scratchpad.cpumask);
6417
6418 for (i = 0; i < priv->htb_max_qos_sqs; i++)
6419 kfree(priv->htb_qos_sq_stats[i]);
6420 kvfree(priv->htb_qos_sq_stats);
6421
6422 if (priv->mqprio_rl) {
6423 mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
6424 mlx5e_mqprio_rl_free(priv->mqprio_rl);
6425 }
6426
6427 memset(priv, 0, sizeof(*priv));
6428 if (destroying) /* restore destroying bit, to allow unload */
6429 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
6430 }
6431
6432 static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
6433 const struct mlx5e_profile *profile)
6434 {
6435 unsigned int nch, ptp_txqs, qos_txqs;
6436
6437 nch = mlx5e_profile_max_num_channels(mdev, profile);
6438
6439 ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
6440 mlx5e_profile_feature_cap(profile, PTP_TX) ?
6441 profile->max_tc : 0;
6442
6443 qos_txqs = mlx5_qos_is_supported(mdev) &&
6444 mlx5e_profile_feature_cap(profile, QOS_HTB) ?
6445 mlx5e_qos_max_leaf_nodes(mdev) : 0;
6446
6447 return nch * profile->max_tc + ptp_txqs + qos_txqs;
6448 }
6449
6450 static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
6451 const struct mlx5e_profile *profile)
6452 {
6453 return mlx5e_profile_max_num_channels(mdev, profile);
6454 }
6455
6456 struct net_device *
6457 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
6458 {
6459 struct net_device *netdev;
6460 unsigned int txqs, rxqs;
6461 int err;
6462
6463 txqs = mlx5e_get_max_num_txqs(mdev, profile);
6464 rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
6465
6466 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
6467 if (!netdev) {
6468 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
6469 return NULL;
6470 }
6471
6472 err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
6473 if (err) {
6474 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
6475 goto err_free_netdev;
6476 }
6477
6478 netif_carrier_off(netdev);
6479 netif_tx_disable(netdev);
6480 dev_net_set(netdev, mlx5_core_net(mdev));
6481
6482 return netdev;
6483
6484 err_free_netdev:
6485 free_netdev(netdev);
6486
6487 return NULL;
6488 }
6489
6490 static void mlx5e_update_features(struct net_device *netdev)
6491 {
6492 if (netdev->reg_state != NETREG_REGISTERED)
6493 return; /* features will be updated on netdev registration */
6494
6495 rtnl_lock();
6496 netdev_lock(netdev);
6497 netdev_update_features(netdev);
6498 netdev_unlock(netdev);
6499 rtnl_unlock();
6500 }
6501
6502 static void mlx5e_reset_channels(struct net_device *netdev)
6503 {
6504 netdev_reset_tc(netdev);
6505 }
6506
6507 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
6508 {
6509 const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
6510 const struct mlx5e_profile *profile = priv->profile;
6511 int max_nch;
6512 int err;
6513
6514 clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
6515 if (priv->fs)
6516 mlx5e_fs_set_state_destroy(priv->fs,
6517 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
6518
6519 /* Validate the max_wqe_size_sq capability. */
6520 if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
6521 mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %u\n",
6522 mlx5e_get_max_sq_wqebbs(priv->mdev), (unsigned int)MLX5E_MAX_TX_WQEBBS);
6523 return -EIO;
6524 }
6525
6526 /* max number of channels may have changed */
6527 max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
6528
6529 /* Locking is required by ethtool_rxfh_indir_lost() (sends
6530 * ETHTOOL_MSG_RSS_NTF) and by netif_set_real_num_*_queues in case
6531 * the netdev has been registered by this point (if this function
6532 * was called in the reload or resume flow).
6533 */
6534 if (need_lock) {
6535 rtnl_lock();
6536 netdev_lock(priv->netdev);
6537 }
6538
6539 if (priv->channels.params.num_channels > max_nch) {
6540 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
6541 /* Reducing the number of channels - RXFH has to be reset, and
6542 * mlx5e_num_channels_changed below will build the RQT.
6543 */
6544 ethtool_rxfh_indir_lost(priv->netdev);
6545 priv->channels.params.num_channels = max_nch;
6546 if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
6547 mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
6548 mlx5e_params_mqprio_reset(&priv->channels.params);
6549 }
6550 }
6551 if (max_nch != priv->max_nch) {
6552 mlx5_core_warn(priv->mdev,
6553 "MLX5E: Updating max number of channels from %u to %u\n",
6554 priv->max_nch, max_nch);
6555 priv->max_nch = max_nch;
6556 }
6557
6558 /* 1. Set the real number of queues in the kernel the first time.
6559 * 2. Set our default XPS cpumask.
6560 * 3. Build the RQT.
6561 */
6562 err = mlx5e_num_channels_changed(priv);
6563 if (need_lock) {
6564 netdev_unlock(priv->netdev);
6565 rtnl_unlock();
6566 }
6567 if (err)
6568 goto out;
6569
6570 err = profile->init_tx(priv);
6571 if (err)
6572 goto out;
6573
6574 err = profile->init_rx(priv);
6575 if (err)
6576 goto err_cleanup_tx;
6577
6578 if (profile->enable)
6579 profile->enable(priv);
6580
6581 mlx5e_update_features(priv->netdev);
6582
6583 return 0;
6584
6585 err_cleanup_tx:
6586 profile->cleanup_tx(priv);
6587
6588 out:
6589 mlx5e_reset_channels(priv->netdev);
6590 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
6591 if (priv->fs)
6592 mlx5e_fs_set_state_destroy(priv->fs,
6593 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
6594 cancel_work_sync(&priv->update_stats_work);
6595 return err;
6596 }
6597
6598 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
6599 {
6600 const struct mlx5e_profile *profile = priv->profile;
6601
6602 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
6603 if (priv->fs)
6604 mlx5e_fs_set_state_destroy(priv->fs,
6605 !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
6606
6607 if (profile->disable)
6608 profile->disable(priv);
6609 flush_workqueue(priv->wq);
6610
6611 profile->cleanup_rx(priv);
6612 profile->cleanup_tx(priv);
6613 mlx5e_reset_channels(priv->netdev);
6614 cancel_work_sync(&priv->update_stats_work);
6615 }
6616
6617 static int
6618 mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
6619 const struct mlx5e_profile *new_profile, void *new_ppriv)
6620 {
6621 struct mlx5e_priv *priv = netdev_priv(netdev);
6622 int err;
6623
6624 err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
6625 if (err) {
6626 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
6627 return err;
6628 }
6629 netif_carrier_off(netdev);
6630 priv->profile = new_profile;
6631 priv->ppriv = new_ppriv;
6632 err = new_profile->init(priv->mdev, priv->netdev);
6633 if (err)
6634 goto priv_cleanup;
6635
6636 return 0;
6637
6638 priv_cleanup:
6639 mlx5e_priv_cleanup(priv);
6640 return err;
6641 }
6642
6643 static int
6644 mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
6645 const struct mlx5e_profile *new_profile, void *new_ppriv)
6646 {
6647 struct mlx5e_priv *priv = netdev_priv(netdev);
6648 int err;
6649
6650 err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
6651 if (err)
6652 return err;
6653
6654 err = mlx5e_attach_netdev(priv);
6655 if (err)
6656 goto profile_cleanup;
6657 return err;
6658
6659 profile_cleanup:
6660 new_profile->cleanup(priv);
6661 mlx5e_priv_cleanup(priv);
6662 return err;
6663 }
6664
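/* Switch the netdev to a different profile (e.g. NIC <-> uplink representor):
 * detach and clean up the current profile, then init and attach the new one.
 * If attaching the new profile fails, attempt to roll back to the original.
 */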
6665 int mlx5e_netdev_change_profile(struct net_device *netdev,
6666 struct mlx5_core_dev *mdev,
6667 const struct mlx5e_profile *new_profile,
6668 void *new_ppriv)
6669 {
6670 struct mlx5e_priv *priv = netdev_priv(netdev);
6671 const struct mlx5e_profile *orig_profile;
6672 int err, rollback_err;
6673 void *orig_ppriv;
6674
6675 orig_profile = priv->profile;
6676 orig_ppriv = priv->ppriv;
6677
6678 /* priv->profile can be NULL if a previous change_profile failed to roll back */
6679 if (priv->profile) {
6680 WARN_ON_ONCE(priv->mdev != mdev);
6681 /* cleanup old profile */
6682 mlx5e_detach_netdev(priv);
6683 priv->profile->cleanup(priv);
6684 mlx5e_priv_cleanup(priv);
6685 }
6686 /* priv members are not valid from this point ... */
6687
6688 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
6689 mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
6690 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
6691 return -EIO;
6692 }
6693
6694 err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
6695 if (err) { /* roll back to original profile */
6696 netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
6697 goto rollback;
6698 }
6699
6700 return 0;
6701
6702 rollback:
6703 if (!orig_profile) {
6704 netdev_warn(netdev, "no original profile to rollback to\n");
6705 priv->profile = NULL;
6706 return err;
6707 }
6708
6709 rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
6710 if (rollback_err) {
6711 netdev_err(netdev, "failed to rollback to orig profile, %d\n",
6712 rollback_err);
6713 priv->profile = NULL;
6714 }
6715 return err;
6716 }
6717
6718 void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
6719 struct mlx5_core_dev *mdev)
6720 {
6721 mlx5e_netdev_change_profile(netdev, mdev, &mlx5e_nic_profile, NULL);
6722 }
6723
6724 void mlx5e_destroy_netdev(struct net_device *netdev)
6725 {
6726 struct mlx5e_priv *priv = netdev_priv(netdev);
6727
6728 if (priv->profile)
6729 mlx5e_priv_cleanup(priv);
6730 free_netdev(netdev);
6731 }
6732
6733 static int _mlx5e_resume(struct auxiliary_device *adev)
6734 {
6735 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6736 struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
6737 struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
6738 struct net_device *netdev = mlx5e_dev->netdev;
6739 struct mlx5_core_dev *mdev = edev->mdev;
6740 struct mlx5_core_dev *pos, *to;
6741 int err, i;
6742
6743 if (netif_device_present(netdev))
6744 return 0;
6745
6746 mlx5_sd_for_each_dev(i, mdev, pos) {
6747 err = mlx5e_create_mdev_resources(pos, true);
6748 if (err)
6749 goto err_destroy_mdev_res;
6750 }
6751
6752 err = mlx5e_attach_netdev(priv);
6753 if (err)
6754 goto err_destroy_mdev_res;
6755
6756 return 0;
6757
6758 err_destroy_mdev_res:
6759 to = pos;
6760 mlx5_sd_for_each_dev_to(i, mdev, to, pos)
6761 mlx5e_destroy_mdev_resources(pos);
6762 return err;
6763 }
6764
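/* Socket-Direct aware wrapper: resolve the auxiliary device that actually
 * owns the netdev of the SD group before resuming it.
 */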
6765 static int mlx5e_resume(struct auxiliary_device *adev)
6766 {
6767 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6768 struct mlx5_core_dev *mdev = edev->mdev;
6769 struct auxiliary_device *actual_adev;
6770 int err;
6771
6772 err = mlx5_sd_init(mdev);
6773 if (err)
6774 return err;
6775
6776 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6777 if (actual_adev) {
6778 err = _mlx5e_resume(actual_adev);
6779 mlx5_sd_put_adev(actual_adev, adev);
6780 }
6781 return err;
6782 }
6783
6784 static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
6785 {
6786 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6787 struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
6788 struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
6789 struct net_device *netdev = mlx5e_dev->netdev;
6790 struct mlx5_core_dev *mdev = edev->mdev;
6791 struct mlx5_core_dev *pos;
6792 int i;
6793
6794 if (!pre_netdev_reg && !netif_device_present(netdev)) {
6795 if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
6796 mlx5_sd_for_each_dev(i, mdev, pos)
6797 mlx5e_destroy_mdev_resources(pos);
6798 return -ENODEV;
6799 }
6800
6801 mlx5e_detach_netdev(priv);
6802 mlx5_sd_for_each_dev(i, mdev, pos)
6803 mlx5e_destroy_mdev_resources(pos);
6804
6805 return 0;
6806 }
6807
6808 static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
6809 {
6810 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6811 struct mlx5_core_dev *mdev = edev->mdev;
6812 struct auxiliary_device *actual_adev;
6813 int err = 0;
6814
6815 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6816 if (actual_adev)
6817 err = _mlx5e_suspend(actual_adev, false);
6818
6819 mlx5_sd_cleanup(mdev);
6820 if (actual_adev)
6821 mlx5_sd_put_adev(actual_adev, adev);
6822 return err;
6823 }
6824
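/* Probe sequence: create the devlink instance and port, allocate and build
 * the netdev, initialize the NIC profile, bring up device resources via
 * _mlx5e_resume(), and only then register the netdev.
 */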
6825 static int _mlx5e_probe(struct auxiliary_device *adev)
6826 {
6827 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6828 const struct mlx5e_profile *profile = &mlx5e_nic_profile;
6829 struct mlx5_core_dev *mdev = edev->mdev;
6830 struct mlx5e_dev *mlx5e_dev;
6831 struct net_device *netdev;
6832 struct mlx5e_priv *priv;
6833 int err;
6834
6835 mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
6836 if (IS_ERR(mlx5e_dev))
6837 return PTR_ERR(mlx5e_dev);
6838 auxiliary_set_drvdata(adev, mlx5e_dev);
6839
6840 err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
6841 if (err) {
6842 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
6843 goto err_devlink_unregister;
6844 }
6845
6846 netdev = mlx5e_create_netdev(mdev, profile);
6847 if (!netdev) {
6848 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
6849 err = -ENOMEM;
6850 goto err_devlink_port_unregister;
6851 }
6852 SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
6853 mlx5e_dev->netdev = netdev;
6854
6855 mlx5e_build_nic_netdev(netdev);
6856
6857 priv = netdev_priv(netdev);
6858
6859 priv->profile = profile;
6860 priv->ppriv = NULL;
6861
6862 err = profile->init(mdev, netdev);
6863 if (err) {
6864 mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
6865 goto err_destroy_netdev;
6866 }
6867
6868 err = _mlx5e_resume(adev);
6869 if (err) {
6870 mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err);
6871 goto err_profile_cleanup;
6872 }
6873
6874 err = register_netdev(netdev);
6875 if (err) {
6876 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
6877 goto err_resume;
6878 }
6879
6880 /* mlx5e_fix_features() returns early when the device is not present
6881 * to avoid dereferencing cleared priv during profile changes.
6882 * This also causes it to be a no-op during register_netdev(), where
6883 * the device is not yet present.
6884 * Trigger an additional features update that will actually work.
6885 */
6886 mlx5e_update_features(netdev);
6887
6888 mlx5e_dcbnl_init_app(priv);
6889 mlx5_core_uplink_netdev_set(mdev, netdev);
6890 mlx5e_params_print_info(mdev, &priv->channels.params);
6891 return 0;
6892
6893 err_resume:
6894 _mlx5e_suspend(adev, true);
6895 err_profile_cleanup:
6896 profile->cleanup(priv);
6897 err_destroy_netdev:
6898 mlx5e_destroy_netdev(netdev);
6899 err_devlink_port_unregister:
6900 mlx5e_devlink_port_unregister(mlx5e_dev);
6901 err_devlink_unregister:
6902 mlx5e_destroy_devlink(mlx5e_dev);
6903 return err;
6904 }
6905
6906 static int mlx5e_probe(struct auxiliary_device *adev,
6907 const struct auxiliary_device_id *id)
6908 {
6909 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6910 struct mlx5_core_dev *mdev = edev->mdev;
6911 struct auxiliary_device *actual_adev;
6912 int err;
6913
6914 err = mlx5_sd_init(mdev);
6915 if (err)
6916 return err;
6917
6918 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6919 if (actual_adev) {
6920 err = _mlx5e_probe(actual_adev);
6921 if (err)
6922 goto sd_cleanup;
6923 mlx5_sd_put_adev(actual_adev, adev);
6924 }
6925 return 0;
6926
6927 sd_cleanup:
6928 mlx5_sd_cleanup(mdev);
6929 if (actual_adev)
6930 mlx5_sd_put_adev(actual_adev, adev);
6931 return err;
6932 }
6933
6934 static void _mlx5e_remove(struct auxiliary_device *adev)
6935 {
6936 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6937 struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
6938 struct net_device *netdev = mlx5e_dev->netdev;
6939 struct mlx5e_priv *priv = netdev_priv(netdev);
6940 struct mlx5_core_dev *mdev = edev->mdev;
6941
6942 mlx5_eswitch_safe_aux_devs_remove(mdev);
6943 mlx5_core_uplink_netdev_set(mdev, NULL);
6944
6945 if (priv->profile)
6946 mlx5e_dcbnl_delete_app(priv);
6947 /* When unloading the driver, the netdev is in registered state
6948 * if it comes from legacy mode. If from switchdev mode, it has
6949 * already been unregistered before changing to the NIC profile.
6950 */
6951 if (netdev->reg_state == NETREG_REGISTERED) {
6952 unregister_netdev(netdev);
6953 _mlx5e_suspend(adev, false);
6954 } else {
6955 struct mlx5_core_dev *pos;
6956 int i;
6957
6958 if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
6959 mlx5_sd_for_each_dev(i, mdev, pos)
6960 mlx5e_destroy_mdev_resources(pos);
6961 else
6962 _mlx5e_suspend(adev, true);
6963 }
6964 /* Avoid cleanup if profile rollback failed. */
6965 if (priv->profile)
6966 priv->profile->cleanup(priv);
6967 mlx5e_destroy_netdev(netdev);
6968 mlx5e_devlink_port_unregister(mlx5e_dev);
6969 mlx5e_destroy_devlink(mlx5e_dev);
6970 }
6971
6972 static void mlx5e_remove(struct auxiliary_device *adev)
6973 {
6974 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
6975 struct mlx5_core_dev *mdev = edev->mdev;
6976 struct auxiliary_device *actual_adev;
6977
6978 actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
6979 if (actual_adev)
6980 _mlx5e_remove(actual_adev);
6981
6982 mlx5_sd_cleanup(mdev);
6983 if (actual_adev)
6984 mlx5_sd_put_adev(actual_adev, adev);
6985 }
6986
6987 static const struct auxiliary_device_id mlx5e_id_table[] = {
6988 { .name = MLX5_ADEV_NAME ".eth", },
6989 {},
6990 };
6991
6992 MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
6993
6994 static struct auxiliary_driver mlx5e_driver = {
6995 .name = "eth",
6996 .probe = mlx5e_probe,
6997 .remove = mlx5e_remove,
6998 .suspend = mlx5e_suspend,
6999 .resume = mlx5e_resume,
7000 .id_table = mlx5e_id_table,
7001 };
7002
7003 int mlx5e_init(void)
7004 {
7005 int ret;
7006
7007 mlx5e_build_ptys2ethtool_map();
7008 ret = auxiliary_driver_register(&mlx5e_driver);
7009 if (ret)
7010 return ret;
7011
7012 ret = mlx5e_rep_init();
7013 if (ret)
7014 auxiliary_driver_unregister(&mlx5e_driver);
7015 return ret;
7016 }
7017
7018 void mlx5e_cleanup(void)
7019 {
7020 mlx5e_rep_cleanup();
7021 auxiliary_driver_unregister(&mlx5e_driver);
7022 }
7023