xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c (revision a4a35f6cbebbf9466b6c412506ab89299d567f51)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "health.h"
5 #include "en/ptp.h"
6 #include "en/devlink.h"
7 #include "lib/tout.h"
8 
/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h.
 * mlx5e_health_sq_put_sw_state() BUILD_BUG_ONs if the array size diverges
 * from MLX5E_NUM_SQ_STATES.
 */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
	[MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
21 
mlx5e_wait_for_sq_flush(struct mlx5e_txqsq * sq)22 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
23 {
24 	struct mlx5_core_dev *dev = sq->mdev;
25 	unsigned long exp_time;
26 
27 	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
28 
29 	while (time_before(jiffies, exp_time)) {
30 		if (sq->cc == sq->pc)
31 			return 0;
32 
33 		msleep(20);
34 	}
35 
36 	netdev_err(sq->netdev,
37 		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
38 		   sq->sqn, sq->cc, sq->pc);
39 
40 	return -ETIMEDOUT;
41 }
42 
mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq * sq)43 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
44 {
45 	WARN_ONCE(sq->cc != sq->pc,
46 		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
47 		  sq->sqn, sq->cc, sq->pc);
48 	sq->cc = 0;
49 	sq->dma_fifo_cc = 0;
50 	sq->pc = 0;
51 }
52 
mlx5e_health_sq_put_sw_state(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq)53 static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
54 {
55 	int i;
56 
57 	BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
58 			 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
59 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
60 
61 	for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i)
62 		devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
63 					  test_bit(i, &sq->state));
64 
65 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
66 }
67 
/* Recover an SQ that raised an error CQE: confirm the SQ is in HW error
 * state, drain it, move it back to ready and re-activate it.
 *
 * @ctx: the struct mlx5e_txqsq to recover.
 * Return: 0 on success or nothing-to-do, negative errno on failure.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	/* Recovery was never requested, or already completed. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	/* Only an SQ in HW error state needs the reset sequence below. */
	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	/* Stop the stack from queueing new packets on this TXQ. */
	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. It is now safe to reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	/* Re-activation requires RTNL. */
	rtnl_lock();
	mlx5e_activate_txqsq(sq);
	rtnl_unlock();

	/* Kick NAPI so pending work is picked up after reactivation. */
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}
125 
/* Context passed from mlx5e_reporter_tx_timeout() to the recover/dump
 * callbacks so the recovery verdict can be reported back to the caller.
 */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq;
	/* 0: this sq recovered, 1: all channels reopened, <0: errno */
	signed int status;
};
130 
mlx5e_tx_reporter_timeout_recover(void * ctx)131 static int mlx5e_tx_reporter_timeout_recover(void *ctx)
132 {
133 	struct mlx5e_tx_timeout_ctx *to_ctx;
134 	struct mlx5e_priv *priv;
135 	struct mlx5_eq_comp *eq;
136 	struct mlx5e_txqsq *sq;
137 	int err;
138 
139 	to_ctx = ctx;
140 	sq = to_ctx->sq;
141 	eq = sq->cq.mcq.eq;
142 	priv = sq->priv;
143 	err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
144 	if (!err) {
145 		to_ctx->status = 0; /* this sq recovered */
146 		return err;
147 	}
148 
149 	mutex_lock(&priv->state_lock);
150 	err = mlx5e_safe_reopen_channels(priv);
151 	mutex_unlock(&priv->state_lock);
152 	if (!err) {
153 		to_ctx->status = 1; /* all channels recovered */
154 		return err;
155 	}
156 
157 	to_ctx->status = err;
158 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
159 	netdev_err(priv->netdev,
160 		   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
161 		   err);
162 
163 	return err;
164 }
165 
/* Recover an unhealthy PTP SQ by closing and reopening the PTP channel.
 * All priv channels are deactivated around the reopen, and the carrier
 * state is saved and restored across the operation.
 *
 * @ctx: the struct mlx5e_ptpsq to recover.
 * Return: 0 on success or nothing-to-do, errno from mlx5e_ptp_open().
 */
static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;
	struct mlx5e_channels *chs;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int carrier_ok;
	int err;

	/* Recovery was never requested, or already completed. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
		return 0;

	priv = ptpsq->txqsq.priv;

	mutex_lock(&priv->state_lock);
	chs = &priv->channels;
	netdev = priv->netdev;

	/* Remember carrier state; it is forced off for the reopen. */
	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	rtnl_lock();
	mlx5e_deactivate_priv_channels(priv);
	rtnl_unlock();

	/* Recreate the PTP channel; keep err from open for the caller. */
	mlx5e_ptp_close(chs->ptp);
	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);

	rtnl_lock();
	mlx5e_activate_priv_channels(priv);
	rtnl_unlock();

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
206 
/* state lock cannot be grabbed within this function.
 * It can cause a dead lock or a read-after-free.
 */
static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	/* Dispatch to the error-specific recover callback. */
	return err_ctx->recover(err_ctx->ctx);
}
214 
/* devlink health .recover callback. With a specific error context,
 * run its recovery routine; otherwise recover all channels.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_recover_from_ctx(err_ctx);

	return mlx5e_health_recover_channels(priv);
}
225 
226 static void
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq,int tc)227 mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
228 						  struct mlx5e_txqsq *sq, int tc)
229 {
230 	bool stopped = netif_xmit_stopped(sq->txq);
231 	u8 state;
232 	int err;
233 
234 	devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
235 	devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
236 	devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
237 
238 	err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
239 	if (!err)
240 		devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
241 
242 	devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
243 	devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
244 	devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
245 	mlx5e_health_sq_put_sw_state(fmsg, sq);
246 	mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
247 	mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
248 }
249 
/* Emit one fmsg object describing a regular (per-channel) TXQ SQ. */
static void
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
					struct mlx5e_txqsq *sq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
	devlink_fmsg_obj_nest_end(fmsg);
}
259 
/* Emit one fmsg object describing a PTP SQ, including its port
 * timestamping CQ diagnostics under a "Port TS" sub-object.
 */
static void
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	devlink_fmsg_obj_nest_end(fmsg);
}
272 
273 static void
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * txqsq)274 mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
275 					 struct mlx5e_txqsq *txqsq)
276 {
277 	bool real_time =  mlx5_is_real_time_sq(txqsq->mdev);
278 	u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
279 	u32 sq_stride = MLX5_SEND_WQE_BB;
280 
281 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
282 	devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
283 	devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
284 	devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
285 	mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
286 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
287 }
288 
/* Emit the common-config "Port TS" section for a PTP SQ's
 * port timestamping CQ.
 */
static void
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
297 
298 static void
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg)299 mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
300 					 struct devlink_fmsg *fmsg)
301 {
302 	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
303 	struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
304 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
305 	struct mlx5e_ptpsq *generic_ptpsq;
306 
307 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
308 	mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
309 
310 	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
311 		goto out;
312 
313 	generic_ptpsq = &ptp_ch->ptpsq[0];
314 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
315 	mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
316 	mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
317 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
318 out:
319 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
320 }
321 
mlx5e_tx_reporter_diagnose(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,struct netlink_ext_ack * extack)322 static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
323 				      struct devlink_fmsg *fmsg,
324 				      struct netlink_ext_ack *extack)
325 {
326 	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
327 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
328 
329 	int i, tc;
330 
331 	mutex_lock(&priv->state_lock);
332 
333 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
334 		goto unlock;
335 
336 	mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
337 	devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
338 
339 	for (i = 0; i < priv->channels.num; i++) {
340 		struct mlx5e_channel *c = priv->channels.c[i];
341 
342 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
343 			struct mlx5e_txqsq *sq = &c->sq[tc];
344 
345 			mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
346 		}
347 	}
348 
349 	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
350 		goto close_sqs_nest;
351 
352 	for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++)
353 		mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
354 							      &ptp_ch->ptpsq[tc],
355 							      tc);
356 
357 close_sqs_nest:
358 	devlink_fmsg_arr_pair_nest_end(fmsg);
359 unlock:
360 	mutex_unlock(&priv->state_lock);
361 	return 0;
362 }
363 
/* Dump the HW resources of one SQ: the SX slice, the SQ's full QPC and
 * its send buffer. Note that @key is reused across the three dumps, so
 * fields set for an earlier dump (e.g. size) carry over to later ones.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;

	/* Nothing to dump when the netdev is closed. */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	return 0;
}
397 
/* Timeout-flavored dump callback: unwrap the timeout context and dump
 * the affected SQ.
 */
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
					  void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}
405 
/* PTP-flavored dump callback: dump the PTP SQ's underlying TXQ SQ. */
static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
						  struct devlink_fmsg *fmsg,
						  void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
}
414 
mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg)415 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
416 					  struct devlink_fmsg *fmsg)
417 {
418 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
419 	struct mlx5_rsc_key key = {};
420 	int i, tc;
421 
422 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
423 		return 0;
424 
425 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
426 	key.size = PAGE_SIZE;
427 	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
428 	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
429 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
430 	devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
431 
432 	for (i = 0; i < priv->channels.num; i++) {
433 		struct mlx5e_channel *c = priv->channels.c[i];
434 
435 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
436 			struct mlx5e_txqsq *sq = &c->sq[tc];
437 
438 			mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
439 		}
440 	}
441 
442 	if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
443 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
444 			struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
445 
446 			mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
447 		}
448 	}
449 
450 	devlink_fmsg_arr_pair_nest_end(fmsg);
451 	return 0;
452 }
453 
/* Dispatch to the error-specific dump callback. */
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}
/* devlink health .dump callback. With a specific error context, run its
 * dump routine; otherwise dump all SQs.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg);

	return mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}
471 
mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq * sq)472 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
473 {
474 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
475 	struct mlx5e_priv *priv = sq->priv;
476 	struct mlx5e_err_ctx err_ctx = {};
477 
478 	err_ctx.ctx = sq;
479 	err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
480 	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
481 	snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
482 
483 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
484 }
485 
mlx5e_reporter_tx_timeout(struct mlx5e_txqsq * sq)486 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
487 {
488 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
489 	struct mlx5e_tx_timeout_ctx to_ctx = {};
490 	struct mlx5e_priv *priv = sq->priv;
491 	struct mlx5e_err_ctx err_ctx = {};
492 
493 	to_ctx.sq = sq;
494 	err_ctx.ctx = &to_ctx;
495 	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
496 	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
497 	snprintf(err_str, sizeof(err_str),
498 		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
499 		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
500 		 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
501 
502 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
503 	return to_ctx.status;
504 }
505 
mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq * ptpsq)506 void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
507 {
508 	struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
509 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
510 	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
511 	struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
512 	struct mlx5e_priv *priv = txqsq->priv;
513 	struct mlx5e_err_ctx err_ctx = {};
514 
515 	err_ctx.ctx = ptpsq;
516 	err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
517 	err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
518 	snprintf(err_str, sizeof(err_str),
519 		 "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
520 		 txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
521 
522 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
523 }
524 
525 static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
526 		.name = "tx",
527 		.recover = mlx5e_tx_reporter_recover,
528 		.diagnose = mlx5e_tx_reporter_diagnose,
529 		.dump = mlx5e_tx_reporter_dump,
530 };
531 
532 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
533 
mlx5e_reporter_tx_create(struct mlx5e_priv * priv)534 void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
535 {
536 	struct devlink_health_reporter *reporter;
537 
538 	reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
539 						       &mlx5_tx_reporter_ops,
540 						       MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
541 	if (IS_ERR(reporter)) {
542 		netdev_warn(priv->netdev,
543 			    "Failed to create tx reporter, err = %ld\n",
544 			    PTR_ERR(reporter));
545 		return;
546 	}
547 	priv->tx_reporter = reporter;
548 }
549 
mlx5e_reporter_tx_destroy(struct mlx5e_priv * priv)550 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
551 {
552 	if (!priv->tx_reporter)
553 		return;
554 
555 	devlink_health_reporter_destroy(priv->tx_reporter);
556 	priv->tx_reporter = NULL;
557 }
558