xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "health.h"
5 #include "en/ptp.h"
6 #include "en/devlink.h"
7 #include "lib/tout.h"
8 
/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h.
 * Consistency is enforced at compile time by the BUILD_BUG_ON_MSG() in
 * mlx5e_health_sq_put_sw_state(), which indexes this array by state bit.
 */
static const char * const sq_sw_state_type_name[] = {
	[MLX5E_SQ_STATE_ENABLED] = "enabled",
	[MLX5E_SQ_STATE_MPWQE] = "mpwqe",
	[MLX5E_SQ_STATE_RECOVERING] = "recovering",
	[MLX5E_SQ_STATE_IPSEC] = "ipsec",
	[MLX5E_SQ_STATE_DIM] = "dim",
	[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
	[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
	[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
};
20 
mlx5e_wait_for_sq_flush(struct mlx5e_txqsq * sq)21 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
22 {
23 	struct mlx5_core_dev *dev = sq->mdev;
24 	unsigned long exp_time;
25 
26 	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
27 
28 	while (time_before(jiffies, exp_time)) {
29 		if (sq->cc == sq->pc)
30 			return 0;
31 
32 		msleep(20);
33 	}
34 
35 	netdev_err(sq->netdev,
36 		   "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
37 		   sq->sqn, sq->cc, sq->pc);
38 
39 	return -ETIMEDOUT;
40 }
41 
mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq * sq)42 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
43 {
44 	WARN_ONCE(sq->cc != sq->pc,
45 		  "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
46 		  sq->sqn, sq->cc, sq->pc);
47 	sq->cc = 0;
48 	sq->dma_fifo_cc = 0;
49 	sq->pc = 0;
50 }
51 
mlx5e_health_sq_put_sw_state(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq)52 static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
53 {
54 	int i;
55 
56 	BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
57 			 "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
58 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
59 
60 	for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i)
61 		devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
62 					  test_bit(i, &sq->state));
63 
64 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
65 }
66 
/* Recover an SQ that reported an error CQE. If the SQ is in HW error
 * state: stop the stack, drain the SQ, move it back to ready and
 * re-activate it. The RECOVERING bit is cleared on every exit path.
 * Returns 0 on success or when no recovery was needed, errno otherwise.
 */
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
	struct mlx5_core_dev *mdev;
	struct net_device *dev;
	struct mlx5e_txqsq *sq;
	u8 state;
	int err;

	sq = ctx;
	mdev = sq->mdev;
	dev = sq->netdev;

	/* Nothing to do unless recovery was requested for this SQ. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		return 0;

	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
	if (err) {
		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
			   sq->sqn, err);
		goto out;
	}

	/* Only act when HW actually moved the SQ to error state. */
	if (state != MLX5_SQC_STATE_ERR)
		goto out;

	/* Stop the stack from posting new WQEs while the SQ drains. */
	mlx5e_tx_disable_queue(sq->txq);

	err = mlx5e_wait_for_sq_flush(sq);
	if (err)
		goto out;

	/* At this point, no new packets will arrive from the stack as TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
	 * pending WQEs. SQ can safely reset the SQ.
	 */

	err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn);
	if (err)
		goto out;

	mlx5e_reset_txqsq_cc_pc(sq);
	sq->stats->recover++;
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	/* Activation requires RTNL; take it only for this step. */
	rtnl_lock();
	mlx5e_activate_txqsq(sq);
	rtnl_unlock();

	/* Kick NAPI so pending work on the recovered queue resumes. */
	if (sq->channel)
		mlx5e_trigger_napi_icosq(sq->channel);
	else
		mlx5e_trigger_napi_sched(sq->cq.napi);

	return 0;
out:
	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
	return err;
}
124 
/* Context handed to the TX-timeout recover/dump callbacks. */
struct mlx5e_tx_timeout_ctx {
	struct mlx5e_txqsq *sq; /* the SQ that hit the timeout */
	signed int status; /* outcome; see mlx5e_tx_reporter_timeout_recover() */
};
129 
/* Recover from a TX timeout. First try the cheap path of recovering the
 * channel's EQ; if that fails, fall back to reopening all channels.
 * The outcome is reported through to_ctx->status:
 *   0  - this SQ recovered via EQ recovery
 *   1  - all channels were reopened
 *   <0 - recovery failed (the SQ is left disabled)
 */
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx;
	struct mlx5e_priv *priv;
	struct mlx5_eq_comp *eq;
	struct mlx5e_txqsq *sq;
	int err;

	to_ctx = ctx;
	sq = to_ctx->sq;
	eq = sq->cq.mcq.eq;
	priv = sq->priv;
	err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
	if (!err) {
		to_ctx->status = 0; /* this sq recovered */
		return err;
	}

	/* Reopening channels requires the state lock. */
	mutex_lock(&priv->state_lock);
	err = mlx5e_safe_reopen_channels(priv);
	mutex_unlock(&priv->state_lock);
	if (!err) {
		to_ctx->status = 1; /* all channels recovered */
		return err;
	}

	to_ctx->status = err;
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_err(priv->netdev,
		   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
		   err);

	return err;
}
164 
/* Recover an unhealthy PTP SQ by closing and reopening the PTP channel.
 * All priv channels are deactivated for the duration, and the carrier is
 * dropped and restored afterwards if it was up.
 * Returns 0 on success or the mlx5e_ptp_open() error.
 */
static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;
	struct mlx5e_channels *chs;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int carrier_ok;
	int err;

	/* Nothing to do unless recovery was requested for this SQ. */
	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
		return 0;

	priv = ptpsq->txqsq.priv;

	mutex_lock(&priv->state_lock);
	chs = &priv->channels;
	netdev = priv->netdev;

	/* Remember carrier state so it can be restored after recovery. */
	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	/* Channel (de)activation requires RTNL; take it only per step. */
	rtnl_lock();
	mlx5e_deactivate_priv_channels(priv);
	rtnl_unlock();

	mlx5e_ptp_close(chs->ptp);
	err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);

	rtnl_lock();
	mlx5e_activate_priv_channels(priv);
	rtnl_unlock();

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
205 
/* Dispatch to the error-specific recover handler stored in the context.
 * The state lock cannot be grabbed within this function: doing so could
 * cause a deadlock or a read-after-free.
 */
static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
	return err_ctx->recover(err_ctx->ctx);
}
213 
/* Devlink health .recover callback. With a specific error context,
 * run its recover handler; otherwise recover all channels.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_recover_from_ctx(err_ctx);

	return mlx5e_health_recover_channels(priv);
}
224 
225 static void
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * sq,int tc)226 mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
227 						  struct mlx5e_txqsq *sq, int tc)
228 {
229 	bool stopped = netif_xmit_stopped(sq->txq);
230 	u8 state;
231 	int err;
232 
233 	devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
234 	devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
235 	devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
236 
237 	err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
238 	if (!err)
239 		devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
240 
241 	devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
242 	devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
243 	devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
244 	mlx5e_health_sq_put_sw_state(fmsg, sq);
245 	mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
246 	mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
247 }
248 
/* Emit one diagnose object for a regular channel SQ, keyed by its
 * channel index, followed by the common SQ diagnostics.
 */
static void
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
					struct mlx5e_txqsq *sq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
	devlink_fmsg_obj_nest_end(fmsg);
}
258 
/* Emit one diagnose object for a PTP SQ: the common SQ diagnostics plus
 * a nested "Port TS" object with the timestamp CQ diagnostics.
 */
static void
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq, int tc)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
	mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
	devlink_fmsg_obj_nest_end(fmsg);
}
271 
272 static void
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg * fmsg,struct mlx5e_txqsq * txqsq)273 mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
274 					 struct mlx5e_txqsq *txqsq)
275 {
276 	bool real_time =  mlx5_is_real_time_sq(txqsq->mdev);
277 	u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
278 	u32 sq_stride = MLX5_SEND_WQE_BB;
279 
280 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
281 	devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
282 	devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
283 	devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
284 	mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
285 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
286 }
287 
/* Emit the common diagnostics of the PTP port timestamp CQ in a nested
 * "Port TS" object.
 */
static void
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
					      struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
	mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
296 
297 static void
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg)298 mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
299 					 struct devlink_fmsg *fmsg)
300 {
301 	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
302 	struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
303 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
304 	struct mlx5e_ptpsq *generic_ptpsq;
305 
306 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
307 	mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
308 
309 	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
310 		goto out;
311 
312 	generic_ptpsq = &ptp_ch->ptpsq[0];
313 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
314 	mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
315 	mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
316 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
317 out:
318 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
319 }
320 
mlx5e_tx_reporter_diagnose(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,struct netlink_ext_ack * extack)321 static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
322 				      struct devlink_fmsg *fmsg,
323 				      struct netlink_ext_ack *extack)
324 {
325 	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
326 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
327 
328 	int i, tc;
329 
330 	mutex_lock(&priv->state_lock);
331 
332 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
333 		goto unlock;
334 
335 	mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
336 	devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
337 
338 	for (i = 0; i < priv->channels.num; i++) {
339 		struct mlx5e_channel *c = priv->channels.c[i];
340 
341 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
342 			struct mlx5e_txqsq *sq = &c->sq[tc];
343 
344 			mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
345 		}
346 	}
347 
348 	if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
349 		goto close_sqs_nest;
350 
351 	for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++)
352 		mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
353 							      &ptp_ch->ptpsq[tc],
354 							      tc);
355 
356 close_sqs_nest:
357 	devlink_fmsg_arr_pair_nest_end(fmsg);
358 unlock:
359 	mutex_unlock(&priv->state_lock);
360 	return 0;
361 }
362 
/* Dump the SX slice, the SQ's full QPC and its send buffer into fmsg.
 * The same rsc key is reused across segments; fields set for an earlier
 * segment (e.g. size, index1, num_of_obj1) intentionally carry over to
 * later ones. No-op while the channels are closed.
 */
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
				     void *ctx)
{
	struct mlx5_rsc_key key = {};
	struct mlx5e_txqsq *sq = ctx;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
	key.size = PAGE_SIZE;
	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
	key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
	key.index1 = sq->sqn;
	key.num_of_obj1 = 1;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
	key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
	key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	mlx5e_health_fmsg_named_obj_nest_end(fmsg);

	return 0;
}
396 
/* Dump callback for TX timeouts: forward the timed-out SQ from the
 * timeout context to the generic SQ dump.
 */
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
					  void *ctx)
{
	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}
404 
/* Dump callback for unhealthy PTP SQs: forward the underlying txqsq to
 * the generic SQ dump.
 */
static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
						  struct devlink_fmsg *fmsg,
						  void *ctx)
{
	struct mlx5e_ptpsq *ptpsq = ctx;

	return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
}
413 
mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv * priv,struct devlink_fmsg * fmsg)414 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
415 					  struct devlink_fmsg *fmsg)
416 {
417 	struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
418 	struct mlx5_rsc_key key = {};
419 	int i, tc;
420 
421 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
422 		return 0;
423 
424 	mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
425 	key.size = PAGE_SIZE;
426 	key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
427 	mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
428 	mlx5e_health_fmsg_named_obj_nest_end(fmsg);
429 	devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
430 
431 	for (i = 0; i < priv->channels.num; i++) {
432 		struct mlx5e_channel *c = priv->channels.c[i];
433 
434 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
435 			struct mlx5e_txqsq *sq = &c->sq[tc];
436 
437 			mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
438 		}
439 	}
440 
441 	if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
442 		for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
443 			struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
444 
445 			mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
446 		}
447 	}
448 
449 	devlink_fmsg_arr_pair_nest_end(fmsg);
450 	return 0;
451 }
452 
/* Dispatch to the error-specific dump handler stored in the context. */
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
					   struct mlx5e_err_ctx *err_ctx,
					   struct devlink_fmsg *fmsg)
{
	return err_ctx->dump(priv, fmsg, err_ctx->ctx);
}
459 
/* Devlink health .dump callback. With a specific error context, run its
 * dump handler; otherwise dump all SQs.
 */
static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	if (err_ctx)
		return mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg);

	return mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
}
470 
mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq * sq)471 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
472 {
473 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
474 	struct mlx5e_priv *priv = sq->priv;
475 	struct mlx5e_err_ctx err_ctx = {};
476 
477 	err_ctx.ctx = sq;
478 	err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
479 	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
480 	snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
481 
482 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
483 }
484 
mlx5e_reporter_tx_timeout(struct mlx5e_txqsq * sq)485 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
486 {
487 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
488 	struct mlx5e_tx_timeout_ctx to_ctx = {};
489 	struct mlx5e_priv *priv = sq->priv;
490 	struct mlx5e_err_ctx err_ctx = {};
491 
492 	to_ctx.sq = sq;
493 	err_ctx.ctx = &to_ctx;
494 	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
495 	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
496 	snprintf(err_str, sizeof(err_str),
497 		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
498 		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
499 		 jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
500 
501 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
502 	return to_ctx.status;
503 }
504 
mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq * ptpsq)505 void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
506 {
507 	struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
508 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
509 	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
510 	struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
511 	struct mlx5e_priv *priv = txqsq->priv;
512 	struct mlx5e_err_ctx err_ctx = {};
513 
514 	err_ctx.ctx = ptpsq;
515 	err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
516 	err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
517 	snprintf(err_str, sizeof(err_str),
518 		 "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
519 		 txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
520 
521 	mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
522 }
523 
/* Devlink health reporter ops exposing the TX reporter as "tx". */
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
		.name = "tx",
		.recover = mlx5e_tx_reporter_recover,
		.diagnose = mlx5e_tx_reporter_diagnose,
		.dump = mlx5e_tx_reporter_dump,
};
530 
531 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
532 
mlx5e_reporter_tx_create(struct mlx5e_priv * priv)533 void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
534 {
535 	struct devlink_health_reporter *reporter;
536 
537 	reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
538 						       &mlx5_tx_reporter_ops,
539 						       MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
540 	if (IS_ERR(reporter)) {
541 		netdev_warn(priv->netdev,
542 			    "Failed to create tx reporter, err = %ld\n",
543 			    PTR_ERR(reporter));
544 		return;
545 	}
546 	priv->tx_reporter = reporter;
547 }
548 
mlx5e_reporter_tx_destroy(struct mlx5e_priv * priv)549 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
550 {
551 	if (!priv->tx_reporter)
552 		return;
553 
554 	devlink_health_reporter_destroy(priv->tx_reporter);
555 	priv->tx_reporter = NULL;
556 }
557