xref: /linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c (revision a4989fa91110508b64eea7ccde63d062113988ff)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020 Pengutronix,
6 //                          Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/pm_runtime.h>
23 
24 #include <asm/unaligned.h>
25 
26 #include "mcp251xfd.h"
27 
28 #define DEVICE_NAME "mcp251xfd"
29 
/* MCP2517FD: same CRC/ECC quirks as the MCP2518FD, plus the
 * MAB-no-warn quirk, which only this model needs.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
36 
/* MCP2518FD: CRC-protected register, RX and TX accesses plus the ECC
 * quirk.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
42 
/* Autodetect model, start with CRC enabled. The quirks are identical to
 * the MCP2518FD's until the concrete model has been identified.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
49 
/* Nominal (arbitration phase) bit timing limits. */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
61 
/* CAN-FD data phase bit timing limits (narrower ranges than the
 * nominal phase).
 */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
73 
74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
75 {
76 	switch (model) {
77 	case MCP251XFD_MODEL_MCP2517FD:
78 		return "MCP2517FD";
79 	case MCP251XFD_MODEL_MCP2518FD:
80 		return "MCP2518FD";
81 	case MCP251XFD_MODEL_MCP251XFD:
82 		return "MCP251xFD";
83 	}
84 
85 	return "<unknown>";
86 }
87 
88 static inline const char *
89 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
90 {
91 	return __mcp251xfd_get_model_str(priv->devtype_data.model);
92 }
93 
94 static const char *mcp251xfd_get_mode_str(const u8 mode)
95 {
96 	switch (mode) {
97 	case MCP251XFD_REG_CON_MODE_MIXED:
98 		return "Mixed (CAN FD/CAN 2.0)";
99 	case MCP251XFD_REG_CON_MODE_SLEEP:
100 		return "Sleep";
101 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
102 		return "Internal Loopback";
103 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
104 		return "Listen Only";
105 	case MCP251XFD_REG_CON_MODE_CONFIG:
106 		return "Configuration";
107 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
108 		return "External Loopback";
109 	case MCP251XFD_REG_CON_MODE_CAN2_0:
110 		return "CAN 2.0";
111 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
112 		return "Restricted Operation";
113 	}
114 
115 	return "<unknown>";
116 }
117 
118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
119 {
120 	if (!priv->reg_vdd)
121 		return 0;
122 
123 	return regulator_enable(priv->reg_vdd);
124 }
125 
126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
127 {
128 	if (!priv->reg_vdd)
129 		return 0;
130 
131 	return regulator_disable(priv->reg_vdd);
132 }
133 
134 static inline int
135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 {
137 	if (!priv->reg_xceiver)
138 		return 0;
139 
140 	return regulator_enable(priv->reg_xceiver);
141 }
142 
143 static inline int
144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 {
146 	if (!priv->reg_xceiver)
147 		return 0;
148 
149 	return regulator_disable(priv->reg_xceiver);
150 }
151 
152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
153 {
154 	int err;
155 
156 	err = clk_prepare_enable(priv->clk);
157 	if (err)
158 		return err;
159 
160 	err = mcp251xfd_vdd_enable(priv);
161 	if (err)
162 		clk_disable_unprepare(priv->clk);
163 
164 	/* Wait for oscillator stabilisation time after power up */
165 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
166 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
167 
168 	return err;
169 }
170 
171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
172 {
173 	int err;
174 
175 	err = mcp251xfd_vdd_disable(priv);
176 	if (err)
177 		return err;
178 
179 	clk_disable_unprepare(priv->clk);
180 
181 	return 0;
182 }
183 
/* Prepare a SPI "write register" command in @write_reg_buf for the
 * bytes of register @reg selected by @mask.
 *
 * Only the span from the first to the last byte set in @mask is
 * transferred; @val is shifted so its relevant bytes line up with the
 * start of that span. With the MCP251XFD_QUIRK_CRC_REG quirk active,
 * the CRC variant of the command is used and a big-endian CRC16 over
 * command + data is appended.
 *
 * Returns the total number of bytes (command + data [+ CRC]) to send.
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Transfer only the bytes covered by mask. */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	/* Align val with the first transferred register byte. */
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
219 
220 static inline int
221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
222 				 u8 *tef_tail)
223 {
224 	u32 tef_ua;
225 	int err;
226 
227 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
228 	if (err)
229 		return err;
230 
231 	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
232 
233 	return 0;
234 }
235 
236 static inline int
237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
238 				u8 *tx_tail)
239 {
240 	u32 fifo_sta;
241 	int err;
242 
243 	err = regmap_read(priv->map_reg,
244 			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
245 			  &fifo_sta);
246 	if (err)
247 		return err;
248 
249 	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
250 
251 	return 0;
252 }
253 
254 static inline int
255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
256 				const struct mcp251xfd_rx_ring *ring,
257 				u8 *rx_head)
258 {
259 	u32 fifo_sta;
260 	int err;
261 
262 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
263 			  &fifo_sta);
264 	if (err)
265 		return err;
266 
267 	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
268 
269 	return 0;
270 }
271 
/* Read the RX tail of @ring from the chip.
 *
 * The FIFO User Address register holds an offset relative to the start
 * of the controller RAM; subtracting the ring's own RAM offset and
 * dividing by the object size yields the tail index.
 */
static inline int
mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring,
				u8 *rx_tail)
{
	u32 fifo_ua;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
			  &fifo_ua);
	if (err)
		return err;

	/* ring->base is an absolute address, fifo_ua is RAM-relative. */
	fifo_ua -= ring->base - MCP251XFD_RAM_START;
	*rx_tail = fifo_ua / ring->obj_size;

	return 0;
}
290 
/* Pre-build the SPI message for TX object @n of @ring.
 *
 * Each TX object's message consists of two transfers: the FIFO load
 * (command + frame data, length filled in at transmit time) and the
 * shared "request to send" register write of @rts_buf_len bytes.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	/* Toggle chip select between load and RTS transfer. */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
326 
/* Reset all ring state and lay the rings out linearly in controller
 * RAM: TEF objects first, then the TX objects, then the RX rings back
 * to back.
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i;

	/* TEF */
	priv->tef.head = 0;
	priv->tef.tail = 0;

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects start right after the TEF objects. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	/* Shared RTS command buffer, referenced by every TX object. */
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* First RX ring follows the TX objects, every further
		 * one follows its predecessor.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;
	}
}
375 
376 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
377 {
378 	int i;
379 
380 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
381 		kfree(priv->rx[i]);
382 		priv->rx[i] = NULL;
383 	}
384 }
385 
/* Size and allocate the rings.
 *
 * Object sizes depend on the mode: CAN-FD (and listen-only, which is
 * handled like FD) uses the larger 64-byte-payload objects. The TEF and
 * TX rings take a fixed share of the controller RAM; the remainder is
 * distributed over up to ARRAY_SIZE(priv->rx) RX rings.
 *
 * Returns 0 on success or -ENOMEM (all previously allocated RX rings
 * are freed on failure).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* Each TX object also needs a TEF object. */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Round down to a power of two, capped at 32 objects. */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
453 
454 static inline int
455 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
456 {
457 	u32 val;
458 	int err;
459 
460 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
461 	if (err)
462 		return err;
463 
464 	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
465 
466 	return 0;
467 }
468 
/* Request operation mode @mode_req via the REQOP field of the CON
 * register and, unless @nowait is set, poll OPMOD until the controller
 * has actually switched. Sleep Mode requests return without polling.
 *
 * Returns 0 on success, a regmap error, or the poll's -ETIMEDOUT if
 * the mode change did not happen in time (logged with the mode the
 * chip is stuck in).
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
502 
/* Set operation mode and wait until the controller has switched. */
static inline int
mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
509 
/* Request an operation mode change without waiting for completion. */
static inline int
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
			       const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
516 
517 static inline bool mcp251xfd_osc_invalid(u32 reg)
518 {
519 	return reg == 0x0 || reg == 0xffffffff;
520 }
521 
/* Wake the chip's oscillator and wait for it to become ready.
 *
 * Returns 0 on success, -ENODEV if the OSC register reads as an
 * invalid (all-0/all-1) value — i.e. no chip responded — or the poll
 * error otherwise. The invalid-value check runs before the timeout
 * check on purpose, so a dead bus is reported as -ENODEV and not
 * -ETIMEDOUT.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	} else if (err) {
		return err;
	}

	return 0;
}
566 
/* Issue a SPI soft reset command to the chip.
 *
 * The chip is first woken up and put into Config Mode, as the reset
 * command only seems to work outside of Sleep Mode.
 */
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
{
	const __be16 cmd = mcp251xfd_cmd_reset();
	int err;

	/* The Set Mode and SPI Reset command only seems to works if
	 * the controller is not in Sleep Mode.
	 */
	err = mcp251xfd_chip_clock_enable(priv);
	if (err)
		return err;

	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
	if (err)
		return err;

	/* spi_write_then_read() works with non DMA-safe buffers */
	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
}
586 
/* Verify that the soft reset took effect: the chip must be in Config
 * Mode and the OSC register must hold its reset default.
 *
 * Returns -ETIMEDOUT on mismatch, so the caller's retry loop treats a
 * failed reset like a timed-out one.
 */
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference;
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
		netdev_info(priv->ndev,
			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
			    mcp251xfd_get_mode_str(mode), mode);
		return -ETIMEDOUT;
	}

	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);

	/* check reset defaults of OSC reg */
	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	if (osc != osc_reference) {
		netdev_info(priv->ndev,
			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
			    osc, osc_reference);
		return -ETIMEDOUT;
	}

	return 0;
}
622 
/* Soft-reset the chip, retrying up to MCP251XFD_SOFTRESET_RETRIES_MAX
 * times on -ETIMEDOUT (any other error aborts immediately).
 */
static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
{
	int err, i;

	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
		if (i)
			netdev_info(priv->ndev,
				    "Retrying to reset Controller.\n");

		err = mcp251xfd_chip_softreset_do(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		err = mcp251xfd_chip_softreset_check(priv);
		if (err == -ETIMEDOUT)
			continue;
		if (err)
			return err;

		return 0;
	}

	/* All retries timed out; report the last error. */
	return err;
}
649 
/* Configure the oscillator (Low Power Mode on oscillator disable) and
 * enable the Time Base Counter with prescaler 1.
 */
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
	u32 osc;
	int err;

	/* Activate Low Power Mode on Oscillator Disable. This only
	 * works on the MCP2518FD. The MCP2517FD will go into normal
	 * Sleep Mode instead.
	 */
	osc = MCP251XFD_REG_OSC_LPMEN |
		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			   MCP251XFD_REG_OSC_CLKODIV_10);
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Set Time Base Counter Prescaler to 1.
	 *
	 * This means an overflow of the 32 bit Time Base Counter
	 * register at 40 MHz every 107 seconds.
	 */
	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
			    MCP251XFD_REG_TSCON_TBCEN);
}
674 
/* Write the CAN Control register and the nominal bit timing; if CAN-FD
 * is enabled, also write the data bit timing and the Transmitter Delay
 * Compensation.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only disable it for non-ISO FD mode. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation */
	/* NOTE(review): clamped to the TDCO register range [-64, 63];
	 * the computed value is always >= 1 here since brp and the
	 * segments are at least 1.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
752 
/* Configure the chip's GPIO pins so PIN1 acts as the RX interrupt
 * line; a no-op if no rx_int line was configured.
 */
static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input/RX Interrupt
	 *
	 * PIN1 must be Input, otherwise there is a glitch on the
	 * rx-INT line. It happens between setting the PIN as output
	 * (in the first byte of the SPI transfer) and configuring the
	 * PIN as interrupt (in the last byte of the SPI transfer).
	 */
	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
		MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
773 
/* Revert both GPIO pins to plain inputs, disabling the RX interrupt
 * function of PIN1; a no-op if no rx_int line was configured.
 */
static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
{
	u32 val;

	if (!priv->rx_int)
		return 0;

	/* Configure GPIOs:
	 * - PIN0: GPIO Input
	 * - PIN1: GPIO Input
	 */
	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
789 
/* Program the FIFO control register for one RX ring: FIFO depth,
 * timestamping, overflow and not-empty interrupts, and a payload size
 * matching the configured mode (64 bytes for FD/listen-only, 8
 * otherwise).
 */
static int
mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring)
{
	u32 fifo_con;

	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
	 *
	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
	 * generate a RXOVIF, use this to properly detect RX MAB
	 * overflows.
	 */
	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			      ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_RXTSEN |
		MCP251XFD_REG_FIFOCON_RXOVIE |
		MCP251XFD_REG_FIFOCON_TFNRFNIE;

	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				       MCP251XFD_REG_FIFOCON_PLSIZE_8);

	return regmap_write(priv->map_reg,
			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
}
818 
/* Enable the acceptance filter for one RX ring and point it at the
 * ring's FIFO. Only the filter's nibble of the shared FLTCON register
 * is touched (four filters per register).
 */
static int
mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
				  const struct mcp251xfd_rx_ring *ring)
{
	u32 fltcon;

	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);

	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
				  fltcon);
}
833 
/* Program the TEF, the TX FIFO (FIFO 1) and all RX FIFOs/filters on
 * the chip from the previously sized rings.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* Payload size matches the mode, as in the RX FIFO setup. */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* One-shot mode limits retransmission attempts. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
891 
/* Reset the driver's ECC state, enable the chip's ECC if the quirk
 * asks for it, and write the whole controller RAM once with zeros.
 * NOTE(review): the full-RAM write presumably brings the ECC parity
 * bits into a consistent state — confirm against the datasheet.
 */
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	void *ram;
	u32 val = 0;
	int err;

	ecc->ecc_stat = 0;

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
		val = MCP251XFD_REG_ECCCON_ECCEN;

	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
				 MCP251XFD_REG_ECCCON_ECCEN, val);
	if (err)
		return err;

	/* Temporary zeroed bounce buffer for the RAM initialization. */
	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
	if (!ram)
		return -ENOMEM;

	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
			       MCP251XFD_RAM_SIZE);
	kfree(ram);

	return err;
}
919 
920 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
921 {
922 	struct mcp251xfd_ecc *ecc = &priv->ecc;
923 
924 	ecc->ecc_stat = 0;
925 }
926 
927 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
928 {
929 	u8 mode;
930 
931 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
932 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
933 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
934 		mode = MCP251XFD_REG_CON_MODE_MIXED;
935 	else
936 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
937 
938 	return mode;
939 }
940 
941 static int
942 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
943 				 bool nowait)
944 {
945 	u8 mode;
946 
947 	mode = mcp251xfd_get_normal_mode(priv);
948 
949 	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
950 }
951 
/* Enter normal operation mode and wait for the switch to complete. */
static inline int
mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, false);
}
957 
/* Request normal operation mode without waiting for completion. */
static inline int
mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, true);
}
963 
/* Enable all interrupt sources the driver handles: CRC and ECC error
 * interrupts, plus the main interrupt enable bits in the INT register.
 * Bus error reporting (IVMIE) is only enabled if requested via
 * ctrlmode.
 */
static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
{
	u32 val;
	int err;

	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
	if (err)
		return err;

	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
	if (err)
		return err;

	val = MCP251XFD_REG_INT_CERRIE |
		MCP251XFD_REG_INT_SERRIE |
		MCP251XFD_REG_INT_RXOVIE |
		MCP251XFD_REG_INT_TXATIE |
		MCP251XFD_REG_INT_SPICRCIE |
		MCP251XFD_REG_INT_ECCIE |
		MCP251XFD_REG_INT_TEFIE |
		MCP251XFD_REG_INT_MODIE |
		MCP251XFD_REG_INT_RXIE;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		val |= MCP251XFD_REG_INT_IVMIE;

	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
}
994 
995 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
996 {
997 	int err;
998 	u32 mask;
999 
1000 	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1001 	if (err)
1002 		return err;
1003 
1004 	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1005 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1006 				 mask, 0x0);
1007 	if (err)
1008 		return err;
1009 
1010 	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
1011 }
1012 
/* Stop the chip: record @state, mask interrupts and put the controller
 * into Sleep Mode.
 * NOTE(review): errors from the interrupt/rx-int disable steps are
 * ignored; only the result of the Sleep Mode request is returned.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1022 
1023 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1024 {
1025 	int err;
1026 
1027 	err = mcp251xfd_chip_softreset(priv);
1028 	if (err)
1029 		goto out_chip_stop;
1030 
1031 	err = mcp251xfd_chip_clock_init(priv);
1032 	if (err)
1033 		goto out_chip_stop;
1034 
1035 	err = mcp251xfd_set_bittiming(priv);
1036 	if (err)
1037 		goto out_chip_stop;
1038 
1039 	err = mcp251xfd_chip_rx_int_enable(priv);
1040 	if (err)
1041 		return err;
1042 
1043 	err = mcp251xfd_chip_ecc_init(priv);
1044 	if (err)
1045 		goto out_chip_stop;
1046 
1047 	mcp251xfd_ring_init(priv);
1048 
1049 	err = mcp251xfd_chip_fifo_init(priv);
1050 	if (err)
1051 		goto out_chip_stop;
1052 
1053 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1054 
1055 	err = mcp251xfd_chip_set_normal_mode(priv);
1056 	if (err)
1057 		goto out_chip_stop;
1058 
1059 	return 0;
1060 
1061  out_chip_stop:
1062 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1063 
1064 	return err;
1065 }
1066 
/* CAN framework .do_set_mode callback. Only CAN_MODE_START is
 * supported: restart the chip, re-enable interrupts and wake the TX
 * queue. Interrupt-enable failure rolls back via chip_stop().
 */
static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = mcp251xfd_chip_start(priv);
		if (err)
			return err;

		err = mcp251xfd_chip_interrupts_enable(priv);
		if (err) {
			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
			return err;
		}

		netif_wake_queue(ndev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1093 
/* Read the TX/RX error counters from the TREC register. If the chip
 * reports Bus Off, the TX counter is forced to 256 (the value implied
 * by Bus Off, which the TEC field cannot represent).
 */
static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
					struct can_berr_counter *bec)
{
	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
	u32 trec;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	if (trec & MCP251XFD_REG_TREC_TXBO)
		bec->txerr = 256;
	else
		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);

	return 0;
}
1113 
/* CAN framework .do_get_berr_counter callback.
 *
 * Returns 0 without touching *bec when the interface is down (so the
 * controller isn't woken); returns the cached counters during Bus Off;
 * otherwise reads them from the chip.
 */
static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
				      struct can_berr_counter *bec)
{
	const struct mcp251xfd_priv *priv = netdev_priv(ndev);

	/* Avoid waking up the controller if the interface is down */
	if (!(ndev->flags & IFF_UP))
		return 0;

	/* The controller is powered down during Bus Off, use saved
	 * bec values.
	 */
	if (priv->can.state == CAN_STATE_BUS_OFF) {
		*bec = priv->bec;
		return 0;
	}

	return __mcp251xfd_get_berr_counter(ndev, bec);
}
1133 
1134 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1135 {
1136 	u8 tef_tail_chip, tef_tail;
1137 	int err;
1138 
1139 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1140 		return 0;
1141 
1142 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1143 	if (err)
1144 		return err;
1145 
1146 	tef_tail = mcp251xfd_get_tef_tail(priv);
1147 	if (tef_tail_chip != tef_tail) {
1148 		netdev_err(priv->ndev,
1149 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1150 			   tef_tail_chip, tef_tail);
1151 		return -EILSEQ;
1152 	}
1153 
1154 	return 0;
1155 }
1156 
1157 static int
1158 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1159 			const struct mcp251xfd_rx_ring *ring)
1160 {
1161 	u8 rx_tail_chip, rx_tail;
1162 	int err;
1163 
1164 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1165 		return 0;
1166 
1167 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1168 	if (err)
1169 		return err;
1170 
1171 	rx_tail = mcp251xfd_get_rx_tail(ring);
1172 	if (rx_tail_chip != rx_tail) {
1173 		netdev_err(priv->ndev,
1174 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1175 			   rx_tail_chip, rx_tail);
1176 		return -EILSEQ;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
/* Called when a TEF object's sequence number doesn't match the
 * driver's tef_tail. Reads TEFSTA to distinguish a real TEF overflow
 * (-ENOBUFS) from a TEF object that was read before its completion
 * (-EAGAIN, caller retries on the next interrupt).
 */
static int
mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	u32 tef_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
	if (err)
		return err;

	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
		netdev_err(priv->ndev,
			   "Transmit Event FIFO buffer overflow.\n");
		return -ENOBUFS;
	}

	/* No overflow: log the FIFO fill state and current indices for
	 * diagnosis.
	 */
	netdev_info(priv->ndev,
		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
		    "not empty" : "empty",
		    seq, priv->tef.tail, priv->tef.head, tx_ring->head);

	/* The Sequence Number in the TEF doesn't match our tef_tail. */
	return -EAGAIN;
}
1209 
/* Process a single TEF (Transmit Event FIFO) object: validate its
 * sequence number, complete the matching echo skb, account TX stats,
 * and advance the chip's and the driver's TEF tail.
 *
 * Returns 0 on success, -EAGAIN via the recover path if the sequence
 * number doesn't match (TEF object read too early), or a negative
 * errno on register access failure.
 */
static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 seq, seq_masked, tef_tail_masked;
	int err;

	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
			hw_tef_obj->flags);

	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
	 * compare 7 bits, this should be enough to detect
	 * not-yet-completed, i.e. old TEF objects.
	 */
	seq_masked = seq &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	tef_tail_masked = priv->tef.tail &
		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
	if (seq_masked != tef_tail_masked)
		return mcp251xfd_handle_tefif_recover(priv, seq);

	/* Hand the echo skb back to the stack; returns its length for
	 * the byte counter.
	 */
	stats->tx_bytes +=
		can_rx_offload_get_echo_skb(&priv->offload,
					    mcp251xfd_get_tef_tail(priv),
					    hw_tef_obj->ts);
	stats->tx_packets++;

	/* finally increment the TEF pointer: setting UINC tells the
	 * chip this TEF object has been consumed
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON,
				 GENMASK(15, 8),
				 MCP251XFD_REG_TEFCON_UINC);
	if (err)
		return err;

	priv->tef.tail++;
	tx_ring->tail++;

	return mcp251xfd_check_tef_tail(priv);
}
1251 
/* Derive the new TEF head from the chip's TX tail. The driver keeps
 * free-running (non-wrapping) head/tail counters, while the chip
 * reports an index within the ring, hence the round_down() +
 * wrap-around correction below.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail is the next TX-Object sent by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef.head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef.head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1275 
1276 static inline int
1277 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1278 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1279 		       const u8 offset, const u8 len)
1280 {
1281 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1282 
1283 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1284 	    (offset > tx_ring->obj_num ||
1285 	     len > tx_ring->obj_num ||
1286 	     offset + len > tx_ring->obj_num)) {
1287 		netdev_err(priv->ndev,
1288 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1289 			   tx_ring->obj_num, offset, len);
1290 		return -ERANGE;
1291 	}
1292 
1293 	return regmap_bulk_read(priv->map_rx,
1294 				mcp251xfd_get_tef_obj_addr(offset),
1295 				hw_tef_obj,
1296 				sizeof(*hw_tef_obj) / sizeof(u32) * len);
1297 }
1298 
/* TEF interrupt handler: read all pending TEF objects (in up to two
 * chunks if the range wraps around the end of the ring) and complete
 * the corresponding TX requests.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	/* First chunk: contiguous objects from tail to end of ring. */
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	if (l < len) {
		/* Second chunk: wrapped-around objects from ring start. */
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;
	}

	/* NOTE: reached on normal loop completion as well as via the
	 * -EAGAIN early exit above.
	 */
 out_netif_wake_queue:
	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1348 
/* Derive the new RX head for @ring from the chip's RX head. As with
 * the TEF ring, the driver's counters are free-running while the chip
 * reports an in-ring index, hence the wrap-around correction.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1372 
1373 static void
1374 mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
1375 			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1376 			   struct sk_buff *skb)
1377 {
1378 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1379 
1380 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
1381 		u32 sid, eid;
1382 
1383 		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1384 		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1385 
1386 		cfd->can_id = CAN_EFF_FLAG |
1387 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1388 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1389 	} else {
1390 		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1391 					hw_rx_obj->id);
1392 	}
1393 
1394 	/* CANFD */
1395 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1396 		u8 dlc;
1397 
1398 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1399 			cfd->flags |= CANFD_ESI;
1400 
1401 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1402 			cfd->flags |= CANFD_BRS;
1403 
1404 		dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
1405 		cfd->len = can_fd_dlc2len(dlc);
1406 	} else {
1407 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1408 			cfd->can_id |= CAN_RTR_FLAG;
1409 
1410 		cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
1411 						 hw_rx_obj->flags));
1412 	}
1413 
1414 	memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1415 }
1416 
/* Process a single received RX object: allocate a CAN or CAN-FD skb,
 * fill it from the hardware object, queue it (timestamp-sorted) to
 * the rx-offload layer and advance the chip's FIFO tail via UINC.
 *
 * Allocation failure is not an error: the frame is dropped and
 * counted, and the FIFO tail is still advanced.
 */
static int
mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
			  struct mcp251xfd_rx_ring *ring,
			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct canfd_frame *cfd;
	int err;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
		skb = alloc_canfd_skb(priv->ndev, &cfd);
	else
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);

	/* relies on alloc_can{,fd}_skb() setting cfd to NULL on
	 * allocation failure
	 */
	if (!cfd) {
		stats->rx_dropped++;
		return 0;
	}

	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
	if (err)
		stats->rx_fifo_errors++;

	ring->tail++;

	/* finally increment the RX pointer */
	return regmap_update_bits(priv->map_reg,
				  MCP251XFD_REG_FIFOCON(ring->fifo_nr),
				  GENMASK(15, 8),
				  MCP251XFD_REG_FIFOCON_UINC);
}
1450 
1451 static inline int
1452 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1453 		      const struct mcp251xfd_rx_ring *ring,
1454 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1455 		      const u8 offset, const u8 len)
1456 {
1457 	int err;
1458 
1459 	err = regmap_bulk_read(priv->map_rx,
1460 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1461 			       hw_rx_obj,
1462 			       len * ring->obj_size / sizeof(u32));
1463 
1464 	return err;
1465 }
1466 
/* Drain one RX ring: update the head from the chip, then repeatedly
 * read and process the longest contiguous (non-wrapping) run of
 * pending RX objects until the ring is empty.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* len is the number of objects up to the end of the ring;
	 * wrapped-around objects are handled by the next loop
	 * iteration.
	 */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}
	}

	return 0;
}
1498 
1499 static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
1500 {
1501 	struct mcp251xfd_rx_ring *ring;
1502 	int err, n;
1503 
1504 	mcp251xfd_for_each_rx_ring(priv, ring, n) {
1505 		err = mcp251xfd_handle_rxif_ring(priv, ring);
1506 		if (err)
1507 			return err;
1508 	}
1509 
1510 	return 0;
1511 }
1512 
/* Read the free-running time base counter (TBC) of the chip. */
static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
					  u32 *timestamp)
{
	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
}
1518 
1519 static struct sk_buff *
1520 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
1521 			    struct can_frame **cf, u32 *timestamp)
1522 {
1523 	int err;
1524 
1525 	err = mcp251xfd_get_timestamp(priv, timestamp);
1526 	if (err)
1527 		return NULL;
1528 
1529 	return alloc_can_err_skb(priv->ndev, cf);
1530 }
1531 
/* RX overflow interrupt handler: clear the per-FIFO overflow flags,
 * log whether the overflow was a FIFO or a RX MAB overflow, and queue
 * a CAN error frame reporting CAN_ERR_CRTL_RX_OVERFLOW.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Acknowledge the overflow by clearing RXOVIF in the
		 * FIFO's status register.
		 */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	/* Report the overflow to user space; skb allocation failure is
	 * not fatal here.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1583 
/* TX attempt interrupt handler: currently a stub that only logs the
 * event.
 */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1590 
/* Invalid message (bus error) interrupt handler: decode BDIAG1 into
 * CAN error-frame bits, update RX/TX error statistics and queue an
 * error frame. The handler proceeds even if the error skb allocation
 * fails, so the statistics are always updated.
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	/* cf stays NULL on allocation failure; all cf accesses below
	 * are guarded.
	 */
	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	if (!cf)
		return 0;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1687 
/* CAN error interrupt handler: derive the TX and RX error states from
 * the TREC register, propagate a state change via can_change_state(),
 * shut the controller down on Bus Off, and queue an error frame
 * carrying the error counters.
 */
static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	enum can_state new_state, rx_state, tx_state;
	u32 trec, timestamp;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
	if (err)
		return err;

	/* Map TREC flags to the most severe matching TX state. */
	if (trec & MCP251XFD_REG_TREC_TXBO)
		tx_state = CAN_STATE_BUS_OFF;
	else if (trec & MCP251XFD_REG_TREC_TXBP)
		tx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_TXWARN)
		tx_state = CAN_STATE_ERROR_WARNING;
	else
		tx_state = CAN_STATE_ERROR_ACTIVE;

	/* Same for RX; there is no RX Bus Off. */
	if (trec & MCP251XFD_REG_TREC_RXBP)
		rx_state = CAN_STATE_ERROR_PASSIVE;
	else if (trec & MCP251XFD_REG_TREC_RXWARN)
		rx_state = CAN_STATE_ERROR_WARNING;
	else
		rx_state = CAN_STATE_ERROR_ACTIVE;

	new_state = max(tx_state, rx_state);
	if (new_state == priv->can.state)
		return 0;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		/* As we're going to switch off the chip now, let's
		 * save the error counters and return them to
		 * userspace, if do_get_berr_counter() is called while
		 * the chip is in Bus Off.
		 */
		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
		if (err)
			return err;

		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
		can_bus_off(priv->ndev);
	}

	if (!skb)
		return 0;

	if (new_state != CAN_STATE_BUS_OFF) {
		struct can_berr_counter bec;

		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
		if (err)
			return err;
		/* Error counters go into data[6]/data[7] of the error
		 * frame.
		 */
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1760 
/* Mode change interrupt handler: check whether the controller left
 * the requested (normal) mode, e.g. due to a TX MAB underflow (see
 * errata notes below), and request normal mode again. If an ECC error
 * is pending, the mode change is deferred (*set_normal_mode is set)
 * until mcp251xfd_handle_eccif() has repaired the TX-RAM.
 */
static int
mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
{
	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
	u8 mode;
	int err;

	err = mcp251xfd_chip_get_mode(priv, &mode);
	if (err)
		return err;

	if (mode == mode_reference) {
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
		return 0;
	}

	/* According to MCP2517FD errata DS80000792B 1., during a TX
	 * MAB underflow, the controller will transition to Restricted
	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
	 *
	 * However this is not always the case. If SERR2LOM is
	 * configured for Restricted Operation Mode (SERR2LOM not set)
	 * the MCP2517FD will sometimes transition to Listen Only Mode
	 * first. When polling this bit we see that it will transition
	 * to Restricted Operation Mode shortly after.
	 */
	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
		netdev_dbg(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);
	else
		netdev_err(priv->ndev,
			   "Controller changed into %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode), mode);

	/* After the application requests Normal mode, the Controller
	 * will automatically attempt to retransmit the message that
	 * caused the TX MAB underflow.
	 *
	 * However, if there is an ECC error in the TX-RAM, we first
	 * have to reload the tx-object before requesting Normal
	 * mode. This is done later in mcp251xfd_handle_eccif().
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
		*set_normal_mode = true;
		return 0;
	}

	return mcp251xfd_chip_set_normal_mode_nowait(priv);
}
1815 
/* System error interrupt handler: classify SERRIF based on the other
 * pending interrupt flags into a TX MAB underflow or a RX MAB
 * overflow (see errata notes below), update statistics accordingly,
 * and complain if the cause cannot be determined.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1893 
/* Recover from a persistent ECC error in TX-RAM: re-upload the
 * affected TX object @nr into controller RAM and trigger a
 * retransmission by requesting normal mode. Bails out with -EINVAL if
 * the error location doesn't match the expected TX object.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* distance of the failing object from the chip's TX tail,
	 * modulo ring size (obj_num is a power of two)
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
1942 
/* ECC error interrupt handler: acknowledge the error, locate it
 * (TX-RAM or elsewhere), track repeat occurrences and, after
 * MCP251XFD_ECC_CNT_MAX repeats of the same error in TX-RAM, start
 * recovery. @set_normal_mode is the deferred mode change requested by
 * mcp251xfd_handle_modif().
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Acknowledge: write 0 to the active IF bits (and 1 to the
	 * inactive ones, which stay set-able), hence ~ecc_stat.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	/* Carry out the mode change deferred by the MODIF handler. */
	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2018 
/* SPI CRC interrupt handler: read and acknowledge the CRC register,
 * then log whether a format error or a CRC mismatch was detected on a
 * CRC-protected SPI write.
 */
static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
{
	int err;
	u32 crc;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
	if (err)
		return err;

	/* Acknowledge by writing 0 to the active IF bits (~crc), see
	 * the note in the IRQ handler about avoiding r/m/w races.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
				 MCP251XFD_REG_CRC_IF_MASK,
				 ~crc);
	if (err)
		return err;

	if (crc & MCP251XFD_REG_CRC_FERRIF)
		netdev_notice(priv->ndev, "CRC write command format error.\n");
	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
		netdev_notice(priv->ndev,
			      "CRC write error detected. CRC=0x%04lx.\n",
			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));

	return 0;
}
2043 
/* Call the mcp251xfd_handle_<irq>() sub-handler, log its name on
 * failure and evaluate to its return value (statement expression).
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2056 
/* Threaded IRQ handler: first drain the optional dedicated RX
 * interrupt line, then loop reading the interrupt status registers
 * and dispatching to the individual mcp251xfd_handle_*() sub-handlers
 * until no enabled interrupt flag remains pending. On any sub-handler
 * error, interrupts are disabled and the handler exits.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	irqreturn_t handled = IRQ_NONE;
	int err;

	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Snapshot INT and the following status registers in
		 * one bulk read into priv->regs_status.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       sizeof(u32));
		if (err)
			goto out_fail;

		/* Only consider flags that are both pending and enabled. */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2517FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2201 
2202 static inline struct
2203 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2204 {
2205 	u8 tx_head;
2206 
2207 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2208 
2209 	return &tx_ring->obj[tx_head];
2210 }
2211 
/* Serialize a CAN(-FD) skb into the SPI load buffer of a TX object.
 *
 * Converts the CAN ID and flags into the controller's hardware TX
 * object layout, copies the payload, and computes the total SPI
 * transfer length (including the SPI command and, if the CRC_TX quirk
 * is active, the trailing CRC). The result is stored in tx_obj; the
 * actual SPI transfer is issued later by mcp251xfd_tx_obj_write().
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int offset, len;

	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		/* Extended frame: split the 29 bit ID into the SID/EID
		 * fields of the hardware object and set the IDE flag.
		 */
		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		/* Standard frame: the 11 bit ID goes into SID only. */
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	dlc = can_fd_len2dlc(cfd->len);
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
		FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;
	}

	/* The buffer layout differs depending on whether a CRC is
	 * appended to the RAM write command.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Clear data at end of CAN frame */
	offset = round_down(cfd->len, sizeof(u32));
	len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
	if (MCP251XFD_SANITIZE_CAN && len)
		memset(hw_tx_obj->data + offset, 0x0, len);
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	/* Final SPI transfer length for mcp251xfd_tx_obj_write(). */
	tx_obj->xfer[0].len = len;
}
2303 
2304 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
2305 				  struct mcp251xfd_tx_obj *tx_obj)
2306 {
2307 	return spi_async(priv->spi, &tx_obj->msg);
2308 }
2309 
/* Check whether the TX ring is full and manage the netdev queue state.
 *
 * Returns true (and leaves the queue stopped) if the ring is still
 * full after a re-check; returns false if there is room. The
 * stop/barrier/re-check pattern avoids a race with the TX-completion
 * path updating the ring tail concurrently.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	/* Space freed up between the two checks: resume the queue. */
	netif_start_queue(priv->ndev);

	return false;
}
2334 
/* .ndo_start_xmit callback: queue one CAN frame for transmission.
 *
 * Serializes the skb into the next free TX object, advances the ring
 * head, stores the echo skb and kicks off the asynchronous SPI write.
 * Always returns NETDEV_TX_OK except when the ring is full
 * (NETDEV_TX_BUSY).
 */
static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
					struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 tx_head;
	int err;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
		netif_stop_queue(ndev);

	/* Store the echo skb before starting the SPI transfer, so the
	 * TEF completion path can always find it.
	 */
	can_put_echo_skb(skb, ndev, tx_head);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	/* NOTE(review): on spi_async() failure the ring head has been
	 * advanced and the echo skb queued but never released — confirm
	 * whether the head should be rewound and the echo skb freed here.
	 */
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
2372 
/* .ndo_open callback: bring the interface up.
 *
 * Resumes the device (runtime PM), allocates the TX/RX rings, enables
 * the transceiver, starts the chip, enables RX offload, requests the
 * threaded IRQ and finally enables the chip interrupts. On failure
 * every step is unwound in reverse order via the goto chain.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	can_rx_offload_enable(&priv->offload);

	/* IRQF_ONESHOT: the line stays masked until the threaded
	 * handler has finished.
	 */
	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	/* NOTE(review): chip_stop() is also reached when chip_start()
	 * was never called (e.g. ring_alloc failure) — confirm this is
	 * safe on a chip still in reset/sleep.
	 */
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2433 
/* .ndo_stop callback: bring the interface down.
 *
 * Tears down everything mcp251xfd_open() set up, in reverse order:
 * stop the queue, mask chip interrupts, free the IRQ, disable RX
 * offload, stop the chip, disable the transceiver, free the rings and
 * finally drop the runtime PM reference taken in open.
 */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	mcp251xfd_chip_interrupts_disable(priv);
	/* ndev->irq was set to spi->irq in probe, so this matches the
	 * request_threaded_irq() in mcp251xfd_open().
	 */
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2451 
/* net_device callbacks; MTU changes are validated by the generic CAN
 * helper can_change_mtu().
 */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2458 
2459 static void
2460 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2461 {
2462 	const struct spi_device *spi = priv->spi;
2463 	const struct spi_controller *ctlr = spi->controller;
2464 
2465 	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2466 		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2467 }
2468 
/* Autodetect the chip model (MCP2517FD vs. MCP2518FD).
 *
 * Probes the OSC_LPMEN bit, which only the MCP2518FD implements, and
 * fixes up priv->devtype_data if firmware specified a different model.
 * Afterwards the regmap is re-initialized with the quirks of the
 * detected model. Returns 0 on success or a negative error code.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	/* If the bit stuck, the chip is an MCP2518FD. */
	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Warn only when firmware named a concrete (non-251x) model
	 * that disagrees with the detection.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2509 
/* Verify that the optional RX_INT GPIO behaves sanely.
 *
 * If the RX interrupt line is already active right after a softreset,
 * it cannot be trusted; in that case RX_INT support is disabled and
 * the GPIO is released. Returns 0 on success (with or without RX_INT)
 * or a negative error code from the chip accessors.
 */
static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
{
	int err, rx_pending;

	/* RX_INT is optional; nothing to do without the GPIO. */
	if (!priv->rx_int)
		return 0;

	err = mcp251xfd_chip_rx_int_enable(priv);
	if (err)
		return err;

	/* Check if RX_INT is properly working. The RX_INT should not
	 * be active after a softreset.
	 */
	rx_pending = gpiod_get_value_cansleep(priv->rx_int);

	err = mcp251xfd_chip_rx_int_disable(priv);
	if (err)
		return err;

	if (!rx_pending)
		return 0;

	/* Line is stuck active: fall back to operation without RX_INT. */
	netdev_info(priv->ndev,
		    "RX_INT active after softreset, disabling RX_INT support.");
	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
	priv->rx_int = NULL;

	return 0;
}
2540 
2541 static int
2542 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2543 			      u32 *dev_id, u32 *effective_speed_hz)
2544 {
2545 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2546 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2547 	struct spi_transfer xfer[2] = { };
2548 	int err;
2549 
2550 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2551 	if (!buf_rx)
2552 		return -ENOMEM;
2553 
2554 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2555 	if (!buf_tx) {
2556 		err = -ENOMEM;
2557 		goto out_kfree_buf_rx;
2558 	}
2559 
2560 	xfer[0].tx_buf = buf_tx;
2561 	xfer[0].len = sizeof(buf_tx->cmd);
2562 	xfer[1].rx_buf = buf_rx->data;
2563 	xfer[1].len = sizeof(dev_id);
2564 
2565 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2566 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2567 	if (err)
2568 		goto out_kfree_buf_tx;
2569 
2570 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2571 	*effective_speed_hz = xfer->effective_speed_hz;
2572 
2573  out_kfree_buf_tx:
2574 	kfree(buf_tx);
2575  out_kfree_buf_rx:
2576 	kfree(buf_rx);
2577 
2578 	return 0;
2579 }
2580 
/* Expands to '+' if the given quirk is active, '-' otherwise. Relies
 * on a local variable named "priv" being in scope; used to pretty-print
 * the quirk list in mcp251xfd_register_done().
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2583 
/* Log a one-line summary of the successfully initialized device:
 * model, silicon revision, active quirks and the various clock/SPI
 * frequencies (c: CAN clock, m: max SPI from firmware, r: resulting
 * capped SPI, e: effective SPI speed reported by the controller).
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2618 
/* Power up the chip, detect the model and register the CAN netdev.
 *
 * Enables clocks/regulators, activates runtime PM, softresets the
 * chip, autodetects the model, validates the RX_INT GPIO, registers
 * the candev and finally puts the controller into sleep mode so
 * runtime PM can gate clocks and vdd again. On failure the steps are
 * unwound via the goto chain. Returns 0 or a negative error code.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means no chip answered at all: skip the sleep-mode
	 * write in the unwind path.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2682 
/* Undo mcp251xfd_register(): unregister the candev, balance the
 * runtime PM state and switch off clocks and vdd.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	/* Resume the device (powering clocks/vdd via runtime PM if
	 * enabled), then drop the usage count without idling so the
	 * supplies can be disabled manually below.
	 */
	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2694 
/* Device-tree match table. "microchip,mcp251xfd" selects model
 * autodetection (see mcp251xfd_register_chip_detect()).
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2710 
/* SPI device-ID table; used as fallback when there is no OF match
 * (see spi_get_device_id() in mcp251xfd_probe()).
 */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2726 
2727 static int mcp251xfd_probe(struct spi_device *spi)
2728 {
2729 	const void *match;
2730 	struct net_device *ndev;
2731 	struct mcp251xfd_priv *priv;
2732 	struct gpio_desc *rx_int;
2733 	struct regulator *reg_vdd, *reg_xceiver;
2734 	struct clk *clk;
2735 	u32 freq;
2736 	int err;
2737 
2738 	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2739 					 GPIOD_IN);
2740 	if (PTR_ERR(rx_int) == -EPROBE_DEFER)
2741 		return -EPROBE_DEFER;
2742 	else if (IS_ERR(rx_int))
2743 		return PTR_ERR(rx_int);
2744 
2745 	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2746 	if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
2747 		return -EPROBE_DEFER;
2748 	else if (PTR_ERR(reg_vdd) == -ENODEV)
2749 		reg_vdd = NULL;
2750 	else if (IS_ERR(reg_vdd))
2751 		return PTR_ERR(reg_vdd);
2752 
2753 	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2754 	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
2755 		return -EPROBE_DEFER;
2756 	else if (PTR_ERR(reg_xceiver) == -ENODEV)
2757 		reg_xceiver = NULL;
2758 	else if (IS_ERR(reg_xceiver))
2759 		return PTR_ERR(reg_xceiver);
2760 
2761 	clk = devm_clk_get(&spi->dev, NULL);
2762 	if (IS_ERR(clk)) {
2763 		dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
2764 		return PTR_ERR(clk);
2765 	}
2766 	freq = clk_get_rate(clk);
2767 
2768 	/* Sanity check */
2769 	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2770 	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2771 		dev_err(&spi->dev,
2772 			"Oscillator frequency (%u Hz) is too low or high.\n",
2773 			freq);
2774 		return -ERANGE;
2775 	}
2776 
2777 	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2778 		dev_err(&spi->dev,
2779 			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2780 			freq);
2781 		return -ERANGE;
2782 	}
2783 
2784 	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2785 			    MCP251XFD_TX_OBJ_NUM_MAX);
2786 	if (!ndev)
2787 		return -ENOMEM;
2788 
2789 	SET_NETDEV_DEV(ndev, &spi->dev);
2790 
2791 	ndev->netdev_ops = &mcp251xfd_netdev_ops;
2792 	ndev->irq = spi->irq;
2793 	ndev->flags |= IFF_ECHO;
2794 
2795 	priv = netdev_priv(ndev);
2796 	spi_set_drvdata(spi, priv);
2797 	priv->can.clock.freq = freq;
2798 	priv->can.do_set_mode = mcp251xfd_set_mode;
2799 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2800 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2801 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2802 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
2803 		CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
2804 		CAN_CTRLMODE_FD_NON_ISO;
2805 	priv->ndev = ndev;
2806 	priv->spi = spi;
2807 	priv->rx_int = rx_int;
2808 	priv->clk = clk;
2809 	priv->reg_vdd = reg_vdd;
2810 	priv->reg_xceiver = reg_xceiver;
2811 
2812 	match = device_get_match_data(&spi->dev);
2813 	if (match)
2814 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2815 	else
2816 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2817 			spi_get_device_id(spi)->driver_data;
2818 
2819 	/* Errata Reference:
2820 	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
2821 	 *
2822 	 * The SPI can write corrupted data to the RAM at fast SPI
2823 	 * speeds:
2824 	 *
2825 	 * Simultaneous activity on the CAN bus while writing data to
2826 	 * RAM via the SPI interface, with high SCK frequency, can
2827 	 * lead to corrupted data being written to RAM.
2828 	 *
2829 	 * Fix/Work Around:
2830 	 * Ensure that FSCK is less than or equal to 0.85 *
2831 	 * (FSYSCLK/2).
2832 	 *
2833 	 * Known good and bad combinations are:
2834 	 *
2835 	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	Status	config
2836 	 *
2837 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
2838 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 9375000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
2839 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
2840 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	18750000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
2841 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	good	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2842 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 9523809 Hz	 95.34%	 28571429 Hz	bad	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2843 	 * 2517 40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
2844 	 * 2518 40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
2845 	 *
2846 	 */
2847 	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2848 	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2849 	spi->bits_per_word = 8;
2850 	spi->rt = true;
2851 	err = spi_setup(spi);
2852 	if (err)
2853 		goto out_free_candev;
2854 
2855 	err = mcp251xfd_regmap_init(priv);
2856 	if (err)
2857 		goto out_free_candev;
2858 
2859 	err = can_rx_offload_add_manual(ndev, &priv->offload,
2860 					MCP251XFD_NAPI_WEIGHT);
2861 	if (err)
2862 		goto out_free_candev;
2863 
2864 	err = mcp251xfd_register(priv);
2865 	if (err)
2866 		goto out_free_candev;
2867 
2868 	return 0;
2869 
2870  out_free_candev:
2871 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
2872 
2873 	free_candev(ndev);
2874 
2875 	return err;
2876 }
2877 
/* SPI remove: tear down RX offload, unregister the device, restore
 * the firmware-provided SPI speed limit and free the candev.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	/* Undo the errata cap applied in probe. */
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
2890 
/* Runtime PM suspend: gate the external clock and supplies. */
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_disable(priv);
}
2897 
/* Runtime PM resume: re-enable the external clock and supplies. */
static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
{
	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);

	return mcp251xfd_clks_and_vdd_enable(priv);
}
2904 
/* Runtime PM only; no system-sleep handlers are provided. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
2909 
/* SPI driver glue; matches via OF table or, as fallback, via the
 * spi_device_id table (see mcp251xfd_probe()).
 */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
2920 module_spi_driver(mcp251xfd_driver);
2921 
2922 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
2923 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
2924 MODULE_LICENSE("GPL v2");
2925