xref: /linux/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c (revision 7f356166aebb0d956d367dfe55e19d7783277d09)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // mcp251xfd - Microchip MCP251xFD Family CAN controller driver
4 //
5 // Copyright (c) 2019, 2020 Pengutronix,
6 //                          Marc Kleine-Budde <kernel@pengutronix.de>
7 //
8 // Based on:
9 //
10 // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
11 //
12 // Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
13 //
14 
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/pm_runtime.h>
23 
24 #include <asm/unaligned.h>
25 
26 #include "mcp251xfd.h"
27 
28 #define DEVICE_NAME "mcp251xfd"
29 
/* MCP2517FD: all CRC/ECC quirks plus suppression of MAB overflow warnings. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
	.quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
		MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
		MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2517FD,
};
36 
/* MCP2518FD: CRC protected register/RX/TX access and ECC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP2518FD,
};
42 
/* Autodetect model, start with CRC enabled. The exact model is
 * determined later and the quirks are narrowed accordingly.
 */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
	.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
		MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
	.model = MCP251XFD_MODEL_MCP251XFD,
};
49 
/* Nominal (arbitration phase) bit timing limits of the controller. */
static const struct can_bittiming_const mcp251xfd_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 2,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
61 
/* Data phase (CAN FD) bit timing limits of the controller. */
static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
	.name = DEVICE_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
73 
74 static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
75 {
76 	switch (model) {
77 	case MCP251XFD_MODEL_MCP2517FD:
78 		return "MCP2517FD";
79 	case MCP251XFD_MODEL_MCP2518FD:
80 		return "MCP2518FD";
81 	case MCP251XFD_MODEL_MCP251XFD:
82 		return "MCP251xFD";
83 	}
84 
85 	return "<unknown>";
86 }
87 
88 static inline const char *
89 mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
90 {
91 	return __mcp251xfd_get_model_str(priv->devtype_data.model);
92 }
93 
94 static const char *mcp251xfd_get_mode_str(const u8 mode)
95 {
96 	switch (mode) {
97 	case MCP251XFD_REG_CON_MODE_MIXED:
98 		return "Mixed (CAN FD/CAN 2.0)";
99 	case MCP251XFD_REG_CON_MODE_SLEEP:
100 		return "Sleep";
101 	case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
102 		return "Internal Loopback";
103 	case MCP251XFD_REG_CON_MODE_LISTENONLY:
104 		return "Listen Only";
105 	case MCP251XFD_REG_CON_MODE_CONFIG:
106 		return "Configuration";
107 	case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
108 		return "External Loopback";
109 	case MCP251XFD_REG_CON_MODE_CAN2_0:
110 		return "CAN 2.0";
111 	case MCP251XFD_REG_CON_MODE_RESTRICTED:
112 		return "Restricted Operation";
113 	}
114 
115 	return "<unknown>";
116 }
117 
118 static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
119 {
120 	if (!priv->reg_vdd)
121 		return 0;
122 
123 	return regulator_enable(priv->reg_vdd);
124 }
125 
126 static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
127 {
128 	if (!priv->reg_vdd)
129 		return 0;
130 
131 	return regulator_disable(priv->reg_vdd);
132 }
133 
134 static inline int
135 mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
136 {
137 	if (!priv->reg_xceiver)
138 		return 0;
139 
140 	return regulator_enable(priv->reg_xceiver);
141 }
142 
143 static inline int
144 mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
145 {
146 	if (!priv->reg_xceiver)
147 		return 0;
148 
149 	return regulator_disable(priv->reg_xceiver);
150 }
151 
152 static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
153 {
154 	int err;
155 
156 	err = clk_prepare_enable(priv->clk);
157 	if (err)
158 		return err;
159 
160 	err = mcp251xfd_vdd_enable(priv);
161 	if (err)
162 		clk_disable_unprepare(priv->clk);
163 
164 	/* Wait for oscillator stabilisation time after power up */
165 	usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
166 		     2 * MCP251XFD_OSC_STAB_SLEEP_US);
167 
168 	return err;
169 }
170 
171 static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
172 {
173 	int err;
174 
175 	err = mcp251xfd_vdd_disable(priv);
176 	if (err)
177 		return err;
178 
179 	clk_disable_unprepare(priv->clk);
180 
181 	return 0;
182 }
183 
/* Build a SPI WRITE (or WRITE_CRC) command for a partial register write.
 *
 * Only the bytes of @reg selected by @mask are transferred: the command
 * addresses the first set byte and the payload covers up to the last
 * set byte of the little-endian register value @val.
 *
 * Returns the total number of bytes to send over SPI (command +
 * payload, plus the trailing CRC if MCP251XFD_QUIRK_CRC_REG is active).
 */
static inline u8
mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
				union mcp251xfd_write_reg_buf *write_reg_buf,
				const u16 reg, const u32 mask, const u32 val)
{
	u8 first_byte, last_byte, len;
	u8 *data;
	__le32 val_le32;

	/* Contiguous byte span of the register touched by @mask. */
	first_byte = mcp251xfd_first_byte_set(mask);
	last_byte = mcp251xfd_last_byte_set(mask);
	len = last_byte - first_byte + 1;

	/* Shift the value so its first interesting byte lands at data[0]. */
	data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
	val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
	memcpy(data, &val_le32, len);

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(write_reg_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
		put_unaligned_be16(crc, (void *)write_reg_buf + len);

		/* Total length */
		len += sizeof(write_reg_buf->crc.crc);
	} else {
		len += sizeof(write_reg_buf->nocrc.cmd);
	}

	return len;
}
219 
220 static inline int
221 mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
222 				 u8 *tef_tail)
223 {
224 	u32 tef_ua;
225 	int err;
226 
227 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
228 	if (err)
229 		return err;
230 
231 	*tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
232 
233 	return 0;
234 }
235 
236 static inline int
237 mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
238 				u8 *tx_tail)
239 {
240 	u32 fifo_sta;
241 	int err;
242 
243 	err = regmap_read(priv->map_reg,
244 			  MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
245 			  &fifo_sta);
246 	if (err)
247 		return err;
248 
249 	*tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
250 
251 	return 0;
252 }
253 
254 static inline int
255 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
256 				const struct mcp251xfd_rx_ring *ring,
257 				u8 *rx_head)
258 {
259 	u32 fifo_sta;
260 	int err;
261 
262 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
263 			  &fifo_sta);
264 	if (err)
265 		return err;
266 
267 	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
268 
269 	return 0;
270 }
271 
272 static inline int
273 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
274 				const struct mcp251xfd_rx_ring *ring,
275 				u8 *rx_tail)
276 {
277 	u32 fifo_ua;
278 	int err;
279 
280 	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
281 			  &fifo_ua);
282 	if (err)
283 		return err;
284 
285 	fifo_ua -= ring->base - MCP251XFD_RAM_START;
286 	*rx_tail = fifo_ua / ring->obj_size;
287 
288 	return 0;
289 }
290 
/* Pre-build the SPI message for TX object @n of @ring.
 *
 * Each TX object gets a two-transfer message: the FIFO load (payload
 * length filled in per-frame at xmit time) followed by the shared
 * "request to send" register write of @rts_buf_len bytes.
 */
static void
mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
			      const struct mcp251xfd_tx_ring *ring,
			      struct mcp251xfd_tx_obj *tx_obj,
			      const u8 rts_buf_len,
			      const u8 n)
{
	struct spi_transfer *xfer;
	u16 addr;

	/* FIFO load */
	addr = mcp251xfd_get_tx_obj_addr(ring, n);
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
						     addr);
	else
		mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
					      addr);

	xfer = &tx_obj->xfer[0];
	xfer->tx_buf = &tx_obj->buf;
	xfer->len = 0;	/* actual len is assigned on the fly */
	/* Toggle chip select between the two transfers with no extra delay. */
	xfer->cs_change = 1;
	xfer->cs_change_delay.value = 0;
	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;

	/* FIFO request to send */
	xfer = &tx_obj->xfer[1];
	xfer->tx_buf = &ring->rts_buf;
	xfer->len = rts_buf_len;

	/* SPI message */
	spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
					ARRAY_SIZE(tx_obj->xfer));
}
326 
/* (Re-)initialize the TEF, TX and RX rings.
 *
 * Resets all head/tail pointers, lays the rings out back-to-back in
 * the controller's RAM (TEF objects, then TX objects, then the RX
 * rings), and pre-builds the SPI transfers used to increment the
 * chip-side tail pointers (UINC) and to request transmission.
 * Must be called after mcp251xfd_ring_alloc() has sized the rings.
 */
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tef_ring *tef_ring;
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
	struct mcp251xfd_tx_obj *tx_obj;
	u32 val;
	u16 addr;
	u8 len;
	int i, j;

	/* TEF */
	tef_ring = priv->tef;
	tef_ring->head = 0;
	tef_ring->tail = 0;

	/* FIFO increment TEF tail pointer */
	addr = MCP251XFD_REG_TEFCON;
	val = MCP251XFD_REG_TEFCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
					      addr, val, val);

	/* All UINC transfers share the same pre-built buffer; cs_change
	 * keeps them usable back-to-back in a single SPI message.
	 */
	for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
		struct spi_transfer *xfer;

		xfer = &tef_ring->uinc_xfer[j];
		xfer->tx_buf = &tef_ring->uinc_buf;
		xfer->len = len;
		xfer->cs_change = 1;
		xfer->cs_change_delay.value = 0;
		xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
	}

	/* TX */
	tx_ring = priv->tx;
	tx_ring->head = 0;
	tx_ring->tail = 0;
	/* TX objects start right after the TEF objects in chip RAM. */
	tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);

	/* FIFO request to send */
	addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
	val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
	len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
					      addr, val, val);

	mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
		mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);

	/* RX */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		rx_ring->head = 0;
		rx_ring->tail = 0;
		rx_ring->nr = i;
		rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);

		/* First RX ring follows the TX objects, each further
		 * ring follows its predecessor in chip RAM.
		 */
		if (!prev_rx_ring)
			rx_ring->base =
				mcp251xfd_get_tx_obj_addr(tx_ring,
							  tx_ring->obj_num);
		else
			rx_ring->base = prev_rx_ring->base +
				prev_rx_ring->obj_size *
				prev_rx_ring->obj_num;

		prev_rx_ring = rx_ring;

		/* FIFO increment RX tail pointer */
		addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
		val = MCP251XFD_REG_FIFOCON_UINC;
		len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
						      addr, val, val);

		for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
			struct spi_transfer *xfer;

			xfer = &rx_ring->uinc_xfer[j];
			xfer->tx_buf = &rx_ring->uinc_buf;
			xfer->len = len;
			xfer->cs_change = 1;
			xfer->cs_change_delay.value = 0;
			xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
		}
	}
}
411 
412 static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
413 {
414 	int i;
415 
416 	for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
417 		kfree(priv->rx[i]);
418 		priv->rx[i] = NULL;
419 	}
420 }
421 
/* Size and allocate the rings according to the configured CAN mode.
 *
 * CAN FD (and listen-only, which is handled like FD) uses larger
 * objects and fewer TX slots than Classical CAN. After reserving RAM
 * for the TEF and TX objects, the remaining controller RAM is handed
 * out to up to ARRAY_SIZE(priv->rx) RX rings, each sized to the
 * largest power-of-two object count that still fits.
 *
 * Returns 0 on success or -ENOMEM (all partial allocations are freed).
 */
static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_tx_ring *tx_ring;
	struct mcp251xfd_rx_ring *rx_ring;
	int tef_obj_size, tx_obj_size, rx_obj_size;
	int tx_obj_num;
	int ram_free, i;

	tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
	/* listen-only mode works like FD mode */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
	} else {
		tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
		tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
		rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
	}

	tx_ring = priv->tx;
	tx_ring->obj_num = tx_obj_num;
	tx_ring->obj_size = tx_obj_size;

	/* Each TX object is paired with one TEF object. */
	ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
		(tef_obj_size + tx_obj_size);

	for (i = 0;
	     i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
	     i++) {
		int rx_obj_num;

		/* Round the object count down to a power of two, capped
		 * at the hardware FIFO depth limit.
		 */
		rx_obj_num = ram_free / rx_obj_size;
		rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
				 MCP251XFD_RX_OBJ_NUM_MAX);

		rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
				  GFP_KERNEL);
		if (!rx_ring) {
			mcp251xfd_ring_free(priv);
			return -ENOMEM;
		}
		rx_ring->obj_num = rx_obj_num;
		rx_ring->obj_size = rx_obj_size;
		priv->rx[i] = rx_ring;

		ram_free -= rx_ring->obj_num * rx_ring->obj_size;
	}
	priv->rx_ring_num = i;

	netdev_dbg(priv->ndev,
		   "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
		   tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
		   tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);

	mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
		netdev_dbg(priv->ndev,
			   "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
			   i, rx_ring->obj_num, rx_ring->obj_size,
			   rx_ring->obj_size * rx_ring->obj_num);
	}

	netdev_dbg(priv->ndev,
		   "FIFO setup: free: %d bytes\n",
		   ram_free);

	return 0;
}
490 
491 static inline int
492 mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
493 {
494 	u32 val;
495 	int err;
496 
497 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
498 	if (err)
499 		return err;
500 
501 	*mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
502 
503 	return 0;
504 }
505 
/* Request operation mode @mode_req via the CON register.
 *
 * Unless @nowait is set (or Sleep Mode is requested, which cannot be
 * polled as the chip stops responding), poll CON until the controller
 * reports the requested mode or the timeout expires.
 *
 * Returns 0 on success, a negative error code on SPI failure or
 * timeout (the failed mode transition is logged).
 */
static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			  const u8 mode_req, bool nowait)
{
	u32 con, con_reqop;
	int err;

	con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
				 MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
	if (err)
		return err;

	if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
		return 0;

	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
				       FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
						 con) == mode_req,
				       MCP251XFD_POLL_SLEEP_US,
				       MCP251XFD_POLL_TIMEOUT_US);
	if (err) {
		/* con holds the last value read by the poll loop. */
		u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);

		netdev_err(priv->ndev,
			   "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
			   mcp251xfd_get_mode_str(mode_req), mode_req,
			   mcp251xfd_get_mode_str(mode), mode);
		return err;
	}

	return 0;
}
539 
/* Request @mode_req and wait until the controller has reached it. */
static inline int
mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
			const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
546 
/* Request @mode_req without waiting for the mode change to complete. */
static inline int
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
			       const u8 mode_req)
{
	return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
553 
554 static inline bool mcp251xfd_osc_invalid(u32 reg)
555 {
556 	return reg == 0x0 || reg == 0xffffffff;
557 }
558 
/* Wake the chip's oscillator and wait until it reports ready.
 *
 * Returns 0 on success, -ENODEV if the chip does not respond at all
 * (OSC reads as all zeros/ones), -ETIMEDOUT if the oscillator never
 * becomes ready, or another negative error on SPI failure.
 */
static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
{
	u32 osc, osc_reference, osc_mask;
	int err;

	/* Set Power On Defaults for "Clock Output Divisor" and remove
	 * "Oscillator Disable" bit.
	 */
	osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
			 MCP251XFD_REG_OSC_CLKODIV_10);
	/* Expect OSCRDY set and PLLRDY clear (PLL unused). */
	osc_reference = MCP251XFD_REG_OSC_OSCRDY;
	osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;

	/* Note:
	 *
	 * If the controller is in Sleep Mode the following write only
	 * removes the "Oscillator Disable" bit and powers it up. All
	 * other bits are unaffected.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
	if (err)
		return err;

	/* Wait for "Oscillator Ready" bit */
	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
				       (osc & osc_mask) == osc_reference,
				       MCP251XFD_OSC_STAB_SLEEP_US,
				       MCP251XFD_OSC_STAB_TIMEOUT_US);
	/* Check for a dead chip first: its all-zeros/ones reading would
	 * otherwise be misreported as a plain timeout.
	 */
	if (mcp251xfd_osc_invalid(osc)) {
		netdev_err(priv->ndev,
			   "Failed to detect %s (osc=0x%08x).\n",
			   mcp251xfd_get_model_str(priv), osc);
		return -ENODEV;
	} else if (err == -ETIMEDOUT) {
		netdev_err(priv->ndev,
			   "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
			   osc, osc_reference);
		return -ETIMEDOUT;
	} else if (err) {
		return err;
	}

	return 0;
}
603 
604 static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
605 {
606 	const __be16 cmd = mcp251xfd_cmd_reset();
607 	int err;
608 
609 	/* The Set Mode and SPI Reset command only seems to works if
610 	 * the controller is not in Sleep Mode.
611 	 */
612 	err = mcp251xfd_chip_clock_enable(priv);
613 	if (err)
614 		return err;
615 
616 	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
617 	if (err)
618 		return err;
619 
620 	/* spi_write_then_read() works with non DMA-safe buffers */
621 	return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
622 }
623 
624 static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
625 {
626 	u32 osc, osc_reference;
627 	u8 mode;
628 	int err;
629 
630 	err = mcp251xfd_chip_get_mode(priv, &mode);
631 	if (err)
632 		return err;
633 
634 	if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
635 		netdev_info(priv->ndev,
636 			    "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
637 			    mcp251xfd_get_mode_str(mode), mode);
638 		return -ETIMEDOUT;
639 	}
640 
641 	osc_reference = MCP251XFD_REG_OSC_OSCRDY |
642 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
643 			   MCP251XFD_REG_OSC_CLKODIV_10);
644 
645 	/* check reset defaults of OSC reg */
646 	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
647 	if (err)
648 		return err;
649 
650 	if (osc != osc_reference) {
651 		netdev_info(priv->ndev,
652 			    "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
653 			    osc, osc_reference);
654 		return -ETIMEDOUT;
655 	}
656 
657 	return 0;
658 }
659 
660 static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
661 {
662 	int err, i;
663 
664 	for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
665 		if (i)
666 			netdev_info(priv->ndev,
667 				    "Retrying to reset Controller.\n");
668 
669 		err = mcp251xfd_chip_softreset_do(priv);
670 		if (err == -ETIMEDOUT)
671 			continue;
672 		if (err)
673 			return err;
674 
675 		err = mcp251xfd_chip_softreset_check(priv);
676 		if (err == -ETIMEDOUT)
677 			continue;
678 		if (err)
679 			return err;
680 
681 		return 0;
682 	}
683 
684 	return err;
685 }
686 
687 static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
688 {
689 	u32 osc;
690 	int err;
691 
692 	/* Activate Low Power Mode on Oscillator Disable. This only
693 	 * works on the MCP2518FD. The MCP2517FD will go into normal
694 	 * Sleep Mode instead.
695 	 */
696 	osc = MCP251XFD_REG_OSC_LPMEN |
697 		FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
698 			   MCP251XFD_REG_OSC_CLKODIV_10);
699 	err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
700 	if (err)
701 		return err;
702 
703 	/* Set Time Base Counter Prescaler to 1.
704 	 *
705 	 * This means an overflow of the 32 bit Time Base Counter
706 	 * register at 40 MHz every 107 seconds.
707 	 */
708 	return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
709 			    MCP251XFD_REG_TSCON_TBCEN);
710 }
711 
/* Program CON, the nominal/data bit timing registers and the
 * transmitter delay compensation from the computed CAN timing
 * parameters. Data phase registers are only written in CAN FD mode.
 *
 * Returns 0 on success or a negative regmap error.
 */
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 val = 0;
	s8 tdco;
	int err;

	/* CAN Control Register
	 *
	 * - no transmit bandwidth sharing
	 * - config mode
	 * - disable transmit queue
	 * - store in transmit FIFO event
	 * - transition to restricted operation mode on system error
	 * - ESI is transmitted recessive when ESI of message is high or
	 *   CAN controller error passive
	 * - restricted retransmission attempts,
	 *   use TQXCON_TXAT and FIFOCON_TXAT
	 * - wake-up filter bits T11FILTER
	 * - use CAN bus line filter for wakeup
	 * - protocol exception is treated as a form error
	 * - Do not compare data bytes
	 */
	val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
			 MCP251XFD_REG_CON_MODE_CONFIG) |
		MCP251XFD_REG_CON_STEF |
		MCP251XFD_REG_CON_ESIGM |
		MCP251XFD_REG_CON_RTXAT |
		FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
			   MCP251XFD_REG_CON_WFT_T11FILTER) |
		MCP251XFD_REG_CON_WAKFIL |
		MCP251XFD_REG_CON_PXEDIS;

	/* ISO CRC is the default; only disabled for non-ISO CAN FD. */
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
		val |= MCP251XFD_REG_CON_ISOCRCEN;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
	if (err)
		return err;

	/* Nominal Bit Time (register fields are value minus one). */
	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
			   bt->prop_seg + bt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
			   bt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
	if (err)
		return err;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
		return 0;

	/* Data Bit Time */
	val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
			   dbt->prop_seg + dbt->phase_seg1 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
			   dbt->phase_seg2 - 1) |
		FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);

	err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
	if (err)
		return err;

	/* Transmitter Delay Compensation
	 *
	 * Offset in time quanta, clamped to the signed 7 bit range of
	 * the TDCO register field.
	 */
	tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
		       -64, 63);
	val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
			 MCP251XFD_REG_TDC_TDCMOD_AUTO) |
		FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);

	return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
789 
790 static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
791 {
792 	u32 val;
793 
794 	if (!priv->rx_int)
795 		return 0;
796 
797 	/* Configure GPIOs:
798 	 * - PIN0: GPIO Input
799 	 * - PIN1: GPIO Input/RX Interrupt
800 	 *
801 	 * PIN1 must be Input, otherwise there is a glitch on the
802 	 * rx-INT line. It happens between setting the PIN as output
803 	 * (in the first byte of the SPI transfer) and configuring the
804 	 * PIN as interrupt (in the last byte of the SPI transfer).
805 	 */
806 	val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
807 		MCP251XFD_REG_IOCON_TRIS0;
808 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
809 }
810 
811 static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
812 {
813 	u32 val;
814 
815 	if (!priv->rx_int)
816 		return 0;
817 
818 	/* Configure GPIOs:
819 	 * - PIN0: GPIO Input
820 	 * - PIN1: GPIO Input
821 	 */
822 	val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
823 		MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
824 	return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
825 }
826 
827 static int
828 mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
829 				const struct mcp251xfd_rx_ring *ring)
830 {
831 	u32 fifo_con;
832 
833 	/* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
834 	 *
835 	 * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
836 	 * generate a RXOVIF, use this to properly detect RX MAB
837 	 * overflows.
838 	 */
839 	fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
840 			      ring->obj_num - 1) |
841 		MCP251XFD_REG_FIFOCON_RXTSEN |
842 		MCP251XFD_REG_FIFOCON_RXOVIE |
843 		MCP251XFD_REG_FIFOCON_TFNRFNIE;
844 
845 	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
846 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
847 				       MCP251XFD_REG_FIFOCON_PLSIZE_64);
848 	else
849 		fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
850 				       MCP251XFD_REG_FIFOCON_PLSIZE_8);
851 
852 	return regmap_write(priv->map_reg,
853 			    MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
854 }
855 
856 static int
857 mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
858 				  const struct mcp251xfd_rx_ring *ring)
859 {
860 	u32 fltcon;
861 
862 	fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
863 		MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
864 
865 	return regmap_update_bits(priv->map_reg,
866 				  MCP251XFD_REG_FLTCON(ring->nr >> 2),
867 				  MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
868 				  fltcon);
869 }
870 
/* Program the TEF, the TX FIFO and all RX FIFOs (plus their filters)
 * on the chip according to the ring configuration.
 *
 * Returns 0 on success or a negative regmap error.
 */
static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	const struct mcp251xfd_rx_ring *rx_ring;
	u32 val;
	int err, n;

	/* TEF */
	val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_TEFCON_TEFTSEN |
		MCP251XFD_REG_TEFCON_TEFOVIE |
		MCP251XFD_REG_TEFCON_TEFNEIE;

	err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
	if (err)
		return err;

	/* FIFO 1 - TX */
	val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
			 tx_ring->obj_num - 1) |
		MCP251XFD_REG_FIFOCON_TXEN |
		MCP251XFD_REG_FIFOCON_TXATIE;

	/* Payload size: 64 bytes in FD (and listen-only) mode, 8 otherwise. */
	if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_64);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
				  MCP251XFD_REG_FIFOCON_PLSIZE_8);

	/* Retransmission attempts per the one-shot ctrlmode flag. */
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
	else
		val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
				  MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);

	err = regmap_write(priv->map_reg,
			   MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
			   val);
	if (err)
		return err;

	/* RX FIFOs */
	mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
		err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
		if (err)
			return err;

		err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
		if (err)
			return err;
	}

	return 0;
}
928 
929 static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
930 {
931 	struct mcp251xfd_ecc *ecc = &priv->ecc;
932 	void *ram;
933 	u32 val = 0;
934 	int err;
935 
936 	ecc->ecc_stat = 0;
937 
938 	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
939 		val = MCP251XFD_REG_ECCCON_ECCEN;
940 
941 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
942 				 MCP251XFD_REG_ECCCON_ECCEN, val);
943 	if (err)
944 		return err;
945 
946 	ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
947 	if (!ram)
948 		return -ENOMEM;
949 
950 	err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
951 			       MCP251XFD_RAM_SIZE);
952 	kfree(ram);
953 
954 	return err;
955 }
956 
957 static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
958 {
959 	struct mcp251xfd_ecc *ecc = &priv->ecc;
960 
961 	ecc->ecc_stat = 0;
962 }
963 
964 static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
965 {
966 	u8 mode;
967 
968 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
969 		mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
970 	else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
971 		mode = MCP251XFD_REG_CON_MODE_MIXED;
972 	else
973 		mode = MCP251XFD_REG_CON_MODE_CAN2_0;
974 
975 	return mode;
976 }
977 
978 static int
979 __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
980 				 bool nowait)
981 {
982 	u8 mode;
983 
984 	mode = mcp251xfd_get_normal_mode(priv);
985 
986 	return __mcp251xfd_chip_set_mode(priv, mode, nowait);
987 }
988 
/* Enter normal mode and wait until the mode change completes. */
static inline int
mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, false);
}
994 
/* Enter normal mode without waiting for the mode change to complete. */
static inline int
mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
{
	return __mcp251xfd_chip_set_normal_mode(priv, true);
}
1000 
1001 static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
1002 {
1003 	u32 val;
1004 	int err;
1005 
1006 	val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
1007 	err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
1008 	if (err)
1009 		return err;
1010 
1011 	val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1012 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
1013 	if (err)
1014 		return err;
1015 
1016 	val = MCP251XFD_REG_INT_CERRIE |
1017 		MCP251XFD_REG_INT_SERRIE |
1018 		MCP251XFD_REG_INT_RXOVIE |
1019 		MCP251XFD_REG_INT_TXATIE |
1020 		MCP251XFD_REG_INT_SPICRCIE |
1021 		MCP251XFD_REG_INT_ECCIE |
1022 		MCP251XFD_REG_INT_TEFIE |
1023 		MCP251XFD_REG_INT_MODIE |
1024 		MCP251XFD_REG_INT_RXIE;
1025 
1026 	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
1027 		val |= MCP251XFD_REG_INT_IVMIE;
1028 
1029 	return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
1030 }
1031 
1032 static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
1033 {
1034 	int err;
1035 	u32 mask;
1036 
1037 	err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
1038 	if (err)
1039 		return err;
1040 
1041 	mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
1042 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
1043 				 mask, 0x0);
1044 	if (err)
1045 		return err;
1046 
1047 	return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
1048 }
1049 
/* Shut the controller down into Sleep Mode and record @state.
 *
 * Interrupts are masked before the mode change; errors from the
 * disable helpers are deliberately ignored — only the result of the
 * final mode change is returned.
 */
static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
			       const enum can_state state)
{
	priv->can.state = state;

	mcp251xfd_chip_interrupts_disable(priv);
	mcp251xfd_chip_rx_int_disable(priv);
	return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
1059 
1060 static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
1061 {
1062 	int err;
1063 
1064 	err = mcp251xfd_chip_softreset(priv);
1065 	if (err)
1066 		goto out_chip_stop;
1067 
1068 	err = mcp251xfd_chip_clock_init(priv);
1069 	if (err)
1070 		goto out_chip_stop;
1071 
1072 	err = mcp251xfd_set_bittiming(priv);
1073 	if (err)
1074 		goto out_chip_stop;
1075 
1076 	err = mcp251xfd_chip_rx_int_enable(priv);
1077 	if (err)
1078 		return err;
1079 
1080 	err = mcp251xfd_chip_ecc_init(priv);
1081 	if (err)
1082 		goto out_chip_stop;
1083 
1084 	mcp251xfd_ring_init(priv);
1085 
1086 	err = mcp251xfd_chip_fifo_init(priv);
1087 	if (err)
1088 		goto out_chip_stop;
1089 
1090 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1091 
1092 	err = mcp251xfd_chip_set_normal_mode(priv);
1093 	if (err)
1094 		goto out_chip_stop;
1095 
1096 	return 0;
1097 
1098  out_chip_stop:
1099 	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1100 
1101 	return err;
1102 }
1103 
1104 static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
1105 {
1106 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
1107 	int err;
1108 
1109 	switch (mode) {
1110 	case CAN_MODE_START:
1111 		err = mcp251xfd_chip_start(priv);
1112 		if (err)
1113 			return err;
1114 
1115 		err = mcp251xfd_chip_interrupts_enable(priv);
1116 		if (err) {
1117 			mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
1118 			return err;
1119 		}
1120 
1121 		netif_wake_queue(ndev);
1122 		break;
1123 
1124 	default:
1125 		return -EOPNOTSUPP;
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
1132 					struct can_berr_counter *bec)
1133 {
1134 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1135 	u32 trec;
1136 	int err;
1137 
1138 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1139 	if (err)
1140 		return err;
1141 
1142 	if (trec & MCP251XFD_REG_TREC_TXBO)
1143 		bec->txerr = 256;
1144 	else
1145 		bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
1146 	bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
1147 
1148 	return 0;
1149 }
1150 
1151 static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
1152 				      struct can_berr_counter *bec)
1153 {
1154 	const struct mcp251xfd_priv *priv = netdev_priv(ndev);
1155 
1156 	/* Avoid waking up the controller if the interface is down */
1157 	if (!(ndev->flags & IFF_UP))
1158 		return 0;
1159 
1160 	/* The controller is powered down during Bus Off, use saved
1161 	 * bec values.
1162 	 */
1163 	if (priv->can.state == CAN_STATE_BUS_OFF) {
1164 		*bec = priv->bec;
1165 		return 0;
1166 	}
1167 
1168 	return __mcp251xfd_get_berr_counter(ndev, bec);
1169 }
1170 
1171 static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
1172 {
1173 	u8 tef_tail_chip, tef_tail;
1174 	int err;
1175 
1176 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1177 		return 0;
1178 
1179 	err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
1180 	if (err)
1181 		return err;
1182 
1183 	tef_tail = mcp251xfd_get_tef_tail(priv);
1184 	if (tef_tail_chip != tef_tail) {
1185 		netdev_err(priv->ndev,
1186 			   "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
1187 			   tef_tail_chip, tef_tail);
1188 		return -EILSEQ;
1189 	}
1190 
1191 	return 0;
1192 }
1193 
1194 static int
1195 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
1196 			const struct mcp251xfd_rx_ring *ring)
1197 {
1198 	u8 rx_tail_chip, rx_tail;
1199 	int err;
1200 
1201 	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
1202 		return 0;
1203 
1204 	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
1205 	if (err)
1206 		return err;
1207 
1208 	rx_tail = mcp251xfd_get_rx_tail(ring);
1209 	if (rx_tail_chip != rx_tail) {
1210 		netdev_err(priv->ndev,
1211 			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
1212 			   rx_tail_chip, rx_tail);
1213 		return -EILSEQ;
1214 	}
1215 
1216 	return 0;
1217 }
1218 
1219 static int
1220 mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
1221 {
1222 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1223 	u32 tef_sta;
1224 	int err;
1225 
1226 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
1227 	if (err)
1228 		return err;
1229 
1230 	if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
1231 		netdev_err(priv->ndev,
1232 			   "Transmit Event FIFO buffer overflow.\n");
1233 		return -ENOBUFS;
1234 	}
1235 
1236 	netdev_info(priv->ndev,
1237 		    "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
1238 		    tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
1239 		    "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
1240 		    "not empty" : "empty",
1241 		    seq, priv->tef->tail, priv->tef->head, tx_ring->head);
1242 
1243 	/* The Sequence Number in the TEF doesn't match our tef_tail. */
1244 	return -EAGAIN;
1245 }
1246 
1247 static int
1248 mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
1249 			   const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
1250 {
1251 	struct net_device_stats *stats = &priv->ndev->stats;
1252 	u32 seq, seq_masked, tef_tail_masked;
1253 
1254 	seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
1255 			hw_tef_obj->flags);
1256 
1257 	/* Use the MCP2517FD mask on the MCP2518FD, too. We only
1258 	 * compare 7 bits, this should be enough to detect
1259 	 * net-yet-completed, i.e. old TEF objects.
1260 	 */
1261 	seq_masked = seq &
1262 		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
1263 	tef_tail_masked = priv->tef->tail &
1264 		field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
1265 	if (seq_masked != tef_tail_masked)
1266 		return mcp251xfd_handle_tefif_recover(priv, seq);
1267 
1268 	stats->tx_bytes +=
1269 		can_rx_offload_get_echo_skb(&priv->offload,
1270 					    mcp251xfd_get_tef_tail(priv),
1271 					    hw_tef_obj->ts);
1272 	stats->tx_packets++;
1273 	priv->tef->tail++;
1274 
1275 	return 0;
1276 }
1277 
/* Advance the driver's TEF head from the chip's TX FIFO tail.
 *
 * The hardware only reports the TX tail modulo the number of TX
 * objects, while the driver keeps free-running head/tail counters.
 * Reconstruct the new free-running TEF head relative to the old one.
 */
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
{
	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	unsigned int new_head;
	u8 chip_tx_tail;
	int err;

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	/* chip_tx_tail, is the next TX-Object send by the HW.
	 * The new TEF head must be >= the old head, ...
	 */
	new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
	if (new_head <= priv->tef->head)
		new_head += tx_ring->obj_num;

	/* ... but it cannot exceed the TX head. */
	priv->tef->head = min(new_head, tx_ring->head);

	return mcp251xfd_check_tef_tail(priv);
}
1301 
1302 static inline int
1303 mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
1304 		       struct mcp251xfd_hw_tef_obj *hw_tef_obj,
1305 		       const u8 offset, const u8 len)
1306 {
1307 	const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
1308 
1309 	if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
1310 	    (offset > tx_ring->obj_num ||
1311 	     len > tx_ring->obj_num ||
1312 	     offset + len > tx_ring->obj_num)) {
1313 		netdev_err(priv->ndev,
1314 			   "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
1315 			   tx_ring->obj_num, offset, len);
1316 		return -ERANGE;
1317 	}
1318 
1319 	return regmap_bulk_read(priv->map_rx,
1320 				mcp251xfd_get_tef_obj_addr(offset),
1321 				hw_tef_obj,
1322 				sizeof(*hw_tef_obj) / sizeof(u32) * len);
1323 }
1324 
/* Handle a Transmit Event FIFO (TEF) interrupt.
 *
 * Reads all pending TEF objects (in at most two chunks, as the ring
 * may wrap), completes the corresponding echo skbs, increments the
 * chip's TEF tail pointer once per handled object in a single SPI
 * message and finally wakes the TX queue if TX space is available.
 */
static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
	u8 tef_tail, len, l;
	int err, i;

	err = mcp251xfd_tef_ring_update(priv);
	if (err)
		return err;

	tef_tail = mcp251xfd_get_tef_tail(priv);
	len = mcp251xfd_get_tef_len(priv);
	l = mcp251xfd_get_tef_linear_len(priv);
	err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
	if (err)
		return err;

	/* The TEF ring wrapped: fetch the remaining objects from the
	 * start of the ring.
	 */
	if (l < len) {
		err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
		if (err)
			return err;
	}

	for (i = 0; i < len; i++) {
		err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
		/* -EAGAIN means the Sequence Number in the TEF
		 * doesn't match our tef_tail. This can happen if we
		 * read the TEF objects too early. Leave loop let the
		 * interrupt handler call us again.
		 */
		if (err == -EAGAIN)
			goto out_netif_wake_queue;
		if (err)
			return err;
	}

 out_netif_wake_queue:
	len = i;	/* number of handled good TEFs */
	if (len) {
		struct mcp251xfd_tef_ring *ring = priv->tef;
		struct mcp251xfd_tx_ring *tx_ring = priv->tx;
		struct spi_transfer *last_xfer;

		tx_ring->tail += len;

		/* Increment the TEF FIFO tail pointer 'len' times in
		 * a single SPI message.
		 */

		/* Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporary set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;

		err = mcp251xfd_check_tef_tail(priv);
		if (err)
			return err;
	}

	mcp251xfd_ecc_tefif_successful(priv);

	if (mcp251xfd_get_tx_free(priv->tx)) {
		/* Make sure that anybody stopping the queue after
		 * this sees the new tx_ring->tail.
		 */
		smp_mb();
		netif_wake_queue(priv->ndev);
	}

	return 0;
}
1408 
/* Advance the driver's RX ring head from the chip's RX FIFO head.
 *
 * Mirrors mcp251xfd_tef_ring_update(): the chip reports its head
 * modulo the ring size, while the driver keeps a free-running
 * counter, so the new head is reconstructed relative to the old one.
 */
static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
	if (err)
		return err;

	/* chip_rx_head, is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}
1432 
1433 static void
1434 mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
1435 			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1436 			   struct sk_buff *skb)
1437 {
1438 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
1439 
1440 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
1441 		u32 sid, eid;
1442 
1443 		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
1444 		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
1445 
1446 		cfd->can_id = CAN_EFF_FLAG |
1447 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
1448 			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
1449 	} else {
1450 		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
1451 					hw_rx_obj->id);
1452 	}
1453 
1454 	/* CANFD */
1455 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
1456 		u8 dlc;
1457 
1458 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
1459 			cfd->flags |= CANFD_ESI;
1460 
1461 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
1462 			cfd->flags |= CANFD_BRS;
1463 
1464 		dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
1465 		cfd->len = can_fd_dlc2len(dlc);
1466 	} else {
1467 		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
1468 			cfd->can_id |= CAN_RTR_FLAG;
1469 
1470 		cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
1471 						 hw_rx_obj->flags));
1472 	}
1473 
1474 	memcpy(cfd->data, hw_rx_obj->data, cfd->len);
1475 }
1476 
1477 static int
1478 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
1479 			  struct mcp251xfd_rx_ring *ring,
1480 			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
1481 {
1482 	struct net_device_stats *stats = &priv->ndev->stats;
1483 	struct sk_buff *skb;
1484 	struct canfd_frame *cfd;
1485 	int err;
1486 
1487 	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
1488 		skb = alloc_canfd_skb(priv->ndev, &cfd);
1489 	else
1490 		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
1491 
1492 	if (!cfd) {
1493 		stats->rx_dropped++;
1494 		return 0;
1495 	}
1496 
1497 	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
1498 	err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
1499 	if (err)
1500 		stats->rx_fifo_errors++;
1501 
1502 	return 0;
1503 }
1504 
1505 static inline int
1506 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
1507 		      const struct mcp251xfd_rx_ring *ring,
1508 		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
1509 		      const u8 offset, const u8 len)
1510 {
1511 	int err;
1512 
1513 	err = regmap_bulk_read(priv->map_rx,
1514 			       mcp251xfd_get_rx_obj_addr(ring, offset),
1515 			       hw_rx_obj,
1516 			       len * ring->obj_size / sizeof(u32));
1517 
1518 	return err;
1519 }
1520 
/* Handle all pending RX objects of one RX ring.
 *
 * Processes the ring in linear (non-wrapping) chunks: read the
 * objects with one bulk SPI transfer, hand each object to
 * mcp251xfd_handle_rxif_one(), then increment the chip's RX FIFO
 * tail pointer once per object in a single SPI message.
 */
static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	/* Loop until no linear chunk of filled RX objects is left. */
	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		struct spi_transfer *last_xfer;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 */
		ring->tail += len;

		/* Note:
		 *
		 * "cs_change == 1" on the last transfer results in an
		 * active chip select after the complete SPI
		 * message. This causes the controller to interpret
		 * the next register access as data. Temporary set
		 * "cs_change" of the last transfer to "0" to properly
		 * deactivate the chip select at the end of the
		 * message.
		 */
		last_xfer = &ring->uinc_xfer[len - 1];
		last_xfer->cs_change = 0;
		err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
		last_xfer->cs_change = 1;
		if (err)
			return err;
	}

	return 0;
}
1576 
/* Handle the RX interrupt: process pending RX objects on all RX
 * rings, stopping at the first error.
 */
static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_rx_ring *ring;
	int err, n;

	mcp251xfd_for_each_rx_ring(priv, ring, n) {
		err = mcp251xfd_handle_rxif_ring(priv, ring);
		if (err)
			return err;
	}

	return 0;
}
1590 
/* Read the chip's free-running time base counter (TBC register). */
static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
					  u32 *timestamp)
{
	return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
}
1596 
1597 static struct sk_buff *
1598 mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
1599 			    struct can_frame **cf, u32 *timestamp)
1600 {
1601 	int err;
1602 
1603 	err = mcp251xfd_get_timestamp(priv, timestamp);
1604 	if (err)
1605 		return NULL;
1606 
1607 	return alloc_can_err_skb(priv->ndev, cf);
1608 }
1609 
/* Handle a RX FIFO overflow interrupt (RXOVIF).
 *
 * Accounts the overflow, clears the per-FIFO overflow flag of every
 * affected RX ring and queues a single CAN error frame
 * (CAN_ERR_CRTL_RX_OVERFLOW) to userspace.
 */
static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct mcp251xfd_rx_ring *ring;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp, rxovif;
	int err, i;

	stats->rx_over_errors++;
	stats->rx_errors++;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
	if (err)
		return err;

	mcp251xfd_for_each_rx_ring(priv, ring, i) {
		if (!(rxovif & BIT(ring->fifo_nr)))
			continue;

		/* If SERRIF is active, there was a RX MAB overflow. */
		if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
			netdev_info(priv->ndev,
				    "RX-%d: MAB overflow detected.\n",
				    ring->nr);
		} else {
			netdev_info(priv->ndev,
				    "RX-%d: FIFO overflow.\n", ring->nr);
		}

		/* Acknowledge the overflow by clearing RXOVIF in the
		 * FIFO's status register.
		 */
		err = regmap_update_bits(priv->map_reg,
					 MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
					 MCP251XFD_REG_FIFOSTA_RXOVIF,
					 0x0);
		if (err)
			return err;
	}

	/* Best effort: if the error skb can't be allocated the
	 * overflow is still accounted in the statistics above.
	 */
	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
	if (!skb)
		return 0;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1661 
/* TXATIF handling is currently a stub: the event is only logged,
 * no further action is taken.
 */
static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
{
	netdev_info(priv->ndev, "%s\n", __func__);

	return 0;
}
1668 
/* Handle a bus error interrupt (IVMIF).
 *
 * Reads and clears the BDIAG1 diagnostic register, maps the
 * individual RX/TX protocol error flags to a CAN error frame and
 * queues it to userspace. If the error skb allocation fails, the
 * statistics are still updated (cf stays NULL and is checked on
 * every access).
 */
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	u32 bdiag1, timestamp;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;
	int err;

	err = mcp251xfd_get_timestamp(priv, &timestamp);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
	if (err)
		return err;

	/* Write 0s to clear error bits, don't write 1s to non active
	 * bits, as they will be set.
	 */
	err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
	if (err)
		return err;

	priv->can.can_stats.bus_error++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* Controller misconfiguration */
	if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
		netdev_err(priv->ndev,
			   "recv'd DLC is larger than PLSIZE of FIFO element.");

	/* RX errors */
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
		      MCP251XFD_REG_BDIAG1_NCRCERR)) {
		netdev_dbg(priv->ndev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
		      MCP251XFD_REG_BDIAG1_NSTUFERR)) {
		netdev_dbg(priv->ndev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
		      MCP251XFD_REG_BDIAG1_NFORMERR)) {
		netdev_dbg(priv->ndev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* TX errors */
	if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
		netdev_dbg(priv->ndev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
		      MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
		netdev_dbg(priv->ndev, "Bit1 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
	}
	if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
		      MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
		netdev_dbg(priv->ndev, "Bit0 error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
	}

	/* No skb was allocated, the statistics above are all we can
	 * report.
	 */
	if (!cf)
		return 0;

	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}
1765 
1766 static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
1767 {
1768 	struct net_device_stats *stats = &priv->ndev->stats;
1769 	struct sk_buff *skb;
1770 	struct can_frame *cf = NULL;
1771 	enum can_state new_state, rx_state, tx_state;
1772 	u32 trec, timestamp;
1773 	int err;
1774 
1775 	err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
1776 	if (err)
1777 		return err;
1778 
1779 	if (trec & MCP251XFD_REG_TREC_TXBO)
1780 		tx_state = CAN_STATE_BUS_OFF;
1781 	else if (trec & MCP251XFD_REG_TREC_TXBP)
1782 		tx_state = CAN_STATE_ERROR_PASSIVE;
1783 	else if (trec & MCP251XFD_REG_TREC_TXWARN)
1784 		tx_state = CAN_STATE_ERROR_WARNING;
1785 	else
1786 		tx_state = CAN_STATE_ERROR_ACTIVE;
1787 
1788 	if (trec & MCP251XFD_REG_TREC_RXBP)
1789 		rx_state = CAN_STATE_ERROR_PASSIVE;
1790 	else if (trec & MCP251XFD_REG_TREC_RXWARN)
1791 		rx_state = CAN_STATE_ERROR_WARNING;
1792 	else
1793 		rx_state = CAN_STATE_ERROR_ACTIVE;
1794 
1795 	new_state = max(tx_state, rx_state);
1796 	if (new_state == priv->can.state)
1797 		return 0;
1798 
1799 	/* The skb allocation might fail, but can_change_state()
1800 	 * handles cf == NULL.
1801 	 */
1802 	skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
1803 	can_change_state(priv->ndev, cf, tx_state, rx_state);
1804 
1805 	if (new_state == CAN_STATE_BUS_OFF) {
1806 		/* As we're going to switch off the chip now, let's
1807 		 * save the error counters and return them to
1808 		 * userspace, if do_get_berr_counter() is called while
1809 		 * the chip is in Bus Off.
1810 		 */
1811 		err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
1812 		if (err)
1813 			return err;
1814 
1815 		mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
1816 		can_bus_off(priv->ndev);
1817 	}
1818 
1819 	if (!skb)
1820 		return 0;
1821 
1822 	if (new_state != CAN_STATE_BUS_OFF) {
1823 		struct can_berr_counter bec;
1824 
1825 		err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
1826 		if (err)
1827 			return err;
1828 		cf->data[6] = bec.txerr;
1829 		cf->data[7] = bec.rxerr;
1830 	}
1831 
1832 	err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
1833 	if (err)
1834 		stats->rx_fifo_errors++;
1835 
1836 	return 0;
1837 }
1838 
1839 static int
1840 mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
1841 {
1842 	const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
1843 	u8 mode;
1844 	int err;
1845 
1846 	err = mcp251xfd_chip_get_mode(priv, &mode);
1847 	if (err)
1848 		return err;
1849 
1850 	if (mode == mode_reference) {
1851 		netdev_dbg(priv->ndev,
1852 			   "Controller changed into %s Mode (%u).\n",
1853 			   mcp251xfd_get_mode_str(mode), mode);
1854 		return 0;
1855 	}
1856 
1857 	/* According to MCP2517FD errata DS80000792B 1., during a TX
1858 	 * MAB underflow, the controller will transition to Restricted
1859 	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
1860 	 *
1861 	 * However this is not always the case. If SERR2LOM is
1862 	 * configured for Restricted Operation Mode (SERR2LOM not set)
1863 	 * the MCP2517FD will sometimes transition to Listen Only Mode
1864 	 * first. When polling this bit we see that it will transition
1865 	 * to Restricted Operation Mode shortly after.
1866 	 */
1867 	if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
1868 	    (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
1869 	     mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
1870 		netdev_dbg(priv->ndev,
1871 			   "Controller changed into %s Mode (%u).\n",
1872 			   mcp251xfd_get_mode_str(mode), mode);
1873 	else
1874 		netdev_err(priv->ndev,
1875 			   "Controller changed into %s Mode (%u).\n",
1876 			   mcp251xfd_get_mode_str(mode), mode);
1877 
1878 	/* After the application requests Normal mode, the Controller
1879 	 * will automatically attempt to retransmit the message that
1880 	 * caused the TX MAB underflow.
1881 	 *
1882 	 * However, if there is an ECC error in the TX-RAM, we first
1883 	 * have to reload the tx-object before requesting Normal
1884 	 * mode. This is done later in mcp251xfd_handle_eccif().
1885 	 */
1886 	if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
1887 		*set_normal_mode = true;
1888 		return 0;
1889 	}
1890 
1891 	return mcp251xfd_chip_set_normal_mode_nowait(priv);
1892 }
1893 
/* Handle a system error interrupt (SERRIF).
 *
 * Classifies the system error as TX MAB underflow or RX MAB overflow
 * based on the other pending interrupt flags (see the errata notes
 * below) and updates the statistics accordingly. Unclassifiable
 * system errors are only logged.
 */
static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct net_device_stats *stats = &priv->ndev->stats;
	bool handled = false;

	/* TX MAB underflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
	 * underflow is indicated by SERRIF and MODIF.
	 *
	 * In addition to the effects mentioned in the Errata, there
	 * are Bus Errors due to the aborted CAN frame, so a IVMIF
	 * will be seen as well.
	 *
	 * Sometimes there is an ECC error in the TX-RAM, which leads
	 * to a TX MAB underflow.
	 *
	 * However, probably due to a race condition, there is no
	 * associated MODIF pending.
	 *
	 * Further, there are situations, where the SERRIF is caused
	 * by an ECC error in the TX-RAM, but not even the ECCIF is
	 * set. This only seems to happen _after_ the first occurrence
	 * of a ECCIF (which is tracked in ecc->cnt).
	 *
	 * Treat all as a known system errors..
	 */
	if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
	     priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
	    ecc->cnt) {
		const char *msg;

		if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
		    ecc->cnt)
			msg = "TX MAB underflow due to ECC error detected.";
		else
			msg = "TX MAB underflow detected.";

		if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
			netdev_dbg(priv->ndev, "%s\n", msg);
		else
			netdev_info(priv->ndev, "%s\n", msg);

		stats->tx_aborted_errors++;
		stats->tx_errors++;
		handled = true;
	}

	/* RX MAB overflow
	 *
	 * According to MCP2517FD Errata DS80000792B 1. a RX MAB
	 * overflow is indicated by SERRIF.
	 *
	 * In addition to the effects mentioned in the Errata, (most
	 * of the times) a RXOVIF is raised, if the FIFO that is being
	 * received into has the RXOVIE activated (and we have enabled
	 * RXOVIE on all FIFOs).
	 *
	 * Sometimes there is no RXOVIF just a RXIF is pending.
	 *
	 * Treat all as a known system errors..
	 */
	if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
	    priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
		stats->rx_dropped++;
		handled = true;
	}

	if (!handled)
		netdev_err(priv->ndev,
			   "Unhandled System Error Interrupt (intf=0x%08x)!\n",
			   priv->regs_status.intf);

	return 0;
}
1971 
/* Recover from an ECC error in the TX-RAM.
 *
 * Verifies that the corrupted RAM word belongs to the TX object that
 * is about to be (re-)transmitted, rewrites that TX object over SPI
 * and requests Normal mode to trigger the retransmission.
 */
static int
mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
{
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	struct mcp251xfd_tx_obj *tx_obj;
	u8 chip_tx_tail, tx_tail, offset;
	u16 addr;
	int err;

	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);

	err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
	if (err)
		return err;

	tx_tail = mcp251xfd_get_tx_tail(tx_ring);
	/* Distance of the corrupted object from the chip's TX tail,
	 * modulo the ring size (obj_num is a power of two).
	 */
	offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);

	/* Bail out if one of the following is met:
	 * - tx_tail information is inconsistent
	 * - for mcp2517fd: offset not 0
	 * - for mcp2518fd: offset not 0 or 1
	 */
	if (chip_tx_tail != tx_tail ||
	    !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
		netdev_err(priv->ndev,
			   "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
			   addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
			   offset);
		return -EINVAL;
	}

	netdev_info(priv->ndev,
		    "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
		    ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
		    "Single" : "Double",
		    addr, nr, tx_ring->tail, tx_tail, offset);

	/* reload tx_obj into controller RAM ... */
	tx_obj = &tx_ring->obj[nr];
	err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
	if (err)
		return err;

	/* ... and trigger retransmit */
	return mcp251xfd_chip_set_normal_mode(priv);
}
2020 
/* Handle an ECC error interrupt (ECCIF).
 *
 * Reads and acknowledges the ECC status. If the corrupted address
 * lies within the TX-RAM, the error is tracked and, after
 * MCP251XFD_ECC_CNT_MAX occurrences of the same error, recovery is
 * attempted by rewriting the affected TX object. If Normal mode was
 * deferred by mcp251xfd_handle_modif() (set_normal_mode), it is
 * requested here.
 */
static int
mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
{
	struct mcp251xfd_ecc *ecc = &priv->ecc;
	const char *msg;
	bool in_tx_ram;
	u32 ecc_stat;
	u16 addr;
	u8 nr;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
	if (err)
		return err;

	/* Acknowledge: write "0" to the active IF bits, "1" to all
	 * others to leave them untouched.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
				 MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
	if (err)
		return err;

	/* Check if ECC error occurred in TX-RAM */
	addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
	err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
	if (!err)
		in_tx_ram = true;
	else if (err == -ENOENT)
		in_tx_ram = false;
	else
		return err;

	/* Errata Reference:
	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
	 *
	 * ECC single error correction does not work in all cases:
	 *
	 * Fix/Work Around:
	 * Enable single error correction and double error detection
	 * interrupts by setting SECIE and DEDIE. Handle SECIF as a
	 * detection interrupt and do not rely on the error
	 * correction. Instead, handle both interrupts as a
	 * notification that the RAM word at ERRADDR was corrupted.
	 */
	if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
		msg = "Single ECC Error detected at address";
	else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
		msg = "Double ECC Error detected at address";
	else
		return -EINVAL;

	if (!in_tx_ram) {
		ecc->ecc_stat = 0;

		netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
	} else {
		/* Re-occurring error? */
		if (ecc->ecc_stat == ecc_stat) {
			ecc->cnt++;
		} else {
			ecc->ecc_stat = ecc_stat;
			ecc->cnt = 1;
		}

		netdev_info(priv->ndev,
			    "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
			    msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");

		if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
			return mcp251xfd_handle_eccif_recover(priv, nr);
	}

	if (set_normal_mode)
		return mcp251xfd_chip_set_normal_mode_nowait(priv);

	return 0;
}
2096 
2097 static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
2098 {
2099 	int err;
2100 	u32 crc;
2101 
2102 	err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
2103 	if (err)
2104 		return err;
2105 
2106 	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
2107 				 MCP251XFD_REG_CRC_IF_MASK,
2108 				 ~crc);
2109 	if (err)
2110 		return err;
2111 
2112 	if (crc & MCP251XFD_REG_CRC_FERRIF)
2113 		netdev_notice(priv->ndev, "CRC write command format error.\n");
2114 	else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
2115 		netdev_notice(priv->ndev,
2116 			      "CRC write error detected. CRC=0x%04lx.\n",
2117 			      FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
2118 
2119 	return 0;
2120 }
2121 
/* Call mcp251xfd_handle_<irq>() and log (but still propagate) any
 * error.
 *
 * Implemented as a statement-expression macro so that the "irq"
 * token can be pasted into both the handler's function name and the
 * log message.
 */
#define mcp251xfd_handle(priv, irq, ...) \
({ \
	struct mcp251xfd_priv *_priv = (priv); \
	int err; \
\
	err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
	if (err) \
		netdev_err(_priv->ndev, \
			"IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
			__stringify(irq), err); \
	err; \
})
2134 
/* Threaded IRQ handler: drains the RX FIFO via the optional RX_INT
 * line first, then loops reading the INT register block and
 * dispatching the individual interrupt handlers until no enabled
 * interrupt is pending. On any handler error the chip interrupts are
 * masked and the (possibly partial) handled status is returned.
 */
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
	struct mcp251xfd_priv *priv = dev_id;
	irqreturn_t handled = IRQ_NONE;
	int err;

	/* A dedicated RX_INT GPIO lets us drain RX without the slower
	 * INT register read.
	 */
	if (priv->rx_int)
		do {
			int rx_pending;

			rx_pending = gpiod_get_value_cansleep(priv->rx_int);
			if (!rx_pending)
				break;

			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;

			handled = IRQ_HANDLED;
		} while (1);

	do {
		u32 intf_pending, intf_pending_clearable;
		bool set_normal_mode = false;

		/* Bulk-read INT and the following status registers
		 * into priv->regs_status in one SPI transfer.
		 */
		err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
				       &priv->regs_status,
				       sizeof(priv->regs_status) /
				       sizeof(u32));
		if (err)
			goto out_fail;

		/* Only act on interrupts that are both flagged (IF)
		 * and enabled (IE).
		 */
		intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
					 priv->regs_status.intf) &
			FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
				  priv->regs_status.intf);

		if (!(intf_pending))
			return handled;

		/* Some interrupts must be ACKed in the
		 * MCP251XFD_REG_INT register.
		 * - First ACK then handle, to avoid lost-IRQ race
		 *   condition on fast re-occurring interrupts.
		 * - Write "0" to clear active IRQs, "1" to all other,
		 *   to avoid r/m/w race condition on the
		 *   MCP251XFD_REG_INT register.
		 */
		intf_pending_clearable = intf_pending &
			MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
		if (intf_pending_clearable) {
			err = regmap_update_bits(priv->map_reg,
						 MCP251XFD_REG_INT,
						 MCP251XFD_REG_INT_IF_MASK,
						 ~intf_pending_clearable);
			if (err)
				goto out_fail;
		}

		/* Mode change interrupt; may request deferred switch
		 * back to normal mode (consumed by the ECC handler).
		 */
		if (intf_pending & MCP251XFD_REG_INT_MODIF) {
			err = mcp251xfd_handle(priv, modif, &set_normal_mode);
			if (err)
				goto out_fail;
		}

		if (intf_pending & MCP251XFD_REG_INT_RXIF) {
			err = mcp251xfd_handle(priv, rxif);
			if (err)
				goto out_fail;
		}

		/* TX event FIFO: completes transmitted frames. */
		if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
			err = mcp251xfd_handle(priv, tefif);
			if (err)
				goto out_fail;
		}

		/* RX overflow */
		if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
			err = mcp251xfd_handle(priv, rxovif);
			if (err)
				goto out_fail;
		}

		/* TX attempt exhausted */
		if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
			err = mcp251xfd_handle(priv, txatif);
			if (err)
				goto out_fail;
		}

		/* Invalid message */
		if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
			err = mcp251xfd_handle(priv, ivmif);
			if (err)
				goto out_fail;
		}

		/* System error */
		if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
			err = mcp251xfd_handle(priv, serrif);
			if (err)
				goto out_fail;
		}

		/* ECC error; honors a pending mode switch request. */
		if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
			err = mcp251xfd_handle(priv, eccif, set_normal_mode);
			if (err)
				goto out_fail;
		}

		/* SPI CRC error */
		if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
			err = mcp251xfd_handle(priv, spicrcif);
			if (err)
				goto out_fail;
		}

		/* On the MCP2527FD and MCP2518FD, we don't get a
		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
		 * ERROR_ACTIVE.
		 */
		if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
		    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
			err = mcp251xfd_handle(priv, cerrif);
			if (err)
				goto out_fail;

			/* In Bus Off we completely shut down the
			 * controller. Every subsequent register read
			 * will read bogus data, and if
			 * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
			 * check will fail, too. So leave IRQ handler
			 * directly.
			 */
			if (priv->can.state == CAN_STATE_BUS_OFF)
				return IRQ_HANDLED;
		}

		handled = IRQ_HANDLED;
	} while (1);

 out_fail:
	/* Mask chip interrupts; the device stays quiet until the
	 * interface is brought down and up again.
	 */
	netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
		   err, priv->regs_status.intf);
	mcp251xfd_chip_interrupts_disable(priv);

	return handled;
}
2279 
2280 static inline struct
2281 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
2282 {
2283 	u8 tx_head;
2284 
2285 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2286 
2287 	return &tx_ring->obj[tx_head];
2288 }
2289 
/* Convert the CAN(-FD) frame in @skb into the hardware TX object
 * @tx_obj and pre-compute the length of the SPI transfer, including
 * the optional CRC command prefix and CRC trailer. @seq is embedded
 * in the object's flags and comes back in the TEF on completion.
 */
static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int offset, len;

	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		/* 29 bit extended ID: split into the SID/EID fields
		 * of the hardware object.
		 */
		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		/* 11 bit standard ID */
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	dlc = can_fd_len2dlc(cfd->len);
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
		FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;
	}

	/* With the CRC_TX quirk the object sits behind a CRC command
	 * header, otherwise behind a plain write command.
	 */
	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	/* Controller registers/RAM are little endian. */
	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Clear data at end of CAN frame */
	offset = round_down(cfd->len, sizeof(u32));
	len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
	if (MCP251XFD_SANITIZE_CAN && len)
		memset(hw_tx_obj->data + offset, 0x0, len);
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	/* Final length of the first (command + payload) transfer. */
	tx_obj->xfer[0].len = len;
}
2381 
2382 static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
2383 				  struct mcp251xfd_tx_obj *tx_obj)
2384 {
2385 	return spi_async(priv->spi, &tx_obj->msg);
2386 }
2387 
/* Check whether the TX ring has a free object. If not, stop the
 * tx-queue and re-check after a memory barrier to close the race
 * against the TEF handler freeing objects concurrently. Returns true
 * if the queue was stopped and the caller should report
 * NETDEV_TX_BUSY.
 */
static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	/* An object was freed in the meantime, keep transmitting. */
	netif_start_queue(priv->ndev);

	return false;
}
2412 
2413 static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
2414 					struct net_device *ndev)
2415 {
2416 	struct mcp251xfd_priv *priv = netdev_priv(ndev);
2417 	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
2418 	struct mcp251xfd_tx_obj *tx_obj;
2419 	u8 tx_head;
2420 	int err;
2421 
2422 	if (can_dropped_invalid_skb(ndev, skb))
2423 		return NETDEV_TX_OK;
2424 
2425 	if (mcp251xfd_tx_busy(priv, tx_ring))
2426 		return NETDEV_TX_BUSY;
2427 
2428 	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
2429 	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
2430 
2431 	/* Stop queue if we occupy the complete TX FIFO */
2432 	tx_head = mcp251xfd_get_tx_head(tx_ring);
2433 	tx_ring->head++;
2434 	if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
2435 		netif_stop_queue(ndev);
2436 
2437 	can_put_echo_skb(skb, ndev, tx_head);
2438 
2439 	err = mcp251xfd_tx_obj_write(priv, tx_obj);
2440 	if (err)
2441 		goto out_err;
2442 
2443 	return NETDEV_TX_OK;
2444 
2445  out_err:
2446 	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
2447 
2448 	return NETDEV_TX_OK;
2449 }
2450 
/* ndo_open: power up via runtime PM, allocate rings, enable the
 * transceiver, start the chip, request the IRQ and enable chip
 * interrupts. Resources are released in reverse order on failure.
 */
static int mcp251xfd_open(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	const struct spi_device *spi = priv->spi;
	int err;

	err = pm_runtime_get_sync(ndev->dev.parent);
	if (err < 0) {
		/* get_sync() bumps the refcount even on failure. */
		pm_runtime_put_noidle(ndev->dev.parent);
		return err;
	}

	err = open_candev(ndev);
	if (err)
		goto out_pm_runtime_put;

	err = mcp251xfd_ring_alloc(priv);
	if (err)
		goto out_close_candev;

	err = mcp251xfd_transceiver_enable(priv);
	if (err)
		goto out_mcp251xfd_ring_free;

	err = mcp251xfd_chip_start(priv);
	if (err)
		goto out_transceiver_disable;

	can_rx_offload_enable(&priv->offload);

	/* Threaded IRQ: the handler does sleeping SPI transfers. */
	err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
				   IRQF_ONESHOT, dev_name(&spi->dev),
				   priv);
	if (err)
		goto out_can_rx_offload_disable;

	err = mcp251xfd_chip_interrupts_enable(priv);
	if (err)
		goto out_free_irq;

	netif_start_queue(ndev);

	return 0;

 out_free_irq:
	free_irq(spi->irq, priv);
 out_can_rx_offload_disable:
	can_rx_offload_disable(&priv->offload);
 out_transceiver_disable:
	mcp251xfd_transceiver_disable(priv);
 out_mcp251xfd_ring_free:
	mcp251xfd_ring_free(priv);
 out_close_candev:
	close_candev(ndev);
 out_pm_runtime_put:
	/* NOTE(review): this also runs for early failures where the
	 * chip was never started - presumably a redundant but
	 * harmless stop; confirm.
	 */
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	pm_runtime_put(ndev->dev.parent);

	return err;
}
2511 
/* ndo_stop: tear down in reverse order of mcp251xfd_open(). */
static int mcp251xfd_stop(struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	/* Mask chip interrupts before releasing the line (ndev->irq
	 * is spi->irq, assigned in mcp251xfd_probe()).
	 */
	mcp251xfd_chip_interrupts_disable(priv);
	free_irq(ndev->irq, priv);
	can_rx_offload_disable(&priv->offload);
	mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
	mcp251xfd_transceiver_disable(priv);
	mcp251xfd_ring_free(priv);
	close_candev(ndev);

	/* Balance the pm_runtime_get_sync() in mcp251xfd_open(). */
	pm_runtime_put(ndev->dev.parent);

	return 0;
}
2529 
/* Network device callbacks. */
static const struct net_device_ops mcp251xfd_netdev_ops = {
	.ndo_open = mcp251xfd_open,
	.ndo_stop = mcp251xfd_stop,
	.ndo_start_xmit	= mcp251xfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
2536 
2537 static void
2538 mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
2539 {
2540 	const struct spi_device *spi = priv->spi;
2541 	const struct spi_controller *ctlr = spi->controller;
2542 
2543 	if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
2544 		priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
2545 }
2546 
/* Autodetect the chip model via the OSC_LPMEN bit (MCP2518FD only),
 * overwrite priv->devtype_data accordingly (preserving the
 * half-duplex quirk) and re-initialize the regmap for the detected
 * model.
 */
static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
{
	const struct net_device *ndev = priv->ndev;
	const struct mcp251xfd_devtype_data *devtype_data;
	u32 osc;
	int err;

	/* The OSC_LPMEN is only supported on MCP2518FD, so use it to
	 * autodetect the model.
	 */
	err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
				 MCP251XFD_REG_OSC_LPMEN,
				 MCP251XFD_REG_OSC_LPMEN);
	if (err)
		return err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
	if (err)
		return err;

	/* The bit only sticks on a MCP2518FD. */
	if (osc & MCP251XFD_REG_OSC_LPMEN)
		devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
	else
		devtype_data = &mcp251xfd_devtype_data_mcp2517fd;

	/* Warn if firmware named a concrete model that differs from
	 * what we detected; the generic "mcp251xfd" compatible is
	 * expected to be fixed up silently.
	 */
	if (!mcp251xfd_is_251X(priv) &&
	    priv->devtype_data.model != devtype_data->model) {
		netdev_info(ndev,
			    "Detected %s, but firmware specifies a %s. Fixing up.",
			    __mcp251xfd_get_model_str(devtype_data->model),
			    mcp251xfd_get_model_str(priv));
	}
	priv->devtype_data = *devtype_data;

	/* We need to preserve the Half Duplex Quirk. */
	mcp251xfd_register_quirks(priv);

	/* Re-init regmap with quirks of detected model. */
	return mcp251xfd_regmap_init(priv);
}
2587 
2588 static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
2589 {
2590 	int err, rx_pending;
2591 
2592 	if (!priv->rx_int)
2593 		return 0;
2594 
2595 	err = mcp251xfd_chip_rx_int_enable(priv);
2596 	if (err)
2597 		return err;
2598 
2599 	/* Check if RX_INT is properly working. The RX_INT should not
2600 	 * be active after a softreset.
2601 	 */
2602 	rx_pending = gpiod_get_value_cansleep(priv->rx_int);
2603 
2604 	err = mcp251xfd_chip_rx_int_disable(priv);
2605 	if (err)
2606 		return err;
2607 
2608 	if (!rx_pending)
2609 		return 0;
2610 
2611 	netdev_info(priv->ndev,
2612 		    "RX_INT active after softreset, disabling RX_INT support.");
2613 	devm_gpiod_put(&priv->spi->dev, priv->rx_int);
2614 	priv->rx_int = NULL;
2615 
2616 	return 0;
2617 }
2618 
2619 static int
2620 mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
2621 			      u32 *dev_id, u32 *effective_speed_hz)
2622 {
2623 	struct mcp251xfd_map_buf_nocrc *buf_rx;
2624 	struct mcp251xfd_map_buf_nocrc *buf_tx;
2625 	struct spi_transfer xfer[2] = { };
2626 	int err;
2627 
2628 	buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
2629 	if (!buf_rx)
2630 		return -ENOMEM;
2631 
2632 	buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
2633 	if (!buf_tx) {
2634 		err = -ENOMEM;
2635 		goto out_kfree_buf_rx;
2636 	}
2637 
2638 	xfer[0].tx_buf = buf_tx;
2639 	xfer[0].len = sizeof(buf_tx->cmd);
2640 	xfer[1].rx_buf = buf_rx->data;
2641 	xfer[1].len = sizeof(dev_id);
2642 
2643 	mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
2644 	err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
2645 	if (err)
2646 		goto out_kfree_buf_tx;
2647 
2648 	*dev_id = be32_to_cpup((__be32 *)buf_rx->data);
2649 	*effective_speed_hz = xfer->effective_speed_hz;
2650 
2651  out_kfree_buf_tx:
2652 	kfree(buf_tx);
2653  out_kfree_buf_rx:
2654 	kfree(buf_rx);
2655 
2656 	return 0;
2657 }
2658 
/* Expands to '+' if the given quirk is active, '-' otherwise; used to
 * pretty-print the quirk list. Relies on a variable named "priv"
 * being in scope at the call site.
 */
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
	(priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
2661 
/* Final step of registration: read the DEVID register and log a
 * one-line summary with model, silicon revision, active quirks, and
 * the oscillator / configured / limited / effective SPI frequencies.
 */
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
	u32 dev_id, effective_speed_hz;
	int err;

	err = mcp251xfd_register_get_dev_id(priv, &dev_id,
					    &effective_speed_hz);
	if (err)
		return err;

	/* Frequencies are printed as MHz with two decimals
	 * (x / 1000000 and x % 1000000 / 1000 / 10).
	 */
	netdev_info(priv->ndev,
		    "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
		    mcp251xfd_get_model_str(priv),
		    FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
		    FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
		    priv->rx_int ? '+' : '-',
		    MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
		    MCP251XFD_QUIRK_ACTIVE(CRC_REG),
		    MCP251XFD_QUIRK_ACTIVE(CRC_RX),
		    MCP251XFD_QUIRK_ACTIVE(CRC_TX),
		    MCP251XFD_QUIRK_ACTIVE(ECC),
		    MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
		    priv->can.clock.freq / 1000000,
		    priv->can.clock.freq % 1000000 / 1000 / 10,
		    priv->spi_max_speed_hz_orig / 1000000,
		    priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
		    priv->spi->max_speed_hz / 1000000,
		    priv->spi->max_speed_hz % 1000000 / 1000 / 10,
		    effective_speed_hz / 1000000,
		    effective_speed_hz % 1000000 / 1000 / 10);

	return 0;
}
2696 
/* Power up the chip, detect the model, register the CAN netdev and
 * put the controller back to sleep until the interface is opened.
 * The error unwinding mirrors the setup order.
 */
static int mcp251xfd_register(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int err;

	err = mcp251xfd_clks_and_vdd_enable(priv);
	if (err)
		return err;

	/* Mark the device active for runtime PM while probing; the
	 * matching put() happens at the end or in the error path.
	 */
	pm_runtime_get_noresume(ndev->dev.parent);
	err = pm_runtime_set_active(ndev->dev.parent);
	if (err)
		goto out_runtime_put_noidle;
	pm_runtime_enable(ndev->dev.parent);

	mcp251xfd_register_quirks(priv);

	err = mcp251xfd_chip_softreset(priv);
	/* -ENODEV means no chip answered: skip the sleep-mode write
	 * in the unwind path, the hardware isn't there.
	 */
	if (err == -ENODEV)
		goto out_runtime_disable;
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_chip_detect(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_check_rx_int(priv);
	if (err)
		goto out_chip_set_mode_sleep;

	err = register_candev(ndev);
	if (err)
		goto out_chip_set_mode_sleep;

	err = mcp251xfd_register_done(priv);
	if (err)
		goto out_unregister_candev;

	/* Put controller into sleep mode and let pm_runtime_put()
	 * disable the clocks and vdd. If CONFIG_PM is not enabled,
	 * the clocks and vdd will stay powered.
	 */
	err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
	if (err)
		goto out_unregister_candev;

	pm_runtime_put(ndev->dev.parent);

	return 0;

 out_unregister_candev:
	unregister_candev(ndev);
 out_chip_set_mode_sleep:
	mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
 out_runtime_disable:
	pm_runtime_disable(ndev->dev.parent);
 out_runtime_put_noidle:
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);

	return err;
}
2760 
/* Inverse of mcp251xfd_register(): unregister the netdev and undo the
 * runtime-PM/clock setup.
 */
static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
{
	struct net_device *ndev	= priv->ndev;

	unregister_candev(ndev);

	/* Resume the device, then drop the usage count without an
	 * idle callback, and power down clocks and vdd explicitly.
	 */
	pm_runtime_get_sync(ndev->dev.parent);
	pm_runtime_put_noidle(ndev->dev.parent);
	mcp251xfd_clks_and_vdd_disable(priv);
	pm_runtime_disable(ndev->dev.parent);
}
2772 
/* Device tree match table; the generic "mcp251xfd" compatible
 * triggers model autodetection in mcp251xfd_register_chip_detect().
 */
static const struct of_device_id mcp251xfd_of_match[] = {
	{
		.compatible = "microchip,mcp2517fd",
		.data = &mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.compatible = "microchip,mcp2518fd",
		.data = &mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.compatible = "microchip,mcp251xfd",
		.data = &mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
2788 
/* Legacy (non-DT) SPI device ID table, mirroring the OF table. */
static const struct spi_device_id mcp251xfd_id_table[] = {
	{
		.name = "mcp2517fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
	}, {
		.name = "mcp2518fd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
	}, {
		.name = "mcp251xfd",
		.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
2804 
2805 static int mcp251xfd_probe(struct spi_device *spi)
2806 {
2807 	const void *match;
2808 	struct net_device *ndev;
2809 	struct mcp251xfd_priv *priv;
2810 	struct gpio_desc *rx_int;
2811 	struct regulator *reg_vdd, *reg_xceiver;
2812 	struct clk *clk;
2813 	u32 freq;
2814 	int err;
2815 
2816 	if (!spi->irq)
2817 		return dev_err_probe(&spi->dev, -ENXIO,
2818 				     "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
2819 
2820 	rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
2821 					 GPIOD_IN);
2822 	if (PTR_ERR(rx_int) == -EPROBE_DEFER)
2823 		return -EPROBE_DEFER;
2824 	else if (IS_ERR(rx_int))
2825 		return PTR_ERR(rx_int);
2826 
2827 	reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
2828 	if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
2829 		return -EPROBE_DEFER;
2830 	else if (PTR_ERR(reg_vdd) == -ENODEV)
2831 		reg_vdd = NULL;
2832 	else if (IS_ERR(reg_vdd))
2833 		return PTR_ERR(reg_vdd);
2834 
2835 	reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
2836 	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
2837 		return -EPROBE_DEFER;
2838 	else if (PTR_ERR(reg_xceiver) == -ENODEV)
2839 		reg_xceiver = NULL;
2840 	else if (IS_ERR(reg_xceiver))
2841 		return PTR_ERR(reg_xceiver);
2842 
2843 	clk = devm_clk_get(&spi->dev, NULL);
2844 	if (IS_ERR(clk)) {
2845 		dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
2846 		return PTR_ERR(clk);
2847 	}
2848 	freq = clk_get_rate(clk);
2849 
2850 	/* Sanity check */
2851 	if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
2852 	    freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
2853 		dev_err(&spi->dev,
2854 			"Oscillator frequency (%u Hz) is too low or high.\n",
2855 			freq);
2856 		return -ERANGE;
2857 	}
2858 
2859 	if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
2860 		dev_err(&spi->dev,
2861 			"Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
2862 			freq);
2863 		return -ERANGE;
2864 	}
2865 
2866 	ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
2867 			    MCP251XFD_TX_OBJ_NUM_MAX);
2868 	if (!ndev)
2869 		return -ENOMEM;
2870 
2871 	SET_NETDEV_DEV(ndev, &spi->dev);
2872 
2873 	ndev->netdev_ops = &mcp251xfd_netdev_ops;
2874 	ndev->irq = spi->irq;
2875 	ndev->flags |= IFF_ECHO;
2876 
2877 	priv = netdev_priv(ndev);
2878 	spi_set_drvdata(spi, priv);
2879 	priv->can.clock.freq = freq;
2880 	priv->can.do_set_mode = mcp251xfd_set_mode;
2881 	priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
2882 	priv->can.bittiming_const = &mcp251xfd_bittiming_const;
2883 	priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
2884 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
2885 		CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
2886 		CAN_CTRLMODE_FD_NON_ISO;
2887 	priv->ndev = ndev;
2888 	priv->spi = spi;
2889 	priv->rx_int = rx_int;
2890 	priv->clk = clk;
2891 	priv->reg_vdd = reg_vdd;
2892 	priv->reg_xceiver = reg_xceiver;
2893 
2894 	match = device_get_match_data(&spi->dev);
2895 	if (match)
2896 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
2897 	else
2898 		priv->devtype_data = *(struct mcp251xfd_devtype_data *)
2899 			spi_get_device_id(spi)->driver_data;
2900 
2901 	/* Errata Reference:
2902 	 * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
2903 	 *
2904 	 * The SPI can write corrupted data to the RAM at fast SPI
2905 	 * speeds:
2906 	 *
2907 	 * Simultaneous activity on the CAN bus while writing data to
2908 	 * RAM via the SPI interface, with high SCK frequency, can
2909 	 * lead to corrupted data being written to RAM.
2910 	 *
2911 	 * Fix/Work Around:
2912 	 * Ensure that FSCK is less than or equal to 0.85 *
2913 	 * (FSYSCLK/2).
2914 	 *
2915 	 * Known good and bad combinations are:
2916 	 *
2917 	 * MCP	ext-clk	SoC			SPI			SPI-clk		max-clk	parent-clk	Status	config
2918 	 *
2919 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 8333333 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
2920 	 * 2518	20 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	 9375000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
2921 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	16666667 Hz	 83.33%	600000000 Hz	good	assigned-clocks = <&ccu CLK_SPIx>
2922 	 * 2518	40 MHz	allwinner,sun8i-h3	allwinner,sun8i-h3-spi	18750000 Hz	 93.75%	600000000 Hz	bad	assigned-clocks = <&ccu CLK_SPIx>
2923 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 8333333 Hz	 83.33%	 16666667 Hz	good	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2924 	 * 2517	20 MHz	fsl,imx8mm		fsl,imx51-ecspi		 9523809 Hz	 95.34%	 28571429 Hz	bad	assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
2925 	 * 2517 40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
2926 	 * 2518 40 MHz	atmel,sama5d27		atmel,at91rm9200-spi	16400000 Hz	 82.00%	 82000000 Hz	good	default
2927 	 *
2928 	 */
2929 	priv->spi_max_speed_hz_orig = spi->max_speed_hz;
2930 	spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
2931 	spi->bits_per_word = 8;
2932 	spi->rt = true;
2933 	err = spi_setup(spi);
2934 	if (err)
2935 		goto out_free_candev;
2936 
2937 	err = mcp251xfd_regmap_init(priv);
2938 	if (err)
2939 		goto out_free_candev;
2940 
2941 	err = can_rx_offload_add_manual(ndev, &priv->offload,
2942 					MCP251XFD_NAPI_WEIGHT);
2943 	if (err)
2944 		goto out_free_candev;
2945 
2946 	err = mcp251xfd_register(priv);
2947 	if (err)
2948 		goto out_free_candev;
2949 
2950 	return 0;
2951 
2952  out_free_candev:
2953 	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
2954 
2955 	free_candev(ndev);
2956 
2957 	return err;
2958 }
2959 
/* Remove: tear down in reverse order of mcp251xfd_probe(), restoring
 * the original SPI clock limit before freeing the netdev.
 */
static int mcp251xfd_remove(struct spi_device *spi)
{
	struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
	struct net_device *ndev = priv->ndev;

	can_rx_offload_del(&priv->offload);
	mcp251xfd_unregister(priv);
	spi->max_speed_hz = priv->spi_max_speed_hz_orig;
	free_candev(ndev);

	return 0;
}
2972 
2973 static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
2974 {
2975 	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
2976 
2977 	return mcp251xfd_clks_and_vdd_disable(priv);
2978 }
2979 
2980 static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
2981 {
2982 	const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
2983 
2984 	return mcp251xfd_clks_and_vdd_enable(priv);
2985 }
2986 
/* Only runtime PM is implemented; no system sleep callbacks. */
static const struct dev_pm_ops mcp251xfd_pm_ops = {
	SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
			   mcp251xfd_runtime_resume, NULL)
};
2991 
/* SPI driver glue. */
static struct spi_driver mcp251xfd_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.pm = &mcp251xfd_pm_ops,
		.of_match_table = mcp251xfd_of_match,
	},
	.probe = mcp251xfd_probe,
	.remove = mcp251xfd_remove,
	.id_table = mcp251xfd_id_table,
};
module_spi_driver(mcp251xfd_driver);
3003 
3004 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
3005 MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
3006 MODULE_LICENSE("GPL v2");
3007