xref: /linux/drivers/net/can/xilinx_can.c (revision 79b6bb73f888933cbcd20b0ef3976cde67951b72)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3  *
4  * Copyright (C) 2012 - 2014 Xilinx, Inc.
5  * Copyright (C) 2009 PetaLogix. All rights reserved.
6  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7  *
8  * Description:
9  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/types.h>
27 #include <linux/can/dev.h>
28 #include <linux/can/error.h>
29 #include <linux/can/led.h>
30 #include <linux/pm_runtime.h>
31 
32 #define DRIVER_NAME	"xilinx_can"
33 
34 /* CAN registers set */
35 enum xcan_reg {
36 	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
37 	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
38 	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
39 	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
40 	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
41 	XCAN_ESR_OFFSET		= 0x14, /* Error status */
42 	XCAN_SR_OFFSET		= 0x18, /* Status */
43 	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
44 	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
45 	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
46 
47 	/* not on CAN FD cores */
48 	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
49 	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
50 	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */
51 
52 	/* only on CAN FD cores */
53 	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
54 					  * Prescaler
55 					  */
56 	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
57 	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
58 	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
59 	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
60 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
61 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
62 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
63 };
64 
65 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
66 #define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
67 #define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
68 #define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
69 #define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)
70 
71 #define XCAN_CANFD_FRAME_SIZE		0x48
72 #define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
73 					 XCAN_CANFD_FRAME_SIZE * (n))
74 #define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
75 					 XCAN_CANFD_FRAME_SIZE * (n))
76 #define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
77 					 XCAN_CANFD_FRAME_SIZE * (n))
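/* Illustrative example derived from the macros above (not a quote from the
 * datasheet): with 0x48-byte CAN FD frame slots, TX mailbox 0 starts at
 * XCAN_TXMSG_FRAME_OFFSET(0) = 0x0100 and the fourth buffer of the first RX
 * FIFO starts at XCAN_RXMSG_FRAME_OFFSET(3) = 0x1100 + 3 * 0x48 = 0x11D8.
 */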
78 
79 /* the single TX mailbox used by this driver on CAN FD HW */
80 #define XCAN_TX_MAILBOX_IDX		0
81 
82 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
83 #define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
84 #define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
85 #define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
86 #define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
87 #define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
88 #define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
89 #define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
90 #define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
91 #define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
92 #define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
93 #define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
94 #define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
95 #define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
96 #define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
97 #define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
98 #define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
99 #define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
100 #define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
101 #define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
102 #define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
103 #define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
104 #define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
105 #define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
106 #define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
107 #define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
108 #define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
109 #define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
110 #define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
111 #define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
112 #define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
113 #define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
114 #define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
115 #define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
116 #define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
117 #define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
118 #define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
119 #define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
120 #define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
121 #define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
122 #define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
123 #define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
124 #define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
125 #define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
126 #define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
127 #define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
128 #define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
129 #define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
130 #define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
131 #define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */
132 
133 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
134 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
135 #define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
136 #define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
137 #define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
138 #define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
139 #define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
140 #define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
141 #define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */
142 
143 /* CAN frame length constants */
144 #define XCAN_FRAME_MAX_DATA_LEN		8
145 #define XCANFD_DW_BYTES			4
146 #define XCAN_TIMEOUT			(1 * HZ)
147 
148 /* TX-FIFO-empty interrupt available */
149 #define XCAN_FLAG_TXFEMP	0x0001
150 /* RX Match Not Finished interrupt available */
151 #define XCAN_FLAG_RXMNF		0x0002
152 /* Extended acceptance filters with control at 0xE0 */
153 #define XCAN_FLAG_EXT_FILTERS	0x0004
154 /* TX mailboxes instead of TX FIFO */
155 #define XCAN_FLAG_TX_MAILBOXES	0x0008
156 /* RX FIFO with each buffer in separate registers at 0x1100
157  * instead of the regular FIFO at 0x50
158  */
159 #define XCAN_FLAG_RX_FIFO_MULTI	0x0010
160 #define XCAN_FLAG_CANFD_2	0x0020
161 
162 enum xcan_ip_type {
163 	XAXI_CAN = 0,
164 	XZYNQ_CANPS,
165 	XAXI_CANFD,
166 	XAXI_CANFD_2_0,
167 };
168 
169 struct xcan_devtype_data {
170 	enum xcan_ip_type cantype;
171 	unsigned int flags;
172 	const struct can_bittiming_const *bittiming_const;
173 	const char *bus_clk_name;
174 	unsigned int btr_ts2_shift;
175 	unsigned int btr_sjw_shift;
176 };
177 
178 /**
179  * struct xcan_priv - This definition defines the CAN driver instance
180  * @can:			CAN private data structure.
181  * @tx_lock:			Lock for synchronizing TX interrupt handling
182  * @tx_head:			Tx CAN packets ready to send on the queue
183  * @tx_tail:			Tx CAN packets successfully sent on the queue
184  * @tx_max:			Maximum number of packets the driver can send
185  * @napi:			NAPI structure
186  * @read_reg:			For reading data from CAN registers
187  * @write_reg:			For writing data to CAN registers
188  * @dev:			Network device data structure
189  * @reg_base:			Ioremapped address to registers
190  * @irq_flags:			For request_irq()
191  * @bus_clk:			Pointer to struct clk
192  * @can_clk:			Pointer to struct clk
193  * @devtype:			Device type specific constants
194  */
195 struct xcan_priv {
196 	struct can_priv can;
197 	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
198 	unsigned int tx_head;
199 	unsigned int tx_tail;
200 	unsigned int tx_max;
201 	struct napi_struct napi;
202 	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
203 	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
204 			  u32 val);
205 	struct device *dev;
206 	void __iomem *reg_base;
207 	unsigned long irq_flags;
208 	struct clk *bus_clk;
209 	struct clk *can_clk;
210 	struct xcan_devtype_data devtype;
211 };
212 
213 /* CAN Bittiming constants as per Xilinx CAN specs */
214 static const struct can_bittiming_const xcan_bittiming_const = {
215 	.name = DRIVER_NAME,
216 	.tseg1_min = 1,
217 	.tseg1_max = 16,
218 	.tseg2_min = 1,
219 	.tseg2_max = 8,
220 	.sjw_max = 4,
221 	.brp_min = 1,
222 	.brp_max = 256,
223 	.brp_inc = 1,
224 };
225 
226 /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
227 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
228 	.name = DRIVER_NAME,
229 	.tseg1_min = 1,
230 	.tseg1_max = 64,
231 	.tseg2_min = 1,
232 	.tseg2_max = 16,
233 	.sjw_max = 16,
234 	.brp_min = 1,
235 	.brp_max = 256,
236 	.brp_inc = 1,
237 };
238 
239 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
240 static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
241 	.name = DRIVER_NAME,
242 	.tseg1_min = 1,
243 	.tseg1_max = 16,
244 	.tseg2_min = 1,
245 	.tseg2_max = 8,
246 	.sjw_max = 8,
247 	.brp_min = 1,
248 	.brp_max = 256,
249 	.brp_inc = 1,
250 };
251 
252 /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
253 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
254 	.name = DRIVER_NAME,
255 	.tseg1_min = 1,
256 	.tseg1_max = 256,
257 	.tseg2_min = 1,
258 	.tseg2_max = 128,
259 	.sjw_max = 128,
260 	.brp_min = 1,
261 	.brp_max = 256,
262 	.brp_inc = 1,
263 };
264 
265 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
266 static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
267 	.name = DRIVER_NAME,
268 	.tseg1_min = 1,
269 	.tseg1_max = 32,
270 	.tseg2_min = 1,
271 	.tseg2_max = 16,
272 	.sjw_max = 16,
273 	.brp_min = 1,
274 	.brp_max = 256,
275 	.brp_inc = 1,
276 };
277 
278 /**
279  * xcan_write_reg_le - Write a value to the device register little endian
280  * @priv:	Driver private data structure
281  * @reg:	Register offset
282  * @val:	Value to write at the Register offset
283  *
284  * Write data to the particular CAN register
285  */
286 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
287 			      u32 val)
288 {
289 	iowrite32(val, priv->reg_base + reg);
290 }
291 
292 /**
293  * xcan_read_reg_le - Read a value from the device register little endian
294  * @priv:	Driver private data structure
295  * @reg:	Register offset
296  *
297  * Read data from the particular CAN register
298  * Return: value read from the CAN register
299  */
300 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
301 {
302 	return ioread32(priv->reg_base + reg);
303 }
304 
305 /**
306  * xcan_write_reg_be - Write a value to the device register big endian
307  * @priv:	Driver private data structure
308  * @reg:	Register offset
309  * @val:	Value to write at the Register offset
310  *
311  * Write data to the particular CAN register
312  */
313 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
314 			      u32 val)
315 {
316 	iowrite32be(val, priv->reg_base + reg);
317 }
318 
319 /**
320  * xcan_read_reg_be - Read a value from the device register big endian
321  * @priv:	Driver private data structure
322  * @reg:	Register offset
323  *
324  * Read data from the particular CAN register
325  * Return: value read from the CAN register
326  */
327 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
328 {
329 	return ioread32be(priv->reg_base + reg);
330 }
331 
332 /**
333  * xcan_rx_int_mask - Get the mask for the receive interrupt
334  * @priv:	Driver private data structure
335  *
336  * Return: The receive interrupt mask used by the driver on this HW
337  */
338 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
339 {
340 	/* RXNEMP is better suited for our use case as it cannot be cleared
341 	 * while the FIFO is non-empty, but CAN FD HW does not have it
342 	 */
343 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
344 		return XCAN_IXR_RXOK_MASK;
345 	else
346 		return XCAN_IXR_RXNEMP_MASK;
347 }
348 
349 /**
350  * set_reset_mode - Resets the CAN device mode
351  * @ndev:	Pointer to net_device structure
352  *
353  * This is the driver reset mode routine. The driver
354  * enters configuration mode.
355  *
356  * Return: 0 on success and failure value on error
357  */
358 static int set_reset_mode(struct net_device *ndev)
359 {
360 	struct xcan_priv *priv = netdev_priv(ndev);
361 	unsigned long timeout;
362 
363 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
364 
365 	timeout = jiffies + XCAN_TIMEOUT;
366 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
367 		if (time_after(jiffies, timeout)) {
368 			netdev_warn(ndev, "timed out for config mode\n");
369 			return -ETIMEDOUT;
370 		}
371 		usleep_range(500, 10000);
372 	}
373 
374 	/* reset clears FIFOs */
375 	priv->tx_head = 0;
376 	priv->tx_tail = 0;
377 
378 	return 0;
379 }
380 
381 /**
382  * xcan_set_bittiming - CAN set bit timing routine
383  * @ndev:	Pointer to net_device structure
384  *
385  * This is the driver set bittiming routine.
386  * Return: 0 on success and failure value on error
387  */
388 static int xcan_set_bittiming(struct net_device *ndev)
389 {
390 	struct xcan_priv *priv = netdev_priv(ndev);
391 	struct can_bittiming *bt = &priv->can.bittiming;
392 	struct can_bittiming *dbt = &priv->can.data_bittiming;
393 	u32 btr0, btr1;
394 	u32 is_config_mode;
395 
396 	/* Check whether Xilinx CAN is in configuration mode.
397 	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
398 	 */
399 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
400 				XCAN_SR_CONFIG_MASK;
401 	if (!is_config_mode) {
402 		netdev_alert(ndev,
403 			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
404 		return -EPERM;
405 	}
406 
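	/* Worked example (illustrative, assuming a 24 MHz can_clk and a
	 * nominal timing of brp = 3, prop_seg + phase_seg1 = 11,
	 * phase_seg2 = 4, sjw = 1, i.e. 16 tq per bit at 500 kbit/s):
	 * BRPR = 3 - 1 = 2 and, with the Zynq shifts (ts2 = 4, sjw = 7),
	 * BTR = (11 - 1) | ((4 - 1) << 4) = 0x3A.
	 */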
407 	/* Setting Baud Rate prescaler value in BRPR Register */
408 	btr0 = (bt->brp - 1);
409 
410 	/* Setting Time Segment 1 in BTR Register */
411 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
412 
413 	/* Setting Time Segment 2 in BTR Register */
414 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
415 
416 	/* Setting Synchronous jump width in BTR Register */
417 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
418 
419 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
420 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
421 
422 	if (priv->devtype.cantype == XAXI_CANFD ||
423 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
424 		/* Setting Baud Rate prescaler value in F_BRPR Register */
425 		btr0 = dbt->brp - 1;
426 
427 		/* Setting Time Segment 1 in BTR Register */
428 		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
429 
430 		/* Setting Time Segment 2 in BTR Register */
431 		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
432 
433 		/* Setting Synchronous jump width in BTR Register */
434 		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
435 
436 		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
437 		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
438 	}
439 
440 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
441 		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
442 		   priv->read_reg(priv, XCAN_BTR_OFFSET));
443 
444 	return 0;
445 }
446 
447 /**
448  * xcan_chip_start - This is the driver's start routine
449  * @ndev:	Pointer to net_device structure
450  *
451  * This is the driver's start routine.
452  * Based on the state of the CAN device it puts
453  * the CAN device into a proper mode.
454  *
455  * Return: 0 on success and failure value on error
456  */
457 static int xcan_chip_start(struct net_device *ndev)
458 {
459 	struct xcan_priv *priv = netdev_priv(ndev);
460 	u32 reg_msr;
461 	int err;
462 	u32 ier;
463 
464 	/* Put the device into reset (configuration) mode */
465 	err = set_reset_mode(ndev);
466 	if (err < 0)
467 		return err;
468 
469 	err = xcan_set_bittiming(ndev);
470 	if (err < 0)
471 		return err;
472 
473 	/* Enable interrupts
474 	 *
475 	 * We enable the ERROR interrupt even with
476 	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
477 	 * dedicated interrupt for a state change to
478 	 * ERROR_WARNING/ERROR_PASSIVE.
479 	 */
480 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
481 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
482 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
483 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
484 
485 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
486 		ier |= XCAN_IXR_RXMNF_MASK;
487 
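	/* As a concrete (illustrative) example, on a Zynq CANPS the value
	 * written below resolves to ier = 0x00000FC3, i.e. TXOK | BSOFF |
	 * WKUP | SLP | ERROR | RXOFLW | ARBLST | RXNEMP, with no RXMNF bit.
	 */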
488 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
489 
490 	/* Check whether it is loopback mode or normal mode  */
491 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
492 		reg_msr = XCAN_MSR_LBACK_MASK;
493 	else
494 		reg_msr = 0x0;
495 
496 	/* enable the first extended filter, if any, as cores with extended
497 	 * filtering default to non-receipt if all filters are disabled
498 	 */
499 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
500 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
501 
502 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
503 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
504 
505 	netdev_dbg(ndev, "status:0x%08x\n",
506 		   priv->read_reg(priv, XCAN_SR_OFFSET));
507 
508 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
509 	return 0;
510 }
511 
512 /**
513  * xcan_do_set_mode - This sets the mode of the driver
514  * @ndev:	Pointer to net_device structure
515  * @mode:	Tells the mode of the driver
516  *
517  * This checks the driver's state and calls
518  * the corresponding mode to set.
519  *
520  * Return: 0 on success and failure value on error
521  */
522 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
523 {
524 	int ret;
525 
526 	switch (mode) {
527 	case CAN_MODE_START:
528 		ret = xcan_chip_start(ndev);
529 		if (ret < 0) {
530 			netdev_err(ndev, "xcan_chip_start failed!\n");
531 			return ret;
532 		}
533 		netif_wake_queue(ndev);
534 		break;
535 	default:
536 		ret = -EOPNOTSUPP;
537 		break;
538 	}
539 
540 	return ret;
541 }
542 
543 /**
544  * xcan_write_frame - Write a frame to HW
545  * @priv:		Driver private data structure
546  * @skb:		sk_buff pointer that contains data to be Txed
547  * @frame_offset:	Register offset to write the frame to
548  */
549 static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
550 			     int frame_offset)
551 {
552 	u32 id, dlc, data[2] = {0, 0};
553 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
554 	u32 ramoff, dwindex = 0, i;
555 
556 	/* Watch the bit sequence carefully */
557 	if (cf->can_id & CAN_EFF_FLAG) {
558 		/* Extended CAN ID format */
559 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
560 			XCAN_IDR_ID2_MASK;
561 		id |= (((cf->can_id & CAN_EFF_MASK) >>
562 			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
563 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
564 
565 		/* The substitute remote TX request bit should be "1"
566 		 * for extended frames as in the Xilinx CAN datasheet
567 		 */
568 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
569 
570 		if (cf->can_id & CAN_RTR_FLAG)
571 			/* Extended frames remote TX request */
572 			id |= XCAN_IDR_RTR_MASK;
573 	} else {
574 		/* Standard CAN ID format */
575 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
576 			XCAN_IDR_ID1_MASK;
577 
578 		if (cf->can_id & CAN_RTR_FLAG)
579 			/* Standard frames remote TX request */
580 			id |= XCAN_IDR_SRR_MASK;
581 	}
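	/* Resulting layout (illustrative): a standard ID of 0x123 lands in
	 * IDR[31:21], i.e. id = 0x123 << 21 = 0x24600000; for an extended ID
	 * the upper 11 bits go to IDR[31:21], the lower 18 bits to IDR[18:1],
	 * and IDE plus SRR are forced to 1.
	 */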
582 
583 	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
584 	if (can_is_canfd_skb(skb)) {
585 		if (cf->flags & CANFD_BRS)
586 			dlc |= XCAN_DLCR_BRS_MASK;
587 		dlc |= XCAN_DLCR_EDL_MASK;
588 	}
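	/* For example (illustrative), a classic 8-byte frame encodes as
	 * dlc = 8 << 28 = 0x80000000, while a 12-byte CAN FD frame uses DLC
	 * code 9 with the EDL bit (and BRS if requested) set.
	 */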
589 
590 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
591 	/* If the CAN frame is RTR frame this write triggers transmission
592 	 * (not on CAN FD)
593 	 */
594 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
595 	if (priv->devtype.cantype == XAXI_CANFD ||
596 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
597 		for (i = 0; i < cf->len; i += 4) {
598 			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
599 					(dwindex * XCANFD_DW_BYTES);
600 			priv->write_reg(priv, ramoff,
601 					be32_to_cpup((__be32 *)(cf->data + i)));
602 			dwindex++;
603 		}
604 	} else {
605 		if (cf->len > 0)
606 			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
607 		if (cf->len > 4)
608 			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
609 
610 		if (!(cf->can_id & CAN_RTR_FLAG)) {
611 			priv->write_reg(priv,
612 					XCAN_FRAME_DW1_OFFSET(frame_offset),
613 					data[0]);
614 			/* If the CAN frame is Standard/Extended frame this
615 			 * write triggers transmission (not on CAN FD)
616 			 */
617 			priv->write_reg(priv,
618 					XCAN_FRAME_DW2_OFFSET(frame_offset),
619 					data[1]);
620 		}
621 	}
622 }
623 
624 /**
625  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
626  * @skb:	sk_buff pointer that contains data to be Txed
627  * @ndev:	Pointer to net_device structure
628  *
629  * Return: 0 on success, -ENOSPC if FIFO is full.
630  */
631 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
632 {
633 	struct xcan_priv *priv = netdev_priv(ndev);
634 	unsigned long flags;
635 
636 	/* Check if the TX buffer is full */
637 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
638 			XCAN_SR_TXFLL_MASK))
639 		return -ENOSPC;
640 
641 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
642 
643 	spin_lock_irqsave(&priv->tx_lock, flags);
644 
645 	priv->tx_head++;
646 
647 	xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
648 
649 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
650 	if (priv->tx_max > 1)
651 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
652 
653 	/* Check if the TX buffer is full */
654 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
655 		netif_stop_queue(ndev);
656 
657 	spin_unlock_irqrestore(&priv->tx_lock, flags);
658 
659 	return 0;
660 }
661 
662 /**
663  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
664  * @skb:	sk_buff pointer that contains data to be Txed
665  * @ndev:	Pointer to net_device structure
666  *
667  * Return: 0 on success, -ENOSPC if there is no space
668  */
669 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
670 {
671 	struct xcan_priv *priv = netdev_priv(ndev);
672 	unsigned long flags;
673 
674 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
675 		     BIT(XCAN_TX_MAILBOX_IDX)))
676 		return -ENOSPC;
677 
678 	can_put_echo_skb(skb, ndev, 0);
679 
680 	spin_lock_irqsave(&priv->tx_lock, flags);
681 
682 	priv->tx_head++;
683 
684 	xcan_write_frame(priv, skb,
685 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
686 
687 	/* Mark buffer as ready for transmit */
688 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
689 
690 	netif_stop_queue(ndev);
691 
692 	spin_unlock_irqrestore(&priv->tx_lock, flags);
693 
694 	return 0;
695 }
696 
697 /**
698  * xcan_start_xmit - Starts the transmission
699  * @skb:	sk_buff pointer that contains data to be Txed
700  * @ndev:	Pointer to net_device structure
701  *
702  * This function is invoked from upper layers to initiate transmission.
703  *
704  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
705  */
706 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
707 {
708 	struct xcan_priv *priv = netdev_priv(ndev);
709 	int ret;
710 
711 	if (can_dropped_invalid_skb(ndev, skb))
712 		return NETDEV_TX_OK;
713 
714 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
715 		ret = xcan_start_xmit_mailbox(skb, ndev);
716 	else
717 		ret = xcan_start_xmit_fifo(skb, ndev);
718 
719 	if (ret < 0) {
720 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
721 		netif_stop_queue(ndev);
722 		return NETDEV_TX_BUSY;
723 	}
724 
725 	return NETDEV_TX_OK;
726 }
727 
728 /**
729  * xcan_rx - Is called from the CAN ISR to complete the received
730  *		frame processing
731  * @ndev:	Pointer to net_device structure
732  * @frame_base:	Register offset to the frame to be read
733  *
734  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
735  * does minimal processing and invokes "netif_receive_skb" to complete further
736  * processing.
737  * Return: 1 on success and 0 on failure.
738  */
739 static int xcan_rx(struct net_device *ndev, int frame_base)
740 {
741 	struct xcan_priv *priv = netdev_priv(ndev);
742 	struct net_device_stats *stats = &ndev->stats;
743 	struct can_frame *cf;
744 	struct sk_buff *skb;
745 	u32 id_xcan, dlc, data[2] = {0, 0};
746 
747 	skb = alloc_can_skb(ndev, &cf);
748 	if (unlikely(!skb)) {
749 		stats->rx_dropped++;
750 		return 0;
751 	}
752 
753 	/* Read a frame from the Xilinx Zynq CANPS */
754 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
755 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
756 				   XCAN_DLCR_DLC_SHIFT;
757 
758 	/* Change Xilinx CAN data length format to socketCAN data format */
759 	cf->can_dlc = get_can_dlc(dlc);
760 
761 	/* Change Xilinx CAN ID format to socketCAN ID format */
762 	if (id_xcan & XCAN_IDR_IDE_MASK) {
763 		/* The received frame is an Extended format frame */
764 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
765 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
766 				XCAN_IDR_ID2_SHIFT;
767 		cf->can_id |= CAN_EFF_FLAG;
768 		if (id_xcan & XCAN_IDR_RTR_MASK)
769 			cf->can_id |= CAN_RTR_FLAG;
770 	} else {
771 		/* The received frame is a standard format frame */
772 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
773 				XCAN_IDR_ID1_SHIFT;
774 		if (id_xcan & XCAN_IDR_SRR_MASK)
775 			cf->can_id |= CAN_RTR_FLAG;
776 	}
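	/* In the extended case above, the >> 3 shift maps IDR[31:21]
	 * (identifier bits 28:18) to bits 28:18 of can_id, since the ID1
	 * field sits 3 bits higher than its position in the 29-bit ID.
	 */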
777 
778 	/* DW1/DW2 must always be read to remove message from RXFIFO */
779 	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
780 	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
781 
782 	if (!(cf->can_id & CAN_RTR_FLAG)) {
783 		/* Change Xilinx CAN data format to socketCAN data format */
784 		if (cf->can_dlc > 0)
785 			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
786 		if (cf->can_dlc > 4)
787 			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
788 	}
789 
790 	stats->rx_bytes += cf->can_dlc;
791 	stats->rx_packets++;
792 	netif_receive_skb(skb);
793 
794 	return 1;
795 }
796 
797 /**
798  * xcanfd_rx - Is called from the CAN ISR to complete the received
799  *		frame processing
800  * @ndev:	Pointer to net_device structure
801  * @frame_base:	Register offset to the frame to be read
802  *
803  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
804  * does minimal processing and invokes "netif_receive_skb" to complete further
805  * processing.
806  * Return: 1 on success and 0 on failure.
807  */
808 static int xcanfd_rx(struct net_device *ndev, int frame_base)
809 {
810 	struct xcan_priv *priv = netdev_priv(ndev);
811 	struct net_device_stats *stats = &ndev->stats;
812 	struct canfd_frame *cf;
813 	struct sk_buff *skb;
814 	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
815 
816 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
817 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
818 	if (dlc & XCAN_DLCR_EDL_MASK)
819 		skb = alloc_canfd_skb(ndev, &cf);
820 	else
821 		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
822 
823 	if (unlikely(!skb)) {
824 		stats->rx_dropped++;
825 		return 0;
826 	}
827 
828 	/* Change Xilinx CANFD data length format to socketCAN data
829 	 * format
830 	 */
831 	if (dlc & XCAN_DLCR_EDL_MASK)
832 		cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
833 				  XCAN_DLCR_DLC_SHIFT);
834 	else
835 		cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
836 					  XCAN_DLCR_DLC_SHIFT);
837 
838 	/* Change Xilinx CAN ID format to socketCAN ID format */
839 	if (id_xcan & XCAN_IDR_IDE_MASK) {
840 		/* The received frame is an Extended format frame */
841 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
842 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
843 				XCAN_IDR_ID2_SHIFT;
844 		cf->can_id |= CAN_EFF_FLAG;
845 		if (id_xcan & XCAN_IDR_RTR_MASK)
846 			cf->can_id |= CAN_RTR_FLAG;
847 	} else {
848 		/* The received frame is a standard format frame */
849 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
850 				XCAN_IDR_ID1_SHIFT;
851 		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
852 					XCAN_IDR_SRR_MASK))
853 			cf->can_id |= CAN_RTR_FLAG;
854 	}
855 
856 	/* Check whether the received frame is FD or not */
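	/* For example (illustrative), a 12-byte CAN FD payload is copied as
	 * three consecutive 32-bit words from the frame's DW area, while a
	 * classic frame reads at most two words from the fixed DW offset.
	 */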
857 	if (dlc & XCAN_DLCR_EDL_MASK) {
858 		for (i = 0; i < cf->len; i += 4) {
859 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
860 					(dwindex * XCANFD_DW_BYTES);
861 			data[0] = priv->read_reg(priv, dw_offset);
862 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
863 			dwindex++;
864 		}
865 	} else {
866 		for (i = 0; i < cf->len; i += 4) {
867 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
868 			data[0] = priv->read_reg(priv, dw_offset + i);
869 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
870 		}
871 	}
872 	stats->rx_bytes += cf->len;
873 	stats->rx_packets++;
874 	netif_receive_skb(skb);
875 
876 	return 1;
877 }
878 
879 /**
880  * xcan_current_error_state - Get current error state from HW
881  * @ndev:	Pointer to net_device structure
882  *
883  * Checks the current CAN error state from the HW. Note that this
884  * only checks for ERROR_PASSIVE and ERROR_WARNING.
885  *
886  * Return:
887  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
888  * otherwise.
889  */
890 static enum can_state xcan_current_error_state(struct net_device *ndev)
891 {
892 	struct xcan_priv *priv = netdev_priv(ndev);
893 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
894 
895 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
896 		return CAN_STATE_ERROR_PASSIVE;
897 	else if (status & XCAN_SR_ERRWRN_MASK)
898 		return CAN_STATE_ERROR_WARNING;
899 	else
900 		return CAN_STATE_ERROR_ACTIVE;
901 }
902 
903 /**
904  * xcan_set_error_state - Set new CAN error state
905  * @ndev:	Pointer to net_device structure
906  * @new_state:	The new CAN state to be set
907  * @cf:		Error frame to be populated or NULL
908  *
909  * Set new CAN error state for the device, updating statistics and
910  * populating the error frame if given.
911  */
912 static void xcan_set_error_state(struct net_device *ndev,
913 				 enum can_state new_state,
914 				 struct can_frame *cf)
915 {
916 	struct xcan_priv *priv = netdev_priv(ndev);
917 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
918 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
919 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
920 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
921 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
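	/* Illustrative decode: ECR = 0x00008010 means REC = 128 and TEC = 16,
	 * so only rx_state is moved to @new_state while tx_state stays 0.
	 */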
922 
923 	/* non-ERROR states are handled elsewhere */
924 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
925 		return;
926 
927 	can_change_state(ndev, cf, tx_state, rx_state);
928 
929 	if (cf) {
930 		cf->data[6] = txerr;
931 		cf->data[7] = rxerr;
932 	}
933 }
934 
935 /**
936  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
937  * @ndev:	Pointer to net_device structure
938  *
939  * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
940  * the performed RX/TX has caused it to drop to a lesser state and set
941  * the interface state accordingly.
942  */
943 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
944 {
945 	struct xcan_priv *priv = netdev_priv(ndev);
946 	enum can_state old_state = priv->can.state;
947 	enum can_state new_state;
948 
949 	/* changing error state due to successful frame RX/TX can only
950 	 * occur from these states
951 	 */
952 	if (old_state != CAN_STATE_ERROR_WARNING &&
953 	    old_state != CAN_STATE_ERROR_PASSIVE)
954 		return;
955 
956 	new_state = xcan_current_error_state(ndev);
957 
958 	if (new_state != old_state) {
959 		struct sk_buff *skb;
960 		struct can_frame *cf;
961 
962 		skb = alloc_can_err_skb(ndev, &cf);
963 
964 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
965 
966 		if (skb) {
967 			struct net_device_stats *stats = &ndev->stats;
968 
969 			stats->rx_packets++;
970 			stats->rx_bytes += cf->can_dlc;
971 			netif_rx(skb);
972 		}
973 	}
974 }
975 
976 /**
977  * xcan_err_interrupt - error frame Isr
978  * @ndev:	net_device pointer
979  * @isr:	interrupt status register value
980  *
981  * This is the CAN error interrupt and it will
982  * check the type of error and forward the error
983  * frame to upper layers.
984  */
985 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
986 {
987 	struct xcan_priv *priv = netdev_priv(ndev);
988 	struct net_device_stats *stats = &ndev->stats;
989 	struct can_frame cf = { };
990 	u32 err_status;
991 
992 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
993 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
994 
995 	if (isr & XCAN_IXR_BSOFF_MASK) {
996 		priv->can.state = CAN_STATE_BUS_OFF;
997 		priv->can.can_stats.bus_off++;
998 		/* Leave device in Config Mode in bus-off state */
999 		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1000 		can_bus_off(ndev);
1001 		cf.can_id |= CAN_ERR_BUSOFF;
1002 	} else {
1003 		enum can_state new_state = xcan_current_error_state(ndev);
1004 
1005 		if (new_state != priv->can.state)
1006 			xcan_set_error_state(ndev, new_state, &cf);
1007 	}
1008 
1009 	/* Check for Arbitration lost interrupt */
1010 	if (isr & XCAN_IXR_ARBLST_MASK) {
1011 		priv->can.can_stats.arbitration_lost++;
1012 		cf.can_id |= CAN_ERR_LOSTARB;
1013 		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
1014 	}
1015 
1016 	/* Check for RX FIFO Overflow interrupt */
1017 	if (isr & XCAN_IXR_RXOFLW_MASK) {
1018 		stats->rx_over_errors++;
1019 		stats->rx_errors++;
1020 		cf.can_id |= CAN_ERR_CRTL;
1021 		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1022 	}
1023 
1024 	/* Check for RX Match Not Finished interrupt */
1025 	if (isr & XCAN_IXR_RXMNF_MASK) {
1026 		stats->rx_dropped++;
1027 		stats->rx_errors++;
1028 		netdev_err(ndev, "RX match not finished, frame discarded\n");
1029 		cf.can_id |= CAN_ERR_CRTL;
1030 		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
1031 	}
1032 
1033 	/* Check for error interrupt */
1034 	if (isr & XCAN_IXR_ERROR_MASK) {
1035 		bool berr_reporting = false;
1036 
1037 		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
1038 			berr_reporting = true;
1039 			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1040 		}
1041 
1042 		/* Check for Ack error interrupt */
1043 		if (err_status & XCAN_ESR_ACKER_MASK) {
1044 			stats->tx_errors++;
1045 			if (berr_reporting) {
1046 				cf.can_id |= CAN_ERR_ACK;
1047 				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
1048 			}
1049 		}
1050 
1051 		/* Check for Bit error interrupt */
1052 		if (err_status & XCAN_ESR_BERR_MASK) {
1053 			stats->tx_errors++;
1054 			if (berr_reporting) {
1055 				cf.can_id |= CAN_ERR_PROT;
1056 				cf.data[2] = CAN_ERR_PROT_BIT;
1057 			}
1058 		}
1059 
1060 		/* Check for Stuff error interrupt */
1061 		if (err_status & XCAN_ESR_STER_MASK) {
1062 			stats->rx_errors++;
1063 			if (berr_reporting) {
1064 				cf.can_id |= CAN_ERR_PROT;
1065 				cf.data[2] = CAN_ERR_PROT_STUFF;
1066 			}
1067 		}
1068 
1069 		/* Check for Form error interrupt */
1070 		if (err_status & XCAN_ESR_FMER_MASK) {
1071 			stats->rx_errors++;
1072 			if (berr_reporting) {
1073 				cf.can_id |= CAN_ERR_PROT;
1074 				cf.data[2] = CAN_ERR_PROT_FORM;
1075 			}
1076 		}
1077 
1078 		/* Check for CRC error interrupt */
1079 		if (err_status & XCAN_ESR_CRCER_MASK) {
1080 			stats->rx_errors++;
1081 			if (berr_reporting) {
1082 				cf.can_id |= CAN_ERR_PROT;
1083 				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
1084 			}
1085 		}
1086 		priv->can.can_stats.bus_error++;
1087 	}
1088 
1089 	if (cf.can_id) {
1090 		struct can_frame *skb_cf;
1091 		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
1092 
1093 		if (skb) {
1094 			skb_cf->can_id |= cf.can_id;
1095 			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
1096 			stats->rx_packets++;
1097 			stats->rx_bytes += CAN_ERR_DLC;
1098 			netif_rx(skb);
1099 		}
1100 	}
1101 
1102 	netdev_dbg(ndev, "%s: error status register:0x%x\n",
1103 		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1104 }
1105 
1106 /**
1107  * xcan_state_interrupt - It will check the state of the CAN device
1108  * @ndev:	net_device pointer
1109  * @isr:	interrupt status register value
1110  *
1111  * This checks the state of the CAN device
1112  * and puts the device into the appropriate state.
1113  */
1114 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1115 {
1116 	struct xcan_priv *priv = netdev_priv(ndev);
1117 
1118 	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
1119 	if (isr & XCAN_IXR_SLP_MASK)
1120 		priv->can.state = CAN_STATE_SLEEPING;
1121 
1122 	/* Check for Wake up interrupt; if set, put CAN device in Active state */
1123 	if (isr & XCAN_IXR_WKUP_MASK)
1124 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
1125 }
1126 
1127 /**
1128  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1129  * @priv:	Driver private data structure
1130  *
1131  * Return: Register offset of the next frame in RX FIFO.
1132  */
1133 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1134 {
1135 	int offset;
1136 
1137 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1138 		u32 fsr, mask;
1139 
1140 		/* clear RXOK before the is-empty check so that any newly
1141 		 * received frame will reassert it without a race
1142 		 */
1143 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1144 
1145 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
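		/* Illustrative decode on a non-2.0 CAN FD core: FSR = 0x0305
		 * means a fill level of 3 pending frames with the next one in
		 * RX buffer 5 (read index).
		 */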
1146 
1147 		/* check if RX FIFO is empty */
1148 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1149 			mask = XCAN_2_FSR_FL_MASK;
1150 		else
1151 			mask = XCAN_FSR_FL_MASK;
1152 
1153 		if (!(fsr & mask))
1154 			return -ENOENT;
1155 
1156 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1157 			offset =
1158 			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1159 		else
1160 			offset =
1161 			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1162 
1163 	} else {
1164 		/* check if RX FIFO is empty */
1165 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1166 		      XCAN_IXR_RXNEMP_MASK))
1167 			return -ENOENT;
1168 
1169 		/* frames are read from a static offset */
1170 		offset = XCAN_RXFIFO_OFFSET;
1171 	}
1172 
1173 	return offset;
1174 }
1175 
1176 /**
1177  * xcan_rx_poll - Poll routine for rx packets (NAPI)
1178  * @napi:	napi structure pointer
1179  * @quota:	Max number of rx packets to be processed.
1180  *
1181  * This is the poll routine for rx part.
1182  * It will process at most @quota packets.
1183  *
1184  * Return: number of packets received
1185  */
1186 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1187 {
1188 	struct net_device *ndev = napi->dev;
1189 	struct xcan_priv *priv = netdev_priv(ndev);
1190 	u32 ier;
1191 	int work_done = 0;
1192 	int frame_offset;
1193 
1194 	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1195 	       (work_done < quota)) {
1196 		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
1197 			work_done += xcanfd_rx(ndev, frame_offset);
1198 		else
1199 			work_done += xcan_rx(ndev, frame_offset);
1200 
1201 		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1202 			/* increment read index */
1203 			priv->write_reg(priv, XCAN_FSR_OFFSET,
1204 					XCAN_FSR_IRI_MASK);
1205 		else
1206 			/* clear rx-not-empty (will actually clear only if
1207 			 * empty)
1208 			 */
1209 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1210 					XCAN_IXR_RXNEMP_MASK);
1211 	}
1212 
1213 	if (work_done) {
1214 		can_led_event(ndev, CAN_LED_EVENT_RX);
1215 		xcan_update_error_state_after_rxtx(ndev);
1216 	}
1217 
1218 	if (work_done < quota) {
1219 		napi_complete_done(napi, work_done);
1220 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1221 		ier |= xcan_rx_int_mask(priv);
1222 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1223 	}
1224 	return work_done;
1225 }
1226 
1227 /**
1228  * xcan_tx_interrupt - Tx Done Isr
1229  * @ndev:	net_device pointer
1230  * @isr:	Interrupt status register value
1231  */
1232 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1233 {
1234 	struct xcan_priv *priv = netdev_priv(ndev);
1235 	struct net_device_stats *stats = &ndev->stats;
1236 	unsigned int frames_in_fifo;
1237 	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1238 	unsigned long flags;
1239 	int retries = 0;
1240 
1241 	/* Synchronize with xmit as we need to know the exact number
1242 	 * of frames in the FIFO to stay in sync due to the TXFEMP
1243 	 * handling.
1244 	 * This also prevents a race between netif_wake_queue() and
1245 	 * netif_stop_queue().
1246 	 */
1247 	spin_lock_irqsave(&priv->tx_lock, flags);
1248 
1249 	frames_in_fifo = priv->tx_head - priv->tx_tail;
1250 
1251 	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1252 		/* clear TXOK anyway to avoid getting back here */
1253 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1254 		spin_unlock_irqrestore(&priv->tx_lock, flags);
1255 		return;
1256 	}
1257 
1258 	/* Check if 2 frames were sent (TXOK only means that at least 1
1259 	 * frame was sent).
1260 	 */
1261 	if (frames_in_fifo > 1) {
1262 		WARN_ON(frames_in_fifo > priv->tx_max);
1263 
1264 		/* Synchronize TXOK and isr so that after the loop:
1265 		 * (1) isr variable is up-to-date at least up to TXOK clear
1266 		 *     time. This avoids us clearing a TXOK of a second frame
1267 		 *     but not noticing that the FIFO is now empty and thus
1268 		 *     marking only a single frame as sent.
1269 		 * (2) No TXOK is left. Having one could mean leaving a
1270 		 *     stray TXOK as we might process the associated frame
1271 		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
1272 		 *     clear to satisfy (1).
1273 		 */
1274 		while ((isr & XCAN_IXR_TXOK_MASK) &&
1275 		       !WARN_ON(++retries == 100)) {
1276 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1277 					XCAN_IXR_TXOK_MASK);
1278 			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1279 		}
1280 
1281 		if (isr & XCAN_IXR_TXFEMP_MASK) {
1282 			/* nothing in FIFO anymore */
1283 			frames_sent = frames_in_fifo;
1284 		}
1285 	} else {
1286 		/* single frame in fifo, just clear TXOK */
1287 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1288 	}
1289 
1290 	while (frames_sent--) {
1291 		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1292 						    priv->tx_max);
1293 		priv->tx_tail++;
1294 		stats->tx_packets++;
1295 	}
1296 
1297 	netif_wake_queue(ndev);
1298 
1299 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1300 
1301 	can_led_event(ndev, CAN_LED_EVENT_TX);
1302 	xcan_update_error_state_after_rxtx(ndev);
1303 }
1304 
1305 /**
1306  * xcan_interrupt - CAN Isr
1307  * @irq:	irq number
1308  * @dev_id:	device id pointer
1309  *
1310  * This is the Xilinx CAN ISR. It checks for the type of interrupt
1311  * and invokes the corresponding ISR.
1312  *
1313  * Return:
1314  * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
1315  */
1316 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1317 {
1318 	struct net_device *ndev = (struct net_device *)dev_id;
1319 	struct xcan_priv *priv = netdev_priv(ndev);
1320 	u32 isr, ier;
1321 	u32 isr_errors;
1322 	u32 rx_int_mask = xcan_rx_int_mask(priv);
1323 
1324 	/* Get the interrupt status from Xilinx CAN */
1325 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1326 	if (!isr)
1327 		return IRQ_NONE;
1328 
1329 	/* Check for the type of interrupt and process it */
1330 	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1331 		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1332 				XCAN_IXR_WKUP_MASK));
1333 		xcan_state_interrupt(ndev, isr);
1334 	}
1335 
1336 	/* Check for Tx interrupt and process it */
1337 	if (isr & XCAN_IXR_TXOK_MASK)
1338 		xcan_tx_interrupt(ndev, isr);
1339 
1340 	/* Check for the type of error interrupt and process it */
1341 	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1342 			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1343 			    XCAN_IXR_RXMNF_MASK);
1344 	if (isr_errors) {
1345 		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1346 		xcan_err_interrupt(ndev, isr);
1347 	}
1348 
1349 	/* Check for the type of receive interrupt and process it */
1350 	if (isr & rx_int_mask) {
1351 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1352 		ier &= ~rx_int_mask;
1353 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1354 		napi_schedule(&priv->napi);
1355 	}
1356 	return IRQ_HANDLED;
1357 }
1358 
1359 /**
1360  * xcan_chip_stop - Driver stop routine
1361  * @ndev:	Pointer to net_device structure
1362  *
1363  * This is the driver's stop routine. It will disable the
1364  * interrupts and put the device into configuration mode.
1365  */
1366 static void xcan_chip_stop(struct net_device *ndev)
1367 {
1368 	struct xcan_priv *priv = netdev_priv(ndev);
1369 
1370 	/* Disable interrupts and leave the CAN device in configuration mode */
1371 	set_reset_mode(ndev);
1372 	priv->can.state = CAN_STATE_STOPPED;
1373 }
1374 
1375 /**
1376  * xcan_open - Driver open routine
1377  * @ndev:	Pointer to net_device structure
1378  *
1379  * This is the driver open routine.
1380  * Return: 0 on success and failure value on error
1381  */
1382 static int xcan_open(struct net_device *ndev)
1383 {
1384 	struct xcan_priv *priv = netdev_priv(ndev);
1385 	int ret;
1386 
1387 	ret = pm_runtime_get_sync(priv->dev);
1388 	if (ret < 0) {
1389 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1390 			   __func__, ret);
1391 		return ret;
1392 	}
1393 
1394 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1395 			  ndev->name, ndev);
1396 	if (ret < 0) {
1397 		netdev_err(ndev, "irq allocation for CAN failed\n");
1398 		goto err;
1399 	}
1400 
1401 	/* Set chip into reset mode */
1402 	ret = set_reset_mode(ndev);
1403 	if (ret < 0) {
1404 		netdev_err(ndev, "mode resetting failed!\n");
1405 		goto err_irq;
1406 	}
1407 
1408 	/* Common open */
1409 	ret = open_candev(ndev);
1410 	if (ret)
1411 		goto err_irq;
1412 
1413 	ret = xcan_chip_start(ndev);
1414 	if (ret < 0) {
1415 		netdev_err(ndev, "xcan_chip_start failed!\n");
1416 		goto err_candev;
1417 	}
1418 
1419 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
1420 	napi_enable(&priv->napi);
1421 	netif_start_queue(ndev);
1422 
1423 	return 0;
1424 
1425 err_candev:
1426 	close_candev(ndev);
1427 err_irq:
1428 	free_irq(ndev->irq, ndev);
1429 err:
1430 	pm_runtime_put(priv->dev);
1431 
1432 	return ret;
1433 }
1434 
1435 /**
1436  * xcan_close - Driver close routine
1437  * @ndev:	Pointer to net_device structure
1438  *
1439  * Return: 0 always
1440  */
1441 static int xcan_close(struct net_device *ndev)
1442 {
1443 	struct xcan_priv *priv = netdev_priv(ndev);
1444 
1445 	netif_stop_queue(ndev);
1446 	napi_disable(&priv->napi);
1447 	xcan_chip_stop(ndev);
1448 	free_irq(ndev->irq, ndev);
1449 	close_candev(ndev);
1450 
1451 	can_led_event(ndev, CAN_LED_EVENT_STOP);
1452 	pm_runtime_put(priv->dev);
1453 
1454 	return 0;
1455 }
1456 
1457 /**
1458  * xcan_get_berr_counter - error counter routine
1459  * @ndev:	Pointer to net_device structure
1460  * @bec:	Pointer to can_berr_counter structure
1461  *
1462  * This is the driver error counter routine.
1463  * Return: 0 on success and failure value on error
1464  */
1465 static int xcan_get_berr_counter(const struct net_device *ndev,
1466 				 struct can_berr_counter *bec)
1467 {
1468 	struct xcan_priv *priv = netdev_priv(ndev);
1469 	int ret;
1470 
1471 	ret = pm_runtime_get_sync(priv->dev);
1472 	if (ret < 0) {
1473 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1474 			   __func__, ret);
1475 		return ret;
1476 	}
1477 
1478 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1479 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1480 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1481 
1482 	pm_runtime_put(priv->dev);
1483 
1484 	return 0;
1485 }
1486 
1487 static const struct net_device_ops xcan_netdev_ops = {
1488 	.ndo_open	= xcan_open,
1489 	.ndo_stop	= xcan_close,
1490 	.ndo_start_xmit	= xcan_start_xmit,
1491 	.ndo_change_mtu	= can_change_mtu,
1492 };
1493 
1494 /**
1495  * xcan_suspend - Suspend method for the driver
1496  * @dev:	Address of the device structure
1497  *
1498  * Put the driver into low power mode.
1499  * Return: 0 on success and failure value on error
1500  */
1501 static int __maybe_unused xcan_suspend(struct device *dev)
1502 {
1503 	struct net_device *ndev = dev_get_drvdata(dev);
1504 
1505 	if (netif_running(ndev)) {
1506 		netif_stop_queue(ndev);
1507 		netif_device_detach(ndev);
1508 		xcan_chip_stop(ndev);
1509 	}
1510 
1511 	return pm_runtime_force_suspend(dev);
1512 }
1513 
1514 /**
1515  * xcan_resume - Resume from suspend
1516  * @dev:	Address of the device structure
1517  *
1518  * Resume operation after suspend.
1519  * Return: 0 on success and failure value on error
1520  */
1521 static int __maybe_unused xcan_resume(struct device *dev)
1522 {
1523 	struct net_device *ndev = dev_get_drvdata(dev);
1524 	int ret;
1525 
1526 	ret = pm_runtime_force_resume(dev);
1527 	if (ret) {
1528 		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1529 		return ret;
1530 	}
1531 
1532 	if (netif_running(ndev)) {
1533 		ret = xcan_chip_start(ndev);
1534 		if (ret) {
1535 			dev_err(dev, "xcan_chip_start failed on resume\n");
1536 			return ret;
1537 		}
1538 
1539 		netif_device_attach(ndev);
1540 		netif_start_queue(ndev);
1541 	}
1542 
1543 	return 0;
1544 }
1545 
1546 /**
1547  * xcan_runtime_suspend - Runtime suspend method for the driver
1548  * @dev:	Address of the device structure
1549  *
1550  * Put the driver into low power mode.
1551  * Return: 0 always
1552  */
1553 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1554 {
1555 	struct net_device *ndev = dev_get_drvdata(dev);
1556 	struct xcan_priv *priv = netdev_priv(ndev);
1557 
1558 	clk_disable_unprepare(priv->bus_clk);
1559 	clk_disable_unprepare(priv->can_clk);
1560 
1561 	return 0;
1562 }
1563 
1564 /**
1565  * xcan_runtime_resume - Runtime resume from suspend
1566  * @dev:	Address of the device structure
1567  *
1568  * Resume operation after suspend.
1569  * Return: 0 on success and failure value on error
1570  */
1571 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1572 {
1573 	struct net_device *ndev = dev_get_drvdata(dev);
1574 	struct xcan_priv *priv = netdev_priv(ndev);
1575 	int ret;
1576 
1577 	ret = clk_prepare_enable(priv->bus_clk);
1578 	if (ret) {
1579 		dev_err(dev, "Cannot enable clock.\n");
1580 		return ret;
1581 	}
1582 	ret = clk_prepare_enable(priv->can_clk);
1583 	if (ret) {
1584 		dev_err(dev, "Cannot enable clock.\n");
1585 		clk_disable_unprepare(priv->bus_clk);
1586 		return ret;
1587 	}
1588 
1589 	return 0;
1590 }
1591 
1592 static const struct dev_pm_ops xcan_dev_pm_ops = {
1593 	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
1594 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1595 };
1596 
1597 static const struct xcan_devtype_data xcan_zynq_data = {
1598 	.cantype = XZYNQ_CANPS,
1599 	.flags = XCAN_FLAG_TXFEMP,
1600 	.bittiming_const = &xcan_bittiming_const,
1601 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1602 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1603 	.bus_clk_name = "pclk",
1604 };
1605 
1606 static const struct xcan_devtype_data xcan_axi_data = {
1607 	.cantype = XAXI_CAN,
1608 	.bittiming_const = &xcan_bittiming_const,
1609 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1610 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1611 	.bus_clk_name = "s_axi_aclk",
1612 };
1613 
1614 static const struct xcan_devtype_data xcan_canfd_data = {
1615 	.cantype = XAXI_CANFD,
1616 	.flags = XCAN_FLAG_EXT_FILTERS |
1617 		 XCAN_FLAG_RXMNF |
1618 		 XCAN_FLAG_TX_MAILBOXES |
1619 		 XCAN_FLAG_RX_FIFO_MULTI,
1620 	.bittiming_const = &xcan_bittiming_const_canfd,
1621 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1622 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1623 	.bus_clk_name = "s_axi_aclk",
1624 };
1625 
1626 static const struct xcan_devtype_data xcan_canfd2_data = {
1627 	.cantype = XAXI_CANFD_2_0,
1628 	.flags = XCAN_FLAG_EXT_FILTERS |
1629 		 XCAN_FLAG_RXMNF |
1630 		 XCAN_FLAG_TX_MAILBOXES |
1631 		 XCAN_FLAG_CANFD_2 |
1632 		 XCAN_FLAG_RX_FIFO_MULTI,
1633 	.bittiming_const = &xcan_bittiming_const_canfd2,
1634 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1635 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1636 	.bus_clk_name = "s_axi_aclk",
1637 };
1638 
1639 /* Match table for OF platform binding */
1640 static const struct of_device_id xcan_of_match[] = {
1641 	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1642 	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1643 	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1644 	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
1645 	{ /* end of list */ },
1646 };
1647 MODULE_DEVICE_TABLE(of, xcan_of_match);
1648 
1649 /**
1650  * xcan_probe - Platform registration call
1651  * @pdev:	Handle to the platform device structure
1652  *
1653  * This function does all the memory allocation and registration for the CAN
1654  * device.
1655  *
1656  * Return: 0 on success and failure value on error
1657  */
1658 static int xcan_probe(struct platform_device *pdev)
1659 {
1660 	struct net_device *ndev;
1661 	struct xcan_priv *priv;
1662 	const struct of_device_id *of_id;
1663 	const struct xcan_devtype_data *devtype = &xcan_axi_data;
1664 	void __iomem *addr;
1665 	int ret;
1666 	int rx_max, tx_max;
1667 	int hw_tx_max, hw_rx_max;
1668 	const char *hw_tx_max_property;
1669 
1670 	/* Get the virtual base address for the device */
1671 	addr = devm_platform_ioremap_resource(pdev, 0);
1672 	if (IS_ERR(addr)) {
1673 		ret = PTR_ERR(addr);
1674 		goto err;
1675 	}
1676 
1677 	of_id = of_match_device(xcan_of_match, &pdev->dev);
1678 	if (of_id && of_id->data)
1679 		devtype = of_id->data;
1680 
1681 	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1682 			     "tx-mailbox-count" : "tx-fifo-depth";
1683 
1684 	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1685 				   &hw_tx_max);
1686 	if (ret < 0) {
1687 		dev_err(&pdev->dev, "missing %s property\n",
1688 			hw_tx_max_property);
1689 		goto err;
1690 	}
1691 
1692 	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1693 				   &hw_rx_max);
1694 	if (ret < 0) {
1695 		dev_err(&pdev->dev,
1696 			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
1697 		goto err;
1698 	}
1699 
1700 	/* With TX FIFO:
1701 	 *
1702 	 * There is no way to directly figure out how many frames have been
1703 	 * sent when the TXOK interrupt is processed. If TXFEMP
1704 	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1705 	 * to determine if 1 or 2 frames have been sent.
1706 	 * Theoretically we should be able to use TXFWMEMP to determine up
1707 	 * to 3 frames, but it seems that after putting a second frame in the
1708 	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1709 	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1710 	 * sent), which is not a sensible state - possibly TXFWMEMP is not
1711 	 * completely synchronized with the rest of the bits?
1712 	 *
1713 	 * With TX mailboxes:
1714 	 *
1715 	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1716 	 * we submit frames one at a time.
1717 	 */
1718 	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1719 	    (devtype->flags & XCAN_FLAG_TXFEMP))
1720 		tx_max = min(hw_tx_max, 2);
1721 	else
1722 		tx_max = 1;
1723 
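	/* Net effect of the tx_max selection above (illustrative): a Zynq
	 * CANPS with a 64-deep TX FIFO ends up with tx_max = 2 thanks to
	 * TXFEMP, while AXI CAN and the CAN FD cores queue one frame at a
	 * time.
	 */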
1724 	rx_max = hw_rx_max;
1725 
1726 	/* Create a CAN device instance */
1727 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1728 	if (!ndev)
1729 		return -ENOMEM;
1730 
1731 	priv = netdev_priv(ndev);
1732 	priv->dev = &pdev->dev;
1733 	priv->can.bittiming_const = devtype->bittiming_const;
1734 	priv->can.do_set_mode = xcan_do_set_mode;
1735 	priv->can.do_get_berr_counter = xcan_get_berr_counter;
1736 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1737 					CAN_CTRLMODE_BERR_REPORTING;
1738 
1739 	if (devtype->cantype == XAXI_CANFD)
1740 		priv->can.data_bittiming_const =
1741 			&xcan_data_bittiming_const_canfd;
1742 
1743 	if (devtype->cantype == XAXI_CANFD_2_0)
1744 		priv->can.data_bittiming_const =
1745 			&xcan_data_bittiming_const_canfd2;
1746 
1747 	if (devtype->cantype == XAXI_CANFD ||
1748 	    devtype->cantype == XAXI_CANFD_2_0)
1749 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1750 
1751 	priv->reg_base = addr;
1752 	priv->tx_max = tx_max;
1753 	priv->devtype = *devtype;
1754 	spin_lock_init(&priv->tx_lock);
1755 
1756 	/* Get IRQ for the device */
1757 	ndev->irq = platform_get_irq(pdev, 0);
1758 	ndev->flags |= IFF_ECHO;	/* We support local echo */
1759 
1760 	platform_set_drvdata(pdev, ndev);
1761 	SET_NETDEV_DEV(ndev, &pdev->dev);
1762 	ndev->netdev_ops = &xcan_netdev_ops;
1763 
1764 	/* Getting the CAN can_clk info */
1765 	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1766 	if (IS_ERR(priv->can_clk)) {
1767 		if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
1768 			dev_err(&pdev->dev, "Device clock not found.\n");
1769 		ret = PTR_ERR(priv->can_clk);
1770 		goto err_free;
1771 	}
1772 
1773 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1774 	if (IS_ERR(priv->bus_clk)) {
1775 		dev_err(&pdev->dev, "bus clock not found\n");
1776 		ret = PTR_ERR(priv->bus_clk);
1777 		goto err_free;
1778 	}
1779 
1780 	priv->write_reg = xcan_write_reg_le;
1781 	priv->read_reg = xcan_read_reg_le;
1782 
1783 	pm_runtime_enable(&pdev->dev);
1784 	ret = pm_runtime_get_sync(&pdev->dev);
1785 	if (ret < 0) {
1786 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1787 			   __func__, ret);
1788 		goto err_pmdisable;
1789 	}
1790 
1791 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1792 		priv->write_reg = xcan_write_reg_be;
1793 		priv->read_reg = xcan_read_reg_be;
1794 	}
1795 
1796 	priv->can.clock.freq = clk_get_rate(priv->can_clk);
1797 
1798 	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1799 
1800 	ret = register_candev(ndev);
1801 	if (ret) {
1802 		dev_err(&pdev->dev, "failed to register CAN device (err=%d)\n", ret);
1803 		goto err_disableclks;
1804 	}
1805 
1806 	devm_can_led_init(ndev);
1807 
1808 	pm_runtime_put(&pdev->dev);
1809 
1810 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1811 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
1812 		   hw_tx_max, priv->tx_max);
1813 
1814 	return 0;
1815 
1816 err_disableclks:
1817 	pm_runtime_put(priv->dev);
1818 err_pmdisable:
1819 	pm_runtime_disable(&pdev->dev);
1820 err_free:
1821 	free_candev(ndev);
1822 err:
1823 	return ret;
1824 }
1825 
1826 /**
1827  * xcan_remove - Unregister the device after releasing the resources
1828  * @pdev:	Handle to the platform device structure
1829  *
1830  * This function frees all the resources allocated to the device.
1831  * Return: 0 always
1832  */
1833 static int xcan_remove(struct platform_device *pdev)
1834 {
1835 	struct net_device *ndev = platform_get_drvdata(pdev);
1836 	struct xcan_priv *priv = netdev_priv(ndev);
1837 
1838 	unregister_candev(ndev);
1839 	pm_runtime_disable(&pdev->dev);
1840 	netif_napi_del(&priv->napi);
1841 	free_candev(ndev);
1842 
1843 	return 0;
1844 }
1845 
1846 static struct platform_driver xcan_driver = {
1847 	.probe = xcan_probe,
1848 	.remove	= xcan_remove,
1849 	.driver	= {
1850 		.name = DRIVER_NAME,
1851 		.pm = &xcan_dev_pm_ops,
1852 		.of_match_table	= xcan_of_match,
1853 	},
1854 };
1855 
1856 module_platform_driver(xcan_driver);
1857 
1858 MODULE_LICENSE("GPL");
1859 MODULE_AUTHOR("Xilinx Inc");
1860 MODULE_DESCRIPTION("Xilinx CAN interface");
1861