xref: /linux/drivers/net/can/xilinx_can.c (revision cbac924200b838cfb8d8b1415113d788089dc50b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3  *
4  * Copyright (C) 2012 - 2014 Xilinx, Inc.
5  * Copyright (C) 2009 PetaLogix. All rights reserved.
6  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7  *
8  * Description:
9  * This driver is developed for the Xilinx AXI CAN IP and the Zynq CANPS controller.
10  */
11 
12 #include <linux/clk.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
25 #include <linux/string.h>
26 #include <linux/types.h>
27 #include <linux/can/dev.h>
28 #include <linux/can/error.h>
29 #include <linux/can/led.h>
30 #include <linux/pm_runtime.h>
31 
32 #define DRIVER_NAME	"xilinx_can"
33 
34 /* CAN registers set */
35 enum xcan_reg {
36 	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
37 	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
38 	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
39 	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
40 	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
41 	XCAN_ESR_OFFSET		= 0x14, /* Error status */
42 	XCAN_SR_OFFSET		= 0x18, /* Status */
43 	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
44 	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
45 	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */
46 
47 	/* not on CAN FD cores */
48 	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
49 	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
50 	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */
51 
52 	/* only on CAN FD cores */
53 	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
54 					  * Prescaler
55 					  */
56 	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
57 	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
58 	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
59 	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
60 	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
61 	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
62 	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
63 	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
64 	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
65 };
66 
67 #define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
68 #define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
69 #define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
70 #define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
71 #define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)
72 
73 #define XCAN_CANFD_FRAME_SIZE		0x48
74 #define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
75 					 XCAN_CANFD_FRAME_SIZE * (n))
76 #define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
77 					 XCAN_CANFD_FRAME_SIZE * (n))
78 #define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
79 					 XCAN_CANFD_FRAME_SIZE * (n))
80 
81 /* the single TX mailbox used by this driver on CAN FD HW */
82 #define XCAN_TX_MAILBOX_IDX		0
83 
84 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
85 #define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
86 #define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
87 #define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
88 #define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
89 #define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
90 #define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
91 #define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
92 #define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
93 #define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
94 #define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
95 #define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
96 #define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
97 #define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
98 #define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
99 #define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
100 #define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
101 #define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
102 #define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
103 #define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
104 #define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
105 #define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
106 #define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
107 #define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
108 #define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
109 #define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
110 #define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
111 #define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
112 #define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
113 #define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
114 #define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
115 #define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
116 #define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
117 #define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
118 #define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
119 #define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
120 #define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
121 #define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
122 #define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
123 #define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
124 #define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
125 #define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
126 #define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
127 #define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
128 #define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
129 #define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
130 #define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
131 #define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
132 #define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
133 #define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */
134 
135 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
136 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
137 #define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
138 #define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
139 #define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
140 #define XCAN_IDR_ID1_SHIFT		21 /* Standard Message Identifier */
141 #define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
142 #define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
143 #define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */
144 
145 /* CAN frame length constants */
146 #define XCAN_FRAME_MAX_DATA_LEN		8
147 #define XCANFD_DW_BYTES			4
148 #define XCAN_TIMEOUT			(1 * HZ)
149 
150 /* TX-FIFO-empty interrupt available */
151 #define XCAN_FLAG_TXFEMP	0x0001
152 /* RX Match Not Finished interrupt available */
153 #define XCAN_FLAG_RXMNF		0x0002
154 /* Extended acceptance filters with control at 0xE0 */
155 #define XCAN_FLAG_EXT_FILTERS	0x0004
156 /* TX mailboxes instead of TX FIFO */
157 #define XCAN_FLAG_TX_MAILBOXES	0x0008
158 /* RX FIFO with each buffer in separate registers at 0x1100
159  * instead of the regular FIFO at 0x50
160  */
161 #define XCAN_FLAG_RX_FIFO_MULTI	0x0010
162 #define XCAN_FLAG_CANFD_2	0x0020
163 
164 enum xcan_ip_type {
165 	XAXI_CAN = 0,
166 	XZYNQ_CANPS,
167 	XAXI_CANFD,
168 	XAXI_CANFD_2_0,
169 };
170 
171 struct xcan_devtype_data {
172 	enum xcan_ip_type cantype;
173 	unsigned int flags;
174 	const struct can_bittiming_const *bittiming_const;
175 	const char *bus_clk_name;
176 	unsigned int btr_ts2_shift;
177 	unsigned int btr_sjw_shift;
178 };
179 
180 /**
181  * struct xcan_priv - This structure defines the CAN driver instance
182  * @can:			CAN private data structure.
183  * @tx_lock:			Lock for synchronizing TX interrupt handling
184  * @tx_head:			Tx CAN packets ready to send on the queue
185  * @tx_tail:			Tx CAN packets successfully sent on the queue
186  * @tx_max:			Maximum number of packets the driver can send
187  * @napi:			NAPI structure
188  * @read_reg:			For reading data from CAN registers
189  * @write_reg:			For writing data to CAN registers
190  * @dev:			Network device data structure
191  * @reg_base:			Ioremapped address to registers
192  * @irq_flags:			For request_irq()
193  * @bus_clk:			Pointer to struct clk
194  * @can_clk:			Pointer to struct clk
195  * @devtype:			Device type specific constants
196  */
197 struct xcan_priv {
198 	struct can_priv can;
199 	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
200 	unsigned int tx_head;
201 	unsigned int tx_tail;
202 	unsigned int tx_max;
203 	struct napi_struct napi;
204 	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
205 	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
206 			  u32 val);
207 	struct device *dev;
208 	void __iomem *reg_base;
209 	unsigned long irq_flags;
210 	struct clk *bus_clk;
211 	struct clk *can_clk;
212 	struct xcan_devtype_data devtype;
213 };
214 
215 /* CAN Bittiming constants as per Xilinx CAN specs */
216 static const struct can_bittiming_const xcan_bittiming_const = {
217 	.name = DRIVER_NAME,
218 	.tseg1_min = 1,
219 	.tseg1_max = 16,
220 	.tseg2_min = 1,
221 	.tseg2_max = 8,
222 	.sjw_max = 4,
223 	.brp_min = 1,
224 	.brp_max = 256,
225 	.brp_inc = 1,
226 };
227 
228 /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
229 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
230 	.name = DRIVER_NAME,
231 	.tseg1_min = 1,
232 	.tseg1_max = 64,
233 	.tseg2_min = 1,
234 	.tseg2_max = 16,
235 	.sjw_max = 16,
236 	.brp_min = 1,
237 	.brp_max = 256,
238 	.brp_inc = 1,
239 };
240 
241 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
242 static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
243 	.name = DRIVER_NAME,
244 	.tseg1_min = 1,
245 	.tseg1_max = 16,
246 	.tseg2_min = 1,
247 	.tseg2_max = 8,
248 	.sjw_max = 8,
249 	.brp_min = 1,
250 	.brp_max = 256,
251 	.brp_inc = 1,
252 };
253 
254 /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
255 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
256 	.name = DRIVER_NAME,
257 	.tseg1_min = 1,
258 	.tseg1_max = 256,
259 	.tseg2_min = 1,
260 	.tseg2_max = 128,
261 	.sjw_max = 128,
262 	.brp_min = 2,
263 	.brp_max = 256,
264 	.brp_inc = 1,
265 };
266 
267 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
268 static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
269 	.name = DRIVER_NAME,
270 	.tseg1_min = 1,
271 	.tseg1_max = 32,
272 	.tseg2_min = 1,
273 	.tseg2_max = 16,
274 	.sjw_max = 16,
275 	.brp_min = 2,
276 	.brp_max = 256,
277 	.brp_inc = 1,
278 };
279 
280 /**
281  * xcan_write_reg_le - Write a value to the device register little endian
282  * @priv:	Driver private data structure
283  * @reg:	Register offset
284  * @val:	Value to write at the Register offset
285  *
286  * Write data to the particular CAN register
287  */
288 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
289 			      u32 val)
290 {
291 	iowrite32(val, priv->reg_base + reg);
292 }
293 
294 /**
295  * xcan_read_reg_le - Read a value from the device register little endian
296  * @priv:	Driver private data structure
297  * @reg:	Register offset
298  *
299  * Read data from the particular CAN register
300  * Return: value read from the CAN register
301  */
302 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
303 {
304 	return ioread32(priv->reg_base + reg);
305 }
306 
307 /**
308  * xcan_write_reg_be - Write a value to the device register big endian
309  * @priv:	Driver private data structure
310  * @reg:	Register offset
311  * @val:	Value to write at the Register offset
312  *
313  * Write data to the particular CAN register
314  */
315 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
316 			      u32 val)
317 {
318 	iowrite32be(val, priv->reg_base + reg);
319 }
320 
321 /**
322  * xcan_read_reg_be - Read a value from the device register big endian
323  * @priv:	Driver private data structure
324  * @reg:	Register offset
325  *
326  * Read data from the particular CAN register
327  * Return: value read from the CAN register
328  */
329 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
330 {
331 	return ioread32be(priv->reg_base + reg);
332 }
333 
334 /**
335  * xcan_rx_int_mask - Get the mask for the receive interrupt
336  * @priv:	Driver private data structure
337  *
338  * Return: The receive interrupt mask used by the driver on this HW
339  */
340 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
341 {
342 	/* RXNEMP is better suited for our use case as it cannot be cleared
343 	 * while the FIFO is non-empty, but CAN FD HW does not have it
344 	 */
345 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
346 		return XCAN_IXR_RXOK_MASK;
347 	else
348 		return XCAN_IXR_RXNEMP_MASK;
349 }
350 
351 /**
352  * set_reset_mode - Resets the CAN device mode
353  * @ndev:	Pointer to net_device structure
354  *
355  * This is the driver reset mode routine. The driver
356  * enters configuration mode.
357  *
358  * Return: 0 on success and failure value on error
359  */
360 static int set_reset_mode(struct net_device *ndev)
361 {
362 	struct xcan_priv *priv = netdev_priv(ndev);
363 	unsigned long timeout;
364 
365 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
366 
367 	timeout = jiffies + XCAN_TIMEOUT;
368 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
369 		if (time_after(jiffies, timeout)) {
370 			netdev_warn(ndev, "timed out for config mode\n");
371 			return -ETIMEDOUT;
372 		}
373 		usleep_range(500, 10000);
374 	}
375 
376 	/* reset clears FIFOs */
377 	priv->tx_head = 0;
378 	priv->tx_tail = 0;
379 
380 	return 0;
381 }
382 
383 /**
384  * xcan_set_bittiming - CAN set bit timing routine
385  * @ndev:	Pointer to net_device structure
386  *
387  * This is the driver set bittiming routine.
388  * Return: 0 on success and failure value on error
389  */
390 static int xcan_set_bittiming(struct net_device *ndev)
391 {
392 	struct xcan_priv *priv = netdev_priv(ndev);
393 	struct can_bittiming *bt = &priv->can.bittiming;
394 	struct can_bittiming *dbt = &priv->can.data_bittiming;
395 	u32 btr0, btr1;
396 	u32 is_config_mode;
397 
398 	/* Check whether Xilinx CAN is in configuration mode.
399 	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
400 	 */
401 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
402 				XCAN_SR_CONFIG_MASK;
403 	if (!is_config_mode) {
404 		netdev_alert(ndev,
405 			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
406 		return -EPERM;
407 	}
408 
409 	/* Setting Baud Rate prescaler value in BRPR Register */
410 	btr0 = (bt->brp - 1);
411 
412 	/* Setting Time Segment 1 in BTR Register */
413 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
414 
415 	/* Setting Time Segment 2 in BTR Register */
416 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
417 
418 	/* Setting Synchronous jump width in BTR Register */
419 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
420 
421 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
422 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
423 
424 	if (priv->devtype.cantype == XAXI_CANFD ||
425 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
426 		/* Setting Baud Rate prescaler value in F_BRPR Register */
427 		btr0 = dbt->brp - 1;
428 
429 		/* Setting Time Segment 1 in BTR Register */
430 		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
431 
432 		/* Setting Time Segment 2 in BTR Register */
433 		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
434 
435 		/* Setting Synchronous jump width in BTR Register */
436 		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
437 
438 		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
439 		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
440 	}
441 
442 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
443 		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
444 		   priv->read_reg(priv, XCAN_BTR_OFFSET));
445 
446 	return 0;
447 }
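/* Worked example (illustrative only, not taken from any particular board):
 * assume can_clk = 24 MHz and bittiming values of brp = 3,
 * prop_seg + phase_seg1 = 11, phase_seg2 = 4, sjw = 1 for a nominal
 * 500 kbit/s rate (tq = 3 / 24 MHz = 125 ns, 1 + 11 + 4 = 16 tq per bit,
 * 16 * 125 ns = 2 us). With the Zynq/AXI CAN shifts (TS2 shift 4,
 * SJW shift 7) xcan_set_bittiming() would then program:
 *   BRPR = brp - 1 = 2
 *   BTR  = (sjw - 1) << 7 | (phase_seg2 - 1) << 4 | (prop_seg + phase_seg1 - 1)
 *        = 0x00 | 0x30 | 0x0A = 0x3A
 */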
448 
449 /**
450  * xcan_chip_start - Driver start routine
451  * @ndev:	Pointer to net_device structure
452  *
453  * This is the driver's start routine.
454  * Based on the State of the CAN device it puts
455  * the CAN device into a proper mode.
456  *
457  * Return: 0 on success and failure value on error
458  */
459 static int xcan_chip_start(struct net_device *ndev)
460 {
461 	struct xcan_priv *priv = netdev_priv(ndev);
462 	u32 reg_msr;
463 	int err;
464 	u32 ier;
465 
466 	/* Check if it is in reset mode */
467 	err = set_reset_mode(ndev);
468 	if (err < 0)
469 		return err;
470 
471 	err = xcan_set_bittiming(ndev);
472 	if (err < 0)
473 		return err;
474 
475 	/* Enable interrupts
476 	 *
477 	 * We enable the ERROR interrupt even with
478 	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
479 	 * dedicated interrupt for a state change to
480 	 * ERROR_WARNING/ERROR_PASSIVE.
481 	 */
482 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
483 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
484 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
485 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
486 
487 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
488 		ier |= XCAN_IXR_RXMNF_MASK;
489 
490 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
491 
492 	/* Check whether it is loopback mode or normal mode  */
493 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
494 		reg_msr = XCAN_MSR_LBACK_MASK;
495 	else
496 		reg_msr = 0x0;
497 
498 	/* enable the first extended filter, if any, as cores with extended
499 	 * filtering default to non-receipt if all filters are disabled
500 	 */
501 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
502 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
503 
504 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
505 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
506 
507 	netdev_dbg(ndev, "status:0x%08x\n",
508 		   priv->read_reg(priv, XCAN_SR_OFFSET));
509 
510 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
511 	return 0;
512 }
513 
514 /**
515  * xcan_do_set_mode - This sets the mode of the driver
516  * @ndev:	Pointer to net_device structure
517  * @mode:	Tells the mode of the driver
518  *
519  * This checks the requested mode and calls the corresponding routine to set it.
520  *
521  * Return: 0 on success and failure value on error
522  */
523 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
524 {
525 	int ret;
526 
527 	switch (mode) {
528 	case CAN_MODE_START:
529 		ret = xcan_chip_start(ndev);
530 		if (ret < 0) {
531 			netdev_err(ndev, "xcan_chip_start failed!\n");
532 			return ret;
533 		}
534 		netif_wake_queue(ndev);
535 		break;
536 	default:
537 		ret = -EOPNOTSUPP;
538 		break;
539 	}
540 
541 	return ret;
542 }
543 
544 /**
545  * xcan_write_frame - Write a frame to HW
546  * @ndev:		Pointer to net_device structure
547  * @skb:		sk_buff pointer that contains data to be Txed
548  * @frame_offset:	Register offset to write the frame to
549  */
550 static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
551 			     int frame_offset)
552 {
553 	u32 id, dlc, data[2] = {0, 0};
554 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
555 	u32 ramoff, dwindex = 0, i;
556 	struct xcan_priv *priv = netdev_priv(ndev);
557 
558 	/* Watch the ID bit layout carefully */
559 	if (cf->can_id & CAN_EFF_FLAG) {
560 		/* Extended CAN ID format */
561 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
562 			XCAN_IDR_ID2_MASK;
563 		id |= (((cf->can_id & CAN_EFF_MASK) >>
564 			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
565 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
566 
567 		/* The substitute remote TX request bit should be "1"
568 		 * for extended frames as in the Xilinx CAN datasheet
569 		 */
570 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
571 
572 		if (cf->can_id & CAN_RTR_FLAG)
573 			/* Extended frames remote TX request */
574 			id |= XCAN_IDR_RTR_MASK;
575 	} else {
576 		/* Standard CAN ID format */
577 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
578 			XCAN_IDR_ID1_MASK;
579 
580 		if (cf->can_id & CAN_RTR_FLAG)
581 			/* Standard frames remote TX request */
582 			id |= XCAN_IDR_SRR_MASK;
583 	}
584 
585 	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
586 	if (can_is_canfd_skb(skb)) {
587 		if (cf->flags & CANFD_BRS)
588 			dlc |= XCAN_DLCR_BRS_MASK;
589 		dlc |= XCAN_DLCR_EDL_MASK;
590 	}
591 
592 	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
593 	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
594 		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
595 	else
596 		can_put_echo_skb(skb, ndev, 0, 0);
597 
598 	priv->tx_head++;
599 
600 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
601 	/* If the CAN frame is an RTR frame this write triggers transmission
602 	 * (not on CAN FD)
603 	 */
604 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
605 	if (priv->devtype.cantype == XAXI_CANFD ||
606 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
607 		for (i = 0; i < cf->len; i += 4) {
608 			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
609 					(dwindex * XCANFD_DW_BYTES);
610 			priv->write_reg(priv, ramoff,
611 					be32_to_cpup((__be32 *)(cf->data + i)));
612 			dwindex++;
613 		}
614 	} else {
615 		if (cf->len > 0)
616 			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
617 		if (cf->len > 4)
618 			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
619 
620 		if (!(cf->can_id & CAN_RTR_FLAG)) {
621 			priv->write_reg(priv,
622 					XCAN_FRAME_DW1_OFFSET(frame_offset),
623 					data[0]);
624 			/* If the CAN frame is a Standard/Extended frame this
625 			 * write triggers transmission (not on CAN FD)
626 			 */
627 			priv->write_reg(priv,
628 					XCAN_FRAME_DW2_OFFSET(frame_offset),
629 					data[1]);
630 		}
631 	}
632 }
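/* Illustrative example (values chosen arbitrarily): for a standard-ID data
 * frame with can_id = 0x123 and len = 8, xcan_write_frame() builds
 *   id  = 0x123 << XCAN_IDR_ID1_SHIFT (21)              = 0x24600000
 *   dlc = can_fd_len2dlc(8) << XCAN_DLCR_DLC_SHIFT (28) = 0x80000000
 * For an extended frame the 29-bit ID is split instead: the upper 11 bits
 * go to ID1 (bits 31..21), the lower 18 bits to ID2 (bits 18..1), with SRR
 * (bit 20) and IDE (bit 19) set in between and RTR in bit 0.
 */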
633 
634 /**
635  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
636  * @skb:	sk_buff pointer that contains data to be Txed
637  * @ndev:	Pointer to net_device structure
638  *
639  * Return: 0 on success, -ENOSPC if FIFO is full.
640  */
641 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
642 {
643 	struct xcan_priv *priv = netdev_priv(ndev);
644 	unsigned long flags;
645 
646 	/* Check if the TX buffer is full */
647 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
648 			XCAN_SR_TXFLL_MASK))
649 		return -ENOSPC;
650 
651 	spin_lock_irqsave(&priv->tx_lock, flags);
652 
653 	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
654 
655 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
656 	if (priv->tx_max > 1)
657 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
658 
659 	/* Check if the TX buffer is full */
660 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
661 		netif_stop_queue(ndev);
662 
663 	spin_unlock_irqrestore(&priv->tx_lock, flags);
664 
665 	return 0;
666 }
667 
668 /**
669  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
670  * @skb:	sk_buff pointer that contains data to be Txed
671  * @ndev:	Pointer to net_device structure
672  *
673  * Return: 0 on success, -ENOSPC if there is no space
674  */
675 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
676 {
677 	struct xcan_priv *priv = netdev_priv(ndev);
678 	unsigned long flags;
679 
680 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
681 		     BIT(XCAN_TX_MAILBOX_IDX)))
682 		return -ENOSPC;
683 
684 	spin_lock_irqsave(&priv->tx_lock, flags);
685 
686 	xcan_write_frame(ndev, skb,
687 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
688 
689 	/* Mark buffer as ready for transmit */
690 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
691 
692 	netif_stop_queue(ndev);
693 
694 	spin_unlock_irqrestore(&priv->tx_lock, flags);
695 
696 	return 0;
697 }
698 
699 /**
700  * xcan_start_xmit - Starts the transmission
701  * @skb:	sk_buff pointer that contains data to be Txed
702  * @ndev:	Pointer to net_device structure
703  *
704  * This function is invoked from upper layers to initiate transmission.
705  *
706  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
707  */
708 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
709 {
710 	struct xcan_priv *priv = netdev_priv(ndev);
711 	int ret;
712 
713 	if (can_dropped_invalid_skb(ndev, skb))
714 		return NETDEV_TX_OK;
715 
716 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
717 		ret = xcan_start_xmit_mailbox(skb, ndev);
718 	else
719 		ret = xcan_start_xmit_fifo(skb, ndev);
720 
721 	if (ret < 0) {
722 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
723 		netif_stop_queue(ndev);
724 		return NETDEV_TX_BUSY;
725 	}
726 
727 	return NETDEV_TX_OK;
728 }
729 
730 /**
731  * xcan_rx - Is called from the CAN ISR to complete the received
732  *		frame processing
733  * @ndev:	Pointer to net_device structure
734  * @frame_base:	Register offset to the frame to be read
735  *
736  * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
737  * does minimal processing and invokes "netif_receive_skb" to complete further
738  * processing.
739  * Return: 1 on success and 0 on failure.
740  */
741 static int xcan_rx(struct net_device *ndev, int frame_base)
742 {
743 	struct xcan_priv *priv = netdev_priv(ndev);
744 	struct net_device_stats *stats = &ndev->stats;
745 	struct can_frame *cf;
746 	struct sk_buff *skb;
747 	u32 id_xcan, dlc, data[2] = {0, 0};
748 
749 	skb = alloc_can_skb(ndev, &cf);
750 	if (unlikely(!skb)) {
751 		stats->rx_dropped++;
752 		return 0;
753 	}
754 
755 	/* Read a frame from the Xilinx CAN HW */
756 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
757 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
758 				   XCAN_DLCR_DLC_SHIFT;
759 
760 	/* Change Xilinx CAN data length format to socketCAN data format */
761 	cf->len = can_cc_dlc2len(dlc);
762 
763 	/* Change Xilinx CAN ID format to socketCAN ID format */
764 	if (id_xcan & XCAN_IDR_IDE_MASK) {
765 		/* The received frame is an Extended format frame */
766 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
767 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
768 				XCAN_IDR_ID2_SHIFT;
769 		cf->can_id |= CAN_EFF_FLAG;
770 		if (id_xcan & XCAN_IDR_RTR_MASK)
771 			cf->can_id |= CAN_RTR_FLAG;
772 	} else {
773 		/* The received frame is a standard format frame */
774 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
775 				XCAN_IDR_ID1_SHIFT;
776 		if (id_xcan & XCAN_IDR_SRR_MASK)
777 			cf->can_id |= CAN_RTR_FLAG;
778 	}
779 
780 	/* DW1/DW2 must always be read to remove message from RXFIFO */
781 	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
782 	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
783 
784 	if (!(cf->can_id & CAN_RTR_FLAG)) {
785 		/* Change Xilinx CAN data format to socketCAN data format */
786 		if (cf->len > 0)
787 			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
788 		if (cf->len > 4)
789 			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
790 
791 		stats->rx_bytes += cf->len;
792 	}
793 	stats->rx_packets++;
794 
795 	netif_receive_skb(skb);
796 
797 	return 1;
798 }
799 
800 /**
801  * xcanfd_rx - Is called from the CAN ISR to complete the received
802  *		frame processing
803  * @ndev:	Pointer to net_device structure
804  * @frame_base:	Register offset to the frame to be read
805  *
806  * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
807  * does minimal processing and invokes "netif_receive_skb" to complete further
808  * processing.
809  * Return: 1 on success and 0 on failure.
810  */
811 static int xcanfd_rx(struct net_device *ndev, int frame_base)
812 {
813 	struct xcan_priv *priv = netdev_priv(ndev);
814 	struct net_device_stats *stats = &ndev->stats;
815 	struct canfd_frame *cf;
816 	struct sk_buff *skb;
817 	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
818 
819 	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
820 	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
821 	if (dlc & XCAN_DLCR_EDL_MASK)
822 		skb = alloc_canfd_skb(ndev, &cf);
823 	else
824 		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
825 
826 	if (unlikely(!skb)) {
827 		stats->rx_dropped++;
828 		return 0;
829 	}
830 
831 	/* Change Xilinx CANFD data length format to socketCAN data
832 	 * format
833 	 */
834 	if (dlc & XCAN_DLCR_EDL_MASK)
835 		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
836 				  XCAN_DLCR_DLC_SHIFT);
837 	else
838 		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
839 					  XCAN_DLCR_DLC_SHIFT);
840 
841 	/* Change Xilinx CAN ID format to socketCAN ID format */
842 	if (id_xcan & XCAN_IDR_IDE_MASK) {
843 		/* The received frame is an Extended format frame */
844 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
845 		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
846 				XCAN_IDR_ID2_SHIFT;
847 		cf->can_id |= CAN_EFF_FLAG;
848 		if (id_xcan & XCAN_IDR_RTR_MASK)
849 			cf->can_id |= CAN_RTR_FLAG;
850 	} else {
851 		/* The received frame is a standard format frame */
852 		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
853 				XCAN_IDR_ID1_SHIFT;
854 		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
855 					XCAN_IDR_SRR_MASK))
856 			cf->can_id |= CAN_RTR_FLAG;
857 	}
858 
859 	/* Check whether the received frame is FD or not */
860 	if (dlc & XCAN_DLCR_EDL_MASK) {
861 		for (i = 0; i < cf->len; i += 4) {
862 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
863 					(dwindex * XCANFD_DW_BYTES);
864 			data[0] = priv->read_reg(priv, dw_offset);
865 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
866 			dwindex++;
867 		}
868 	} else {
869 		for (i = 0; i < cf->len; i += 4) {
870 			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
871 			data[0] = priv->read_reg(priv, dw_offset + i);
872 			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
873 		}
874 	}
875 
876 	if (!(cf->can_id & CAN_RTR_FLAG))
877 		stats->rx_bytes += cf->len;
878 	stats->rx_packets++;
879 
880 	netif_receive_skb(skb);
881 
882 	return 1;
883 }
884 
885 /**
886  * xcan_current_error_state - Get current error state from HW
887  * @ndev:	Pointer to net_device structure
888  *
889  * Checks the current CAN error state from the HW. Note that this
890  * only checks for ERROR_PASSIVE and ERROR_WARNING.
891  *
892  * Return:
893  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
894  * otherwise.
895  */
896 static enum can_state xcan_current_error_state(struct net_device *ndev)
897 {
898 	struct xcan_priv *priv = netdev_priv(ndev);
899 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
900 
901 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
902 		return CAN_STATE_ERROR_PASSIVE;
903 	else if (status & XCAN_SR_ERRWRN_MASK)
904 		return CAN_STATE_ERROR_WARNING;
905 	else
906 		return CAN_STATE_ERROR_ACTIVE;
907 }
908 
909 /**
910  * xcan_set_error_state - Set new CAN error state
911  * @ndev:	Pointer to net_device structure
912  * @new_state:	The new CAN state to be set
913  * @cf:		Error frame to be populated or NULL
914  *
915  * Set new CAN error state for the device, updating statistics and
916  * populating the error frame if given.
917  */
918 static void xcan_set_error_state(struct net_device *ndev,
919 				 enum can_state new_state,
920 				 struct can_frame *cf)
921 {
922 	struct xcan_priv *priv = netdev_priv(ndev);
923 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
924 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
925 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
926 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
927 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
928 
929 	/* non-ERROR states are handled elsewhere */
930 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
931 		return;
932 
933 	can_change_state(ndev, cf, tx_state, rx_state);
934 
935 	if (cf) {
936 		cf->data[6] = txerr;
937 		cf->data[7] = rxerr;
938 	}
939 }
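/* Illustrative example (ECR value invented for this comment): if ECR reads
 * 0x00002080, then rxerr = 0x20 (32) and txerr = 0x80 (128); since
 * txerr > rxerr only tx_state takes the new state, and the two counters are
 * reported in data[6]/data[7] of the error frame when one was supplied.
 */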
940 
941 /**
942  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
943  * @ndev:	Pointer to net_device structure
944  *
945  * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
946  * the performed RX/TX has caused it to drop to a lesser state and set
947  * the interface state accordingly.
948  */
949 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
950 {
951 	struct xcan_priv *priv = netdev_priv(ndev);
952 	enum can_state old_state = priv->can.state;
953 	enum can_state new_state;
954 
955 	/* changing error state due to successful frame RX/TX can only
956 	 * occur from these states
957 	 */
958 	if (old_state != CAN_STATE_ERROR_WARNING &&
959 	    old_state != CAN_STATE_ERROR_PASSIVE)
960 		return;
961 
962 	new_state = xcan_current_error_state(ndev);
963 
964 	if (new_state != old_state) {
965 		struct sk_buff *skb;
966 		struct can_frame *cf;
967 
968 		skb = alloc_can_err_skb(ndev, &cf);
969 
970 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
971 
972 		if (skb)
973 			netif_rx(skb);
974 	}
975 }
976 
977 /**
978  * xcan_err_interrupt - error frame Isr
979  * @ndev:	net_device pointer
980  * @isr:	interrupt status register value
981  *
982  * This is the CAN error interrupt and it will
983  * check the type of error and forward the error
984  * frame to upper layers.
985  */
986 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
987 {
988 	struct xcan_priv *priv = netdev_priv(ndev);
989 	struct net_device_stats *stats = &ndev->stats;
990 	struct can_frame cf = { };
991 	u32 err_status;
992 
993 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
994 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
995 
996 	if (isr & XCAN_IXR_BSOFF_MASK) {
997 		priv->can.state = CAN_STATE_BUS_OFF;
998 		priv->can.can_stats.bus_off++;
999 		/* Leave device in Config Mode in bus-off state */
1000 		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1001 		can_bus_off(ndev);
1002 		cf.can_id |= CAN_ERR_BUSOFF;
1003 	} else {
1004 		enum can_state new_state = xcan_current_error_state(ndev);
1005 
1006 		if (new_state != priv->can.state)
1007 			xcan_set_error_state(ndev, new_state, &cf);
1008 	}
1009 
1010 	/* Check for Arbitration lost interrupt */
1011 	if (isr & XCAN_IXR_ARBLST_MASK) {
1012 		priv->can.can_stats.arbitration_lost++;
1013 		cf.can_id |= CAN_ERR_LOSTARB;
1014 		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
1015 	}
1016 
1017 	/* Check for RX FIFO Overflow interrupt */
1018 	if (isr & XCAN_IXR_RXOFLW_MASK) {
1019 		stats->rx_over_errors++;
1020 		stats->rx_errors++;
1021 		cf.can_id |= CAN_ERR_CRTL;
1022 		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1023 	}
1024 
1025 	/* Check for RX Match Not Finished interrupt */
1026 	if (isr & XCAN_IXR_RXMNF_MASK) {
1027 		stats->rx_dropped++;
1028 		stats->rx_errors++;
1029 		netdev_err(ndev, "RX match not finished, frame discarded\n");
1030 		cf.can_id |= CAN_ERR_CRTL;
1031 		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
1032 	}
1033 
1034 	/* Check for error interrupt */
1035 	if (isr & XCAN_IXR_ERROR_MASK) {
1036 		bool berr_reporting = false;
1037 
1038 		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
1039 			berr_reporting = true;
1040 			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1041 		}
1042 
1043 		/* Check for Ack error interrupt */
1044 		if (err_status & XCAN_ESR_ACKER_MASK) {
1045 			stats->tx_errors++;
1046 			if (berr_reporting) {
1047 				cf.can_id |= CAN_ERR_ACK;
1048 				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
1049 			}
1050 		}
1051 
1052 		/* Check for Bit error interrupt */
1053 		if (err_status & XCAN_ESR_BERR_MASK) {
1054 			stats->tx_errors++;
1055 			if (berr_reporting) {
1056 				cf.can_id |= CAN_ERR_PROT;
1057 				cf.data[2] = CAN_ERR_PROT_BIT;
1058 			}
1059 		}
1060 
1061 		/* Check for Stuff error interrupt */
1062 		if (err_status & XCAN_ESR_STER_MASK) {
1063 			stats->rx_errors++;
1064 			if (berr_reporting) {
1065 				cf.can_id |= CAN_ERR_PROT;
1066 				cf.data[2] = CAN_ERR_PROT_STUFF;
1067 			}
1068 		}
1069 
1070 		/* Check for Form error interrupt */
1071 		if (err_status & XCAN_ESR_FMER_MASK) {
1072 			stats->rx_errors++;
1073 			if (berr_reporting) {
1074 				cf.can_id |= CAN_ERR_PROT;
1075 				cf.data[2] = CAN_ERR_PROT_FORM;
1076 			}
1077 		}
1078 
1079 		/* Check for CRC error interrupt */
1080 		if (err_status & XCAN_ESR_CRCER_MASK) {
1081 			stats->rx_errors++;
1082 			if (berr_reporting) {
1083 				cf.can_id |= CAN_ERR_PROT;
1084 				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
1085 			}
1086 		}
1087 		priv->can.can_stats.bus_error++;
1088 	}
1089 
1090 	if (cf.can_id) {
1091 		struct can_frame *skb_cf;
1092 		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
1093 
1094 		if (skb) {
1095 			skb_cf->can_id |= cf.can_id;
1096 			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
1097 			netif_rx(skb);
1098 		}
1099 	}
1100 
1101 	netdev_dbg(ndev, "%s: error status register:0x%x\n",
1102 		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1103 }
1104 
1105 /**
1106  * xcan_state_interrupt - Check the state of the CAN device
1107  * @ndev:	net_device pointer
1108  * @isr:	interrupt status register value
1109  *
1110  * This checks the state of the CAN device
1111  * and puts the device into the appropriate state.
1112  */
1113 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1114 {
1115 	struct xcan_priv *priv = netdev_priv(ndev);
1116 
1117 	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
1118 	if (isr & XCAN_IXR_SLP_MASK)
1119 		priv->can.state = CAN_STATE_SLEEPING;
1120 
1121 	/* Check for Wake up interrupt; if set, put CAN device in Active state */
1122 	if (isr & XCAN_IXR_WKUP_MASK)
1123 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
1124 }
1125 
1126 /**
1127  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1128  * @priv:	Driver private data structure
1129  *
1130  * Return: Register offset of the next frame in RX FIFO.
1131  */
1132 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1133 {
1134 	int offset;
1135 
1136 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1137 		u32 fsr, mask;
1138 
1139 		/* clear RXOK before the is-empty check so that any newly
1140 		 * received frame will reassert it without a race
1141 		 */
1142 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1143 
1144 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
1145 
1146 		/* check if RX FIFO is empty */
1147 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1148 			mask = XCAN_2_FSR_FL_MASK;
1149 		else
1150 			mask = XCAN_FSR_FL_MASK;
1151 
1152 		if (!(fsr & mask))
1153 			return -ENOENT;
1154 
1155 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1156 			offset =
1157 			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1158 		else
1159 			offset =
1160 			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1161 
1162 	} else {
1163 		/* check if RX FIFO is empty */
1164 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1165 		      XCAN_IXR_RXNEMP_MASK))
1166 			return -ENOENT;
1167 
1168 		/* frames are read from a static offset */
1169 		offset = XCAN_RXFIFO_OFFSET;
1170 	}
1171 
1172 	return offset;
1173 }
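/* Illustrative example (FSR value invented): on CAN FD 1.0 HW
 * (XCAN_FLAG_RX_FIFO_MULTI without XCAN_FLAG_CANFD_2), FSR = 0x00000203
 * means a fill level of 2 and a read index of 3, so the next frame is read
 * from XCAN_RXMSG_FRAME_OFFSET(3) = 0x1100 + 3 * 0x48 = 0x11D8.
 */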
1174 
1175 /**
1176  * xcan_rx_poll - Poll routine for rx packets (NAPI)
1177  * @napi:	napi structure pointer
1178  * @quota:	Max number of rx packets to be processed.
1179  *
1180  * This is the poll routine for the rx path.
1181  * It will process packets up to the maximum quota value.
1182  *
1183  * Return: number of packets received
1184  */
1185 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1186 {
1187 	struct net_device *ndev = napi->dev;
1188 	struct xcan_priv *priv = netdev_priv(ndev);
1189 	u32 ier;
1190 	int work_done = 0;
1191 	int frame_offset;
1192 
1193 	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1194 	       (work_done < quota)) {
1195 		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
1196 			work_done += xcanfd_rx(ndev, frame_offset);
1197 		else
1198 			work_done += xcan_rx(ndev, frame_offset);
1199 
1200 		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1201 			/* increment read index */
1202 			priv->write_reg(priv, XCAN_FSR_OFFSET,
1203 					XCAN_FSR_IRI_MASK);
1204 		else
1205 			/* clear rx-not-empty (will actually clear only if
1206 			 * empty)
1207 			 */
1208 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1209 					XCAN_IXR_RXNEMP_MASK);
1210 	}
1211 
1212 	if (work_done) {
1213 		can_led_event(ndev, CAN_LED_EVENT_RX);
1214 		xcan_update_error_state_after_rxtx(ndev);
1215 	}
1216 
1217 	if (work_done < quota) {
1218 		if (napi_complete_done(napi, work_done)) {
1219 			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1220 			ier |= xcan_rx_int_mask(priv);
1221 			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1222 		}
1223 	}
1224 	return work_done;
1225 }
1226 
1227 /**
1228  * xcan_tx_interrupt - Tx Done Isr
1229  * @ndev:	net_device pointer
1230  * @isr:	Interrupt status register value
1231  */
1232 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1233 {
1234 	struct xcan_priv *priv = netdev_priv(ndev);
1235 	struct net_device_stats *stats = &ndev->stats;
1236 	unsigned int frames_in_fifo;
1237 	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1238 	unsigned long flags;
1239 	int retries = 0;
1240 
1241 	/* Synchronize with xmit as we need to know the exact number
1242 	 * of frames in the FIFO to stay in sync due to the TXFEMP
1243 	 * handling.
1244 	 * This also prevents a race between netif_wake_queue() and
1245 	 * netif_stop_queue().
1246 	 */
1247 	spin_lock_irqsave(&priv->tx_lock, flags);
1248 
1249 	frames_in_fifo = priv->tx_head - priv->tx_tail;
1250 
1251 	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1252 		/* clear TXOK anyway to avoid getting back here */
1253 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1254 		spin_unlock_irqrestore(&priv->tx_lock, flags);
1255 		return;
1256 	}
1257 
1258 	/* Check if 2 frames were sent (TXOK only means that at least 1
1259 	 * frame was sent).
1260 	 */
1261 	if (frames_in_fifo > 1) {
1262 		WARN_ON(frames_in_fifo > priv->tx_max);
1263 
1264 		/* Synchronize TXOK and isr so that after the loop:
1265 		 * (1) isr variable is up-to-date at least up to TXOK clear
1266 		 *     time. This avoids us clearing a TXOK of a second frame
1267 		 *     but not noticing that the FIFO is now empty and thus
1268 		 *     marking only a single frame as sent.
1269 		 * (2) No TXOK is left. Having one could mean leaving a
1270 		 *     stray TXOK as we might process the associated frame
1271 		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
1272 		 *     clear to satisfy (1).
1273 		 */
1274 		while ((isr & XCAN_IXR_TXOK_MASK) &&
1275 		       !WARN_ON(++retries == 100)) {
1276 			priv->write_reg(priv, XCAN_ICR_OFFSET,
1277 					XCAN_IXR_TXOK_MASK);
1278 			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1279 		}
1280 
1281 		if (isr & XCAN_IXR_TXFEMP_MASK) {
1282 			/* nothing in FIFO anymore */
1283 			frames_sent = frames_in_fifo;
1284 		}
1285 	} else {
1286 		/* single frame in fifo, just clear TXOK */
1287 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1288 	}
1289 
1290 	while (frames_sent--) {
1291 		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1292 						    priv->tx_max, NULL);
1293 		priv->tx_tail++;
1294 		stats->tx_packets++;
1295 	}
1296 
1297 	netif_wake_queue(ndev);
1298 
1299 	spin_unlock_irqrestore(&priv->tx_lock, flags);
1300 
1301 	can_led_event(ndev, CAN_LED_EVENT_TX);
1302 	xcan_update_error_state_after_rxtx(ndev);
1303 }
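/* Example of the two-frame case above (FIFO mode with tx_max = 2): if
 * tx_head - tx_tail == 2 when TXOK fires and TXFEMP is set once all TXOK
 * bits have been cleared, both echo skbs are completed in this single
 * interrupt; otherwise only one is, and the second completes on a later
 * TXOK.
 */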
1304 
1305 /**
1306  * xcan_interrupt - CAN Isr
1307  * @irq:	irq number
1308  * @dev_id:	device id pointer
1309  *
1310  * This is the Xilinx CAN ISR. It checks the type of interrupt
1311  * and invokes the corresponding ISR.
1312  *
1313  * Return:
1314  * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
1315  */
1316 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1317 {
1318 	struct net_device *ndev = (struct net_device *)dev_id;
1319 	struct xcan_priv *priv = netdev_priv(ndev);
1320 	u32 isr, ier;
1321 	u32 isr_errors;
1322 	u32 rx_int_mask = xcan_rx_int_mask(priv);
1323 
1324 	/* Get the interrupt status from Xilinx CAN */
1325 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1326 	if (!isr)
1327 		return IRQ_NONE;
1328 
1329 	/* Check for the type of interrupt and process it */
1330 	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1331 		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1332 				XCAN_IXR_WKUP_MASK));
1333 		xcan_state_interrupt(ndev, isr);
1334 	}
1335 
1336 	/* Check for Tx interrupt and process it */
1337 	if (isr & XCAN_IXR_TXOK_MASK)
1338 		xcan_tx_interrupt(ndev, isr);
1339 
1340 	/* Check for the type of error interrupt and process it */
1341 	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1342 			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1343 			    XCAN_IXR_RXMNF_MASK);
1344 	if (isr_errors) {
1345 		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1346 		xcan_err_interrupt(ndev, isr);
1347 	}
1348 
1349 	/* Check for the type of receive interrupt and process it */
1350 	if (isr & rx_int_mask) {
1351 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1352 		ier &= ~rx_int_mask;
1353 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1354 		napi_schedule(&priv->napi);
1355 	}
1356 	return IRQ_HANDLED;
1357 }
1358 
1359 /**
1360  * xcan_chip_stop - Driver stop routine
1361  * @ndev:	Pointer to net_device structure
1362  *
1363  * This is the driver's stop routine. It will disable the
1364  * interrupts and put the device into configuration mode.
1365  */
1366 static void xcan_chip_stop(struct net_device *ndev)
1367 {
1368 	struct xcan_priv *priv = netdev_priv(ndev);
1369 	int ret;
1370 
1371 	/* Disable interrupts and leave the CAN core in configuration mode */
1372 	ret = set_reset_mode(ndev);
1373 	if (ret < 0)
1374 		netdev_dbg(ndev, "set_reset_mode() Failed\n");
1375 
1376 	priv->can.state = CAN_STATE_STOPPED;
1377 }
1378 
1379 /**
1380  * xcan_open - Driver open routine
1381  * @ndev:	Pointer to net_device structure
1382  *
1383  * This is the driver open routine.
1384  * Return: 0 on success and failure value on error
1385  */
1386 static int xcan_open(struct net_device *ndev)
1387 {
1388 	struct xcan_priv *priv = netdev_priv(ndev);
1389 	int ret;
1390 
1391 	ret = pm_runtime_get_sync(priv->dev);
1392 	if (ret < 0) {
1393 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1394 			   __func__, ret);
1395 		goto err;
1396 	}
1397 
1398 	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1399 			  ndev->name, ndev);
1400 	if (ret < 0) {
1401 		netdev_err(ndev, "irq allocation for CAN failed\n");
1402 		goto err;
1403 	}
1404 
1405 	/* Set chip into reset mode */
1406 	ret = set_reset_mode(ndev);
1407 	if (ret < 0) {
1408 		netdev_err(ndev, "mode resetting failed!\n");
1409 		goto err_irq;
1410 	}
1411 
1412 	/* Common open */
1413 	ret = open_candev(ndev);
1414 	if (ret)
1415 		goto err_irq;
1416 
1417 	ret = xcan_chip_start(ndev);
1418 	if (ret < 0) {
1419 		netdev_err(ndev, "xcan_chip_start failed!\n");
1420 		goto err_candev;
1421 	}
1422 
1423 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
1424 	napi_enable(&priv->napi);
1425 	netif_start_queue(ndev);
1426 
1427 	return 0;
1428 
1429 err_candev:
1430 	close_candev(ndev);
1431 err_irq:
1432 	free_irq(ndev->irq, ndev);
1433 err:
1434 	pm_runtime_put(priv->dev);
1435 
1436 	return ret;
1437 }
1438 
1439 /**
1440  * xcan_close - Driver close routine
1441  * @ndev:	Pointer to net_device structure
1442  *
1443  * Return: 0 always
1444  */
1445 static int xcan_close(struct net_device *ndev)
1446 {
1447 	struct xcan_priv *priv = netdev_priv(ndev);
1448 
1449 	netif_stop_queue(ndev);
1450 	napi_disable(&priv->napi);
1451 	xcan_chip_stop(ndev);
1452 	free_irq(ndev->irq, ndev);
1453 	close_candev(ndev);
1454 
1455 	can_led_event(ndev, CAN_LED_EVENT_STOP);
1456 	pm_runtime_put(priv->dev);
1457 
1458 	return 0;
1459 }
1460 
1461 /**
1462  * xcan_get_berr_counter - error counter routine
1463  * @ndev:	Pointer to net_device structure
1464  * @bec:	Pointer to can_berr_counter structure
1465  *
1466  * This is the driver error counter routine.
1467  * Return: 0 on success and failure value on error
1468  */
1469 static int xcan_get_berr_counter(const struct net_device *ndev,
1470 				 struct can_berr_counter *bec)
1471 {
1472 	struct xcan_priv *priv = netdev_priv(ndev);
1473 	int ret;
1474 
1475 	ret = pm_runtime_get_sync(priv->dev);
1476 	if (ret < 0) {
1477 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1478 			   __func__, ret);
1479 		pm_runtime_put(priv->dev);
1480 		return ret;
1481 	}
1482 
1483 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1484 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1485 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1486 
1487 	pm_runtime_put(priv->dev);
1488 
1489 	return 0;
1490 }
1491 
1492 static const struct net_device_ops xcan_netdev_ops = {
1493 	.ndo_open	= xcan_open,
1494 	.ndo_stop	= xcan_close,
1495 	.ndo_start_xmit	= xcan_start_xmit,
1496 	.ndo_change_mtu	= can_change_mtu,
1497 };
1498 
1499 /**
1500  * xcan_suspend - Suspend method for the driver
1501  * @dev:	Address of the device structure
1502  *
1503  * Put the driver into low power mode.
1504  * Return: 0 on success and failure value on error
1505  */
1506 static int __maybe_unused xcan_suspend(struct device *dev)
1507 {
1508 	struct net_device *ndev = dev_get_drvdata(dev);
1509 
1510 	if (netif_running(ndev)) {
1511 		netif_stop_queue(ndev);
1512 		netif_device_detach(ndev);
1513 		xcan_chip_stop(ndev);
1514 	}
1515 
1516 	return pm_runtime_force_suspend(dev);
1517 }
1518 
1519 /**
1520  * xcan_resume - Resume from suspend
1521  * @dev:	Address of the device structure
1522  *
1523  * Resume operation after suspend.
1524  * Return: 0 on success and failure value on error
1525  */
1526 static int __maybe_unused xcan_resume(struct device *dev)
1527 {
1528 	struct net_device *ndev = dev_get_drvdata(dev);
1529 	int ret;
1530 
1531 	ret = pm_runtime_force_resume(dev);
1532 	if (ret) {
1533 		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1534 		return ret;
1535 	}
1536 
1537 	if (netif_running(ndev)) {
1538 		ret = xcan_chip_start(ndev);
1539 		if (ret) {
1540 			dev_err(dev, "xcan_chip_start failed on resume\n");
1541 			return ret;
1542 		}
1543 
1544 		netif_device_attach(ndev);
1545 		netif_start_queue(ndev);
1546 	}
1547 
1548 	return 0;
1549 }
1550 
1551 /**
1552  * xcan_runtime_suspend - Runtime suspend method for the driver
1553  * @dev:	Address of the device structure
1554  *
1555  * Put the driver into low power mode.
1556  * Return: 0 always
1557  */
1558 static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1559 {
1560 	struct net_device *ndev = dev_get_drvdata(dev);
1561 	struct xcan_priv *priv = netdev_priv(ndev);
1562 
1563 	clk_disable_unprepare(priv->bus_clk);
1564 	clk_disable_unprepare(priv->can_clk);
1565 
1566 	return 0;
1567 }
1568 
1569 /**
1570  * xcan_runtime_resume - Runtime resume from suspend
1571  * @dev:	Address of the device structure
1572  *
1573  * Resume operation after suspend.
1574  * Return: 0 on success and failure value on error
1575  */
1576 static int __maybe_unused xcan_runtime_resume(struct device *dev)
1577 {
1578 	struct net_device *ndev = dev_get_drvdata(dev);
1579 	struct xcan_priv *priv = netdev_priv(ndev);
1580 	int ret;
1581 
1582 	ret = clk_prepare_enable(priv->bus_clk);
1583 	if (ret) {
1584 		dev_err(dev, "Cannot enable bus clock.\n");
1585 		return ret;
1586 	}
1587 	ret = clk_prepare_enable(priv->can_clk);
1588 	if (ret) {
1589 		dev_err(dev, "Cannot enable CAN clock.\n");
1590 		clk_disable_unprepare(priv->bus_clk);
1591 		return ret;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 static const struct dev_pm_ops xcan_dev_pm_ops = {
1598 	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
1599 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1600 };
1601 
1602 static const struct xcan_devtype_data xcan_zynq_data = {
1603 	.cantype = XZYNQ_CANPS,
1604 	.flags = XCAN_FLAG_TXFEMP,
1605 	.bittiming_const = &xcan_bittiming_const,
1606 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1607 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1608 	.bus_clk_name = "pclk",
1609 };
1610 
1611 static const struct xcan_devtype_data xcan_axi_data = {
1612 	.cantype = XAXI_CAN,
1613 	.bittiming_const = &xcan_bittiming_const,
1614 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1615 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
1616 	.bus_clk_name = "s_axi_aclk",
1617 };
1618 
1619 static const struct xcan_devtype_data xcan_canfd_data = {
1620 	.cantype = XAXI_CANFD,
1621 	.flags = XCAN_FLAG_EXT_FILTERS |
1622 		 XCAN_FLAG_RXMNF |
1623 		 XCAN_FLAG_TX_MAILBOXES |
1624 		 XCAN_FLAG_RX_FIFO_MULTI,
1625 	.bittiming_const = &xcan_bittiming_const_canfd,
1626 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1627 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1628 	.bus_clk_name = "s_axi_aclk",
1629 };
1630 
1631 static const struct xcan_devtype_data xcan_canfd2_data = {
1632 	.cantype = XAXI_CANFD_2_0,
1633 	.flags = XCAN_FLAG_EXT_FILTERS |
1634 		 XCAN_FLAG_RXMNF |
1635 		 XCAN_FLAG_TX_MAILBOXES |
1636 		 XCAN_FLAG_CANFD_2 |
1637 		 XCAN_FLAG_RX_FIFO_MULTI,
1638 	.bittiming_const = &xcan_bittiming_const_canfd2,
1639 	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
1640 	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
1641 	.bus_clk_name = "s_axi_aclk",
1642 };
1643 
1644 /* Match table for OF platform binding */
1645 static const struct of_device_id xcan_of_match[] = {
1646 	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1647 	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
1648 	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
1649 	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
1650 	{ /* end of list */ },
1651 };
1652 MODULE_DEVICE_TABLE(of, xcan_of_match);
1653 
1654 /**
1655  * xcan_probe - Platform registration call
1656  * @pdev:	Handle to the platform device structure
1657  *
1658  * This function does all the memory allocation and registration for the CAN
1659  * device.
1660  *
1661  * Return: 0 on success and failure value on error
1662  */
1663 static int xcan_probe(struct platform_device *pdev)
1664 {
1665 	struct net_device *ndev;
1666 	struct xcan_priv *priv;
1667 	const struct of_device_id *of_id;
1668 	const struct xcan_devtype_data *devtype = &xcan_axi_data;
1669 	void __iomem *addr;
1670 	int ret;
1671 	int rx_max, tx_max;
1672 	u32 hw_tx_max = 0, hw_rx_max = 0;
1673 	const char *hw_tx_max_property;
1674 
1675 	/* Get the virtual base address for the device */
1676 	addr = devm_platform_ioremap_resource(pdev, 0);
1677 	if (IS_ERR(addr)) {
1678 		ret = PTR_ERR(addr);
1679 		goto err;
1680 	}
1681 
1682 	of_id = of_match_device(xcan_of_match, &pdev->dev);
1683 	if (of_id && of_id->data)
1684 		devtype = of_id->data;
1685 
1686 	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1687 			     "tx-mailbox-count" : "tx-fifo-depth";
1688 
1689 	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1690 				   &hw_tx_max);
1691 	if (ret < 0) {
1692 		dev_err(&pdev->dev, "missing %s property\n",
1693 			hw_tx_max_property);
1694 		goto err;
1695 	}
1696 
1697 	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1698 				   &hw_rx_max);
1699 	if (ret < 0) {
1700 		dev_err(&pdev->dev,
1701 			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
1702 		goto err;
1703 	}
1704 
1705 	/* With TX FIFO:
1706 	 *
1707 	 * There is no way to directly figure out how many frames have been
1708 	 * sent when the TXOK interrupt is processed. If TXFEMP
1709 	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1710 	 * to determine if 1 or 2 frames have been sent.
1711 	 * Theoretically we should be able to use TXFWMEMP to determine up
1712 	 * to 3 frames, but it seems that after putting a second frame in the
1713 	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1714 	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1715 	 * sent), which is not a sensible state - possibly TXFWMEMP is not
1716 	 * completely synchronized with the rest of the bits?
1717 	 *
1718 	 * With TX mailboxes:
1719 	 *
1720 	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1721 	 * we submit frames one at a time.
1722 	 */
1723 	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1724 	    (devtype->flags & XCAN_FLAG_TXFEMP))
1725 		tx_max = min(hw_tx_max, 2U);
1726 	else
1727 		tx_max = 1;
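	/* For example (device tree values are hypothetical): a Zynq CANPS node
	 * with tx-fifo-depth = 64 ends up with tx_max = 2 since the core has
	 * XCAN_FLAG_TXFEMP, while an AXI CAN FD node with tx-mailbox-count = 32
	 * uses tx_max = 1 because it transmits from mailboxes.
	 */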
1728 
1729 	rx_max = hw_rx_max;
1730 
1731 	/* Create a CAN device instance */
1732 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1733 	if (!ndev)
1734 		return -ENOMEM;
1735 
1736 	priv = netdev_priv(ndev);
1737 	priv->dev = &pdev->dev;
1738 	priv->can.bittiming_const = devtype->bittiming_const;
1739 	priv->can.do_set_mode = xcan_do_set_mode;
1740 	priv->can.do_get_berr_counter = xcan_get_berr_counter;
1741 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1742 					CAN_CTRLMODE_BERR_REPORTING;
1743 
1744 	if (devtype->cantype == XAXI_CANFD)
1745 		priv->can.data_bittiming_const =
1746 			&xcan_data_bittiming_const_canfd;
1747 
1748 	if (devtype->cantype == XAXI_CANFD_2_0)
1749 		priv->can.data_bittiming_const =
1750 			&xcan_data_bittiming_const_canfd2;
1751 
1752 	if (devtype->cantype == XAXI_CANFD ||
1753 	    devtype->cantype == XAXI_CANFD_2_0)
1754 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1755 
1756 	priv->reg_base = addr;
1757 	priv->tx_max = tx_max;
1758 	priv->devtype = *devtype;
1759 	spin_lock_init(&priv->tx_lock);
1760 
1761 	/* Get IRQ for the device */
1762 	ret = platform_get_irq(pdev, 0);
1763 	if (ret < 0)
1764 		goto err_free;
1765 
1766 	ndev->irq = ret;
1767 
1768 	ndev->flags |= IFF_ECHO;	/* We support local echo */
1769 
1770 	platform_set_drvdata(pdev, ndev);
1771 	SET_NETDEV_DEV(ndev, &pdev->dev);
1772 	ndev->netdev_ops = &xcan_netdev_ops;
1773 
1774 	/* Getting the CAN can_clk info */
1775 	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1776 	if (IS_ERR(priv->can_clk)) {
1777 		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
1778 				    "device clock not found\n");
1779 		goto err_free;
1780 	}
1781 
1782 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1783 	if (IS_ERR(priv->bus_clk)) {
1784 		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
1785 				    "bus clock not found\n");
1786 		goto err_free;
1787 	}
1788 
1789 	priv->write_reg = xcan_write_reg_le;
1790 	priv->read_reg = xcan_read_reg_le;
1791 
1792 	pm_runtime_enable(&pdev->dev);
1793 	ret = pm_runtime_get_sync(&pdev->dev);
1794 	if (ret < 0) {
1795 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1796 			   __func__, ret);
1797 		goto err_disableclks;
1798 	}
1799 
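	/* The core comes up in configuration mode, so a little-endian read of
	 * the status register should return exactly XCAN_SR_CONFIG_MASK; if it
	 * does not, assume the registers are attached big-endian and switch to
	 * the big-endian accessors.
	 */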
1800 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1801 		priv->write_reg = xcan_write_reg_be;
1802 		priv->read_reg = xcan_read_reg_be;
1803 	}
1804 
1805 	priv->can.clock.freq = clk_get_rate(priv->can_clk);
1806 
1807 	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1808 
1809 	ret = register_candev(ndev);
1810 	if (ret) {
1811 		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
1812 		goto err_disableclks;
1813 	}
1814 
1815 	devm_can_led_init(ndev);
1816 
1817 	pm_runtime_put(&pdev->dev);
1818 
1819 	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
1820 		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
1821 		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
1822 	}
1823 
1824 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1825 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
1826 		   hw_tx_max, priv->tx_max);
1827 
1828 	return 0;
1829 
1830 err_disableclks:
1831 	pm_runtime_put(priv->dev);
1832 	pm_runtime_disable(&pdev->dev);
1833 err_free:
1834 	free_candev(ndev);
1835 err:
1836 	return ret;
1837 }
1838 
1839 /**
1840  * xcan_remove - Unregister the device after releasing the resources
1841  * @pdev:	Handle to the platform device structure
1842  *
1843  * This function frees all the resources allocated to the device.
1844  * Return: 0 always
1845  */
1846 static int xcan_remove(struct platform_device *pdev)
1847 {
1848 	struct net_device *ndev = platform_get_drvdata(pdev);
1849 
1850 	unregister_candev(ndev);
1851 	pm_runtime_disable(&pdev->dev);
1852 	free_candev(ndev);
1853 
1854 	return 0;
1855 }
1856 
1857 static struct platform_driver xcan_driver = {
1858 	.probe = xcan_probe,
1859 	.remove	= xcan_remove,
1860 	.driver	= {
1861 		.name = DRIVER_NAME,
1862 		.pm = &xcan_dev_pm_ops,
1863 		.of_match_table	= xcan_of_match,
1864 	},
1865 };
1866 
1867 module_platform_driver(xcan_driver);
1868 
1869 MODULE_LICENSE("GPL");
1870 MODULE_AUTHOR("Xilinx Inc");
1871 MODULE_DESCRIPTION("Xilinx CAN interface");
1872