1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Xilinx CAN device driver
3 *
4 * Copyright (C) 2012 - 2022 Xilinx, Inc.
5 * Copyright (C) 2009 PetaLogix. All rights reserved.
6 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7 *
8 * Description:
9  * This driver supports the AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS controllers.
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/errno.h>
15 #include <linux/ethtool.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/netdevice.h>
22 #include <linux/of.h>
23 #include <linux/platform_device.h>
24 #include <linux/property.h>
25 #include <linux/skbuff.h>
26 #include <linux/spinlock.h>
27 #include <linux/string.h>
28 #include <linux/types.h>
29 #include <linux/can/dev.h>
30 #include <linux/can/error.h>
31 #include <linux/phy/phy.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/reset.h>
34 #include <linux/u64_stats_sync.h>
35
36 #define DRIVER_NAME "xilinx_can"
37
38 /* CAN registers set */
39 enum xcan_reg {
40 XCAN_SRR_OFFSET = 0x00, /* Software reset */
41 XCAN_MSR_OFFSET = 0x04, /* Mode select */
42 XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
43 XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
44 XCAN_ECR_OFFSET = 0x10, /* Error counter */
45 XCAN_ESR_OFFSET = 0x14, /* Error status */
46 XCAN_SR_OFFSET = 0x18, /* Status */
47 XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
48 XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
49 XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
50
51 /* not on CAN FD cores */
52 XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */
53 XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */
54 XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
55
56 /* only on CAN FD cores */
57 XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
58 * Prescaler
59 */
60 XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
61 XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
62
63 /* only on AXI CAN cores */
64 XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
65 XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
66 XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
67 XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
68
69 XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
70 XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
71 XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
72 XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
73 XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
74 XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */
75 XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */
76 };
77
78 #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
79 #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
80 #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
81 #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
82 #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
83
84 #define XCAN_CANFD_FRAME_SIZE 0x48
85 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
86 XCAN_CANFD_FRAME_SIZE * (n))
87 #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
88 XCAN_CANFD_FRAME_SIZE * (n))
89 #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
90 XCAN_CANFD_FRAME_SIZE * (n))
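/* Worked example of the layout implied by the macros above: with a frame
 * stride of 0x48 bytes, RX buffer 1 on AXI CANFD 1.0 cores would start at
 * 0x1100 + 1 * 0x48 = 0x1148.
 */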
91
92 /* the single TX mailbox used by this driver on CAN FD HW */
93 #define XCAN_TX_MAILBOX_IDX 0
94
95 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
96 #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
97 #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
98 #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
99 #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
100 #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
101 #define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
102 #define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
103 #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
104 #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
105 #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
106 #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */
107 #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */
108 #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */
109 #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
110 #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
111 #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
112 #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
113 #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
114 #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
115 #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
116 #define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
117 #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
118 #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
119 #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
120 #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
121 #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
122 #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
123 #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */
124 #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
125 #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
126 #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
127 #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
128 #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
129 #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
130 #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
131 #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
132 #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
133 #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
134 #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
135 #define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
136 #define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
137 #define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
138 #define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */
139 #define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */
140 #define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */
141 #define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
142 XCAN_IXR_E1BERX_MASK | \
143 XCAN_IXR_E2BETXOL_MASK | \
144 XCAN_IXR_E1BETXOL_MASK | \
145 XCAN_IXR_E2BETXTL_MASK | \
146 XCAN_IXR_E1BETXTL_MASK)
147 #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
148 #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
149 #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
150 #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
151 #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
152 #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
153 #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
154 #define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
155 #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
156 #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
157 #define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
158 #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
159 #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
160 #define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
161 #define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
162 #define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
163 #define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
164 #define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
165
166 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
167 #define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
168 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
169 #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
170 #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
171 #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */
172 #define XCAN_IDR_ID1_SHIFT 21 /* Standard Message Identifier */
173 #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
174 #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
175 #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
176
177 /* CAN frame length constants */
178 #define XCAN_FRAME_MAX_DATA_LEN 8
179 #define XCANFD_DW_BYTES 4
180 #define XCAN_TIMEOUT (1 * HZ)
181
182 /* TX-FIFO-empty interrupt available */
183 #define XCAN_FLAG_TXFEMP 0x0001
184 /* RX Match Not Finished interrupt available */
185 #define XCAN_FLAG_RXMNF 0x0002
186 /* Extended acceptance filters with control at 0xE0 */
187 #define XCAN_FLAG_EXT_FILTERS 0x0004
188 /* TX mailboxes instead of TX FIFO */
189 #define XCAN_FLAG_TX_MAILBOXES 0x0008
190 /* RX FIFO with each buffer in separate registers at 0x1100
191 * instead of the regular FIFO at 0x50
192 */
193 #define XCAN_FLAG_RX_FIFO_MULTI 0x0010
194 #define XCAN_FLAG_CANFD_2 0x0020
195
196 enum xcan_ip_type {
197 XAXI_CAN = 0,
198 XZYNQ_CANPS,
199 XAXI_CANFD,
200 XAXI_CANFD_2_0,
201 };
202
203 struct xcan_devtype_data {
204 enum xcan_ip_type cantype;
205 unsigned int flags;
206 const struct can_bittiming_const *bittiming_const;
207 const char *bus_clk_name;
208 unsigned int btr_ts2_shift;
209 unsigned int btr_sjw_shift;
210 };
211
212 /**
213 * struct xcan_priv - This definition defines the CAN driver instance
214 * @can: CAN private data structure.
215 * @tx_lock: Lock for synchronizing TX interrupt handling
216 * @tx_head: Tx CAN packets ready to send on the queue
217 * @tx_tail: Tx CAN packets successfully sent on the queue
218 * @tx_max: Maximum number of packets the driver can send
219 * @napi: NAPI structure
220 * @read_reg: For reading data from CAN registers
221 * @write_reg: For writing data to CAN registers
222 * @dev: Network device data structure
223 * @reg_base: Ioremapped address to registers
224 * @irq_flags: For request_irq()
225 * @bus_clk: Pointer to struct clk
226 * @can_clk: Pointer to struct clk
227 * @devtype: Device type specific constants
228 * @transceiver: Optional pointer to associated CAN transceiver
229 * @rstc: Pointer to reset control
230 * @ecc_enable: ECC enable flag
231 * @syncp: synchronization for ECC error stats
232 * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
233 * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
234 * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
235 * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
236 * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
237 * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
238 */
239 struct xcan_priv {
240 struct can_priv can;
241 spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
242 unsigned int tx_head;
243 unsigned int tx_tail;
244 unsigned int tx_max;
245 struct napi_struct napi;
246 u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
247 void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
248 u32 val);
249 struct device *dev;
250 void __iomem *reg_base;
251 unsigned long irq_flags;
252 struct clk *bus_clk;
253 struct clk *can_clk;
254 struct xcan_devtype_data devtype;
255 struct phy *transceiver;
256 struct reset_control *rstc;
257 bool ecc_enable;
258 struct u64_stats_sync syncp;
259 u64_stats_t ecc_rx_2_bit_errors;
260 u64_stats_t ecc_rx_1_bit_errors;
261 u64_stats_t ecc_txol_2_bit_errors;
262 u64_stats_t ecc_txol_1_bit_errors;
263 u64_stats_t ecc_txtl_2_bit_errors;
264 u64_stats_t ecc_txtl_1_bit_errors;
265 };
266
267 /* CAN Bittiming constants as per Xilinx CAN specs */
268 static const struct can_bittiming_const xcan_bittiming_const = {
269 .name = DRIVER_NAME,
270 .tseg1_min = 1,
271 .tseg1_max = 16,
272 .tseg2_min = 1,
273 .tseg2_max = 8,
274 .sjw_max = 4,
275 .brp_min = 1,
276 .brp_max = 256,
277 .brp_inc = 1,
278 };
279
280 /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
281 static const struct can_bittiming_const xcan_bittiming_const_canfd = {
282 .name = DRIVER_NAME,
283 .tseg1_min = 1,
284 .tseg1_max = 64,
285 .tseg2_min = 1,
286 .tseg2_max = 16,
287 .sjw_max = 16,
288 .brp_min = 1,
289 .brp_max = 256,
290 .brp_inc = 1,
291 };
292
293 /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
294 static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
295 .name = DRIVER_NAME,
296 .tseg1_min = 1,
297 .tseg1_max = 16,
298 .tseg2_min = 1,
299 .tseg2_max = 8,
300 .sjw_max = 8,
301 .brp_min = 1,
302 .brp_max = 256,
303 .brp_inc = 1,
304 };
305
306 /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
307 static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
308 .name = DRIVER_NAME,
309 .tseg1_min = 1,
310 .tseg1_max = 256,
311 .tseg2_min = 1,
312 .tseg2_max = 128,
313 .sjw_max = 128,
314 .brp_min = 1,
315 .brp_max = 256,
316 .brp_inc = 1,
317 };
318
319 /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
320 static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
321 .name = DRIVER_NAME,
322 .tseg1_min = 1,
323 .tseg1_max = 32,
324 .tseg2_min = 1,
325 .tseg2_max = 16,
326 .sjw_max = 16,
327 .brp_min = 1,
328 .brp_max = 256,
329 .brp_inc = 1,
330 };
331
332 /* Transmission Delay Compensation constants for CANFD 1.0 */
333 static const struct can_tdc_const xcan_tdc_const_canfd = {
334 .tdcv_min = 0,
335 .tdcv_max = 0, /* Manual mode not supported. */
336 .tdco_min = 0,
337 .tdco_max = 32,
338 .tdcf_min = 0, /* Filter window not supported */
339 .tdcf_max = 0,
340 };
341
342 /* Transmission Delay Compensation constants for CANFD 2.0 */
343 static const struct can_tdc_const xcan_tdc_const_canfd2 = {
344 .tdcv_min = 0,
345 .tdcv_max = 0, /* Manual mode not supported. */
346 .tdco_min = 0,
347 .tdco_max = 64,
348 .tdcf_min = 0, /* Filter window not supported */
349 .tdcf_max = 0,
350 };
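/* Note on the TDC constants above: TDCO is written into the data-phase BRPR
 * (bits 12:8, or 13:8 on CANFD 2.0 cores) together with XCAN_BRPR_TDC_ENABLE,
 * while TDCV is only measured by the core and read back from the status
 * register; hence the tdcv_min/tdcv_max of 0 (no manual TDCV mode).
 */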
351
352 enum xcan_stats_type {
353 XCAN_ECC_RX_2_BIT_ERRORS,
354 XCAN_ECC_RX_1_BIT_ERRORS,
355 XCAN_ECC_TXOL_2_BIT_ERRORS,
356 XCAN_ECC_TXOL_1_BIT_ERRORS,
357 XCAN_ECC_TXTL_2_BIT_ERRORS,
358 XCAN_ECC_TXTL_1_BIT_ERRORS,
359 };
360
361 static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
362 [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
363 [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
364 [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
365 [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
366 [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
367 [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
368 };
369
370 /**
371 * xcan_write_reg_le - Write a value to the device register little endian
372 * @priv: Driver private data structure
373 * @reg: Register offset
374 * @val: Value to write at the Register offset
375 *
376 * Write data to the particular CAN register
377 */
378 static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
379 u32 val)
380 {
381 iowrite32(val, priv->reg_base + reg);
382 }
383
384 /**
385 * xcan_read_reg_le - Read a value from the device register little endian
386 * @priv: Driver private data structure
387 * @reg: Register offset
388 *
389 * Read data from the particular CAN register
390 * Return: value read from the CAN register
391 */
392 static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
393 {
394 return ioread32(priv->reg_base + reg);
395 }
396
397 /**
398 * xcan_write_reg_be - Write a value to the device register big endian
399 * @priv: Driver private data structure
400 * @reg: Register offset
401 * @val: Value to write at the Register offset
402 *
403 * Write data to the particular CAN register
404 */
405 static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
406 u32 val)
407 {
408 iowrite32be(val, priv->reg_base + reg);
409 }
410
411 /**
412 * xcan_read_reg_be - Read a value from the device register big endian
413 * @priv: Driver private data structure
414 * @reg: Register offset
415 *
416 * Read data from the particular CAN register
417 * Return: value read from the CAN register
418 */
419 static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
420 {
421 return ioread32be(priv->reg_base + reg);
422 }
423
424 /**
425 * xcan_rx_int_mask - Get the mask for the receive interrupt
426 * @priv: Driver private data structure
427 *
428 * Return: The receive interrupt mask used by the driver on this HW
429 */
430 static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
431 {
432 /* RXNEMP is better suited for our use case as it cannot be cleared
433 * while the FIFO is non-empty, but CAN FD HW does not have it
434 */
435 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
436 return XCAN_IXR_RXOK_MASK;
437 else
438 return XCAN_IXR_RXNEMP_MASK;
439 }
440
441 /**
442 * set_reset_mode - Resets the CAN device mode
443 * @ndev: Pointer to net_device structure
444 *
445 * This is the driver reset mode routine. The driver
446 * enters configuration mode.
447 *
448 * Return: 0 on success and failure value on error
449 */
450 static int set_reset_mode(struct net_device *ndev)
451 {
452 struct xcan_priv *priv = netdev_priv(ndev);
453 unsigned long timeout;
454
455 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
456
457 timeout = jiffies + XCAN_TIMEOUT;
458 while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
459 if (time_after(jiffies, timeout)) {
460 netdev_warn(ndev, "timed out for config mode\n");
461 return -ETIMEDOUT;
462 }
463 usleep_range(500, 10000);
464 }
465
466 /* reset clears FIFOs */
467 priv->tx_head = 0;
468 priv->tx_tail = 0;
469
470 return 0;
471 }
472
473 /**
474 * xcan_set_bittiming - CAN set bit timing routine
475 * @ndev: Pointer to net_device structure
476 *
477 * This is the driver set bittiming routine.
478 * Return: 0 on success and failure value on error
479 */
480 static int xcan_set_bittiming(struct net_device *ndev)
481 {
482 struct xcan_priv *priv = netdev_priv(ndev);
483 struct can_bittiming *bt = &priv->can.bittiming;
484 struct can_bittiming *dbt = &priv->can.data_bittiming;
485 u32 btr0, btr1;
486 u32 is_config_mode;
487
488 /* Check whether Xilinx CAN is in configuration mode.
489 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
490 */
491 is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
492 XCAN_SR_CONFIG_MASK;
493 if (!is_config_mode) {
494 netdev_alert(ndev,
495 "BUG! Cannot set bittiming - CAN is not in config mode\n");
496 return -EPERM;
497 }
498
499 /* Setting Baud Rate prescaler value in BRPR Register */
500 btr0 = (bt->brp - 1);
501
502 /* Setting Time Segment 1 in BTR Register */
503 btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
504
505 /* Setting Time Segment 2 in BTR Register */
506 btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
507
508 /* Setting Synchronous jump width in BTR Register */
509 btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
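/* Worked example, assuming the classic field layout (TS2 shift 4, SJW
 * shift 7): brp = 4, prop_seg + phase_seg1 = 13, phase_seg2 = 2, sjw = 1
 * gives BRPR = 0x03 and BTR = 0x0C | (0x1 << 4) | (0x0 << 7) = 0x1C.
 */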
510
511 priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
512 priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
513
514 if (priv->devtype.cantype == XAXI_CANFD ||
515 priv->devtype.cantype == XAXI_CANFD_2_0) {
516 /* Setting Baud Rate prescaler value in F_BRPR Register */
517 btr0 = dbt->brp - 1;
518 if (can_tdc_is_enabled(&priv->can)) {
519 if (priv->devtype.cantype == XAXI_CANFD)
520 btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
521 XCAN_BRPR_TDC_ENABLE;
522 else
523 btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
524 XCAN_BRPR_TDC_ENABLE;
525 }
526
527 /* Setting Time Segment 1 in BTR Register */
528 btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
529
530 /* Setting Time Segment 2 in BTR Register */
531 btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
532
533 /* Setting Synchronous jump width in BTR Register */
534 btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
535
536 priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
537 priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
538 }
539
540 netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
541 priv->read_reg(priv, XCAN_BRPR_OFFSET),
542 priv->read_reg(priv, XCAN_BTR_OFFSET));
543
544 return 0;
545 }
546
547 /**
548 * xcan_chip_start - The driver's start routine
549 * @ndev: Pointer to net_device structure
550 *
551 * This is the driver's start routine.
552 * Based on the state of the CAN device it puts
553 * the CAN device into the proper mode.
554 *
555 * Return: 0 on success and failure value on error
556 */
557 static int xcan_chip_start(struct net_device *ndev)
558 {
559 struct xcan_priv *priv = netdev_priv(ndev);
560 u32 reg_msr;
561 int err;
562 u32 ier;
563
564 /* Check if it is in reset mode */
565 err = set_reset_mode(ndev);
566 if (err < 0)
567 return err;
568
569 err = xcan_set_bittiming(ndev);
570 if (err < 0)
571 return err;
572
573 /* Enable interrupts
574 *
575 * We enable the ERROR interrupt even with
576 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
577 * dedicated interrupt for a state change to
578 * ERROR_WARNING/ERROR_PASSIVE.
579 */
580 ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
581 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
582 XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
583 XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
584
585 if (priv->ecc_enable)
586 ier |= XCAN_IXR_ECC_MASK;
587
588 if (priv->devtype.flags & XCAN_FLAG_RXMNF)
589 ier |= XCAN_IXR_RXMNF_MASK;
590
591 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
592
593 /* Check whether it is loopback mode or normal mode */
594 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
595 reg_msr = XCAN_MSR_LBACK_MASK;
596 else
597 reg_msr = 0x0;
598
599 /* enable the first extended filter, if any, as cores with extended
600 * filtering default to non-receipt if all filters are disabled
601 */
602 if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
603 priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
604
605 priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
606 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
607
608 netdev_dbg(ndev, "status:#x%08x\n",
609 priv->read_reg(priv, XCAN_SR_OFFSET));
610
611 priv->can.state = CAN_STATE_ERROR_ACTIVE;
612 return 0;
613 }
614
615 /**
616 * xcan_do_set_mode - This sets the mode of the driver
617 * @ndev: Pointer to net_device structure
618 * @mode: Tells the mode of the driver
619 *
620 * This checks the driver state and calls the corresponding mode-setting routine.
621 *
622 * Return: 0 on success and failure value on error
623 */
624 static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
625 {
626 int ret;
627
628 switch (mode) {
629 case CAN_MODE_START:
630 ret = xcan_chip_start(ndev);
631 if (ret < 0) {
632 netdev_err(ndev, "xcan_chip_start failed!\n");
633 return ret;
634 }
635 netif_wake_queue(ndev);
636 break;
637 default:
638 ret = -EOPNOTSUPP;
639 break;
640 }
641
642 return ret;
643 }
644
645 /**
646 * xcan_write_frame - Write a frame to HW
647 * @ndev: Pointer to net_device structure
648 * @skb: sk_buff pointer that contains data to be Txed
649 * @frame_offset: Register offset to write the frame to
650 */
651 static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
652 int frame_offset)
653 {
654 u32 id, dlc, data[2] = {0, 0};
655 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
656 u32 ramoff, dwindex = 0, i;
657 struct xcan_priv *priv = netdev_priv(ndev);
658
659 /* Pay careful attention to the bit sequence */
660 if (cf->can_id & CAN_EFF_FLAG) {
661 /* Extended CAN ID format */
662 id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
663 XCAN_IDR_ID2_MASK;
664 id |= (((cf->can_id & CAN_EFF_MASK) >>
665 (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
666 XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
667
668 * The substitute remote TX request bit should be "1"
669 * for extended frames as in the Xilinx CAN datasheet
670 */
671 id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
672
673 if (cf->can_id & CAN_RTR_FLAG)
674 /* Extended frames remote TX request */
675 id |= XCAN_IDR_RTR_MASK;
676 } else {
677 /* Standard CAN ID format */
678 id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
679 XCAN_IDR_ID1_MASK;
680
681 if (cf->can_id & CAN_RTR_FLAG)
682 /* Standard frames remote TX request */
683 id |= XCAN_IDR_SRR_MASK;
684 }
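/* Example of the resulting IDR word: a standard data frame with
 * can_id = 0x123 packs to 0x123 << 21 = 0x24600000 (SRR/IDE/RTR clear).
 */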
685
686 dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
687 if (can_is_canfd_skb(skb)) {
688 if (cf->flags & CANFD_BRS)
689 dlc |= XCAN_DLCR_BRS_MASK;
690 dlc |= XCAN_DLCR_EDL_MASK;
691 }
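/* Example DLC words: an 8-byte Classical CAN frame gives 8 << 28 =
 * 0x80000000, while a 64-byte CAN FD frame with BRS gives
 * (0xF << 28) | XCAN_DLCR_EDL_MASK | XCAN_DLCR_BRS_MASK = 0xFC000000.
 */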
692
693 if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
694 (priv->devtype.flags & XCAN_FLAG_TXFEMP))
695 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
696 else
697 can_put_echo_skb(skb, ndev, 0, 0);
698
699 priv->tx_head++;
700
701 priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
702 /* If the CAN frame is RTR frame this write triggers transmission
703 * (not on CAN FD)
704 */
705 priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
706 if (priv->devtype.cantype == XAXI_CANFD ||
707 priv->devtype.cantype == XAXI_CANFD_2_0) {
708 for (i = 0; i < cf->len; i += 4) {
709 ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
710 (dwindex * XCANFD_DW_BYTES);
711 priv->write_reg(priv, ramoff,
712 be32_to_cpup((__be32 *)(cf->data + i)));
713 dwindex++;
714 }
715 } else {
716 if (cf->len > 0)
717 data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
718 if (cf->len > 4)
719 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
720
721 if (!(cf->can_id & CAN_RTR_FLAG)) {
722 priv->write_reg(priv,
723 XCAN_FRAME_DW1_OFFSET(frame_offset),
724 data[0]);
725 /* If the CAN frame is Standard/Extended frame this
726 * write triggers transmission (not on CAN FD)
727 */
728 priv->write_reg(priv,
729 XCAN_FRAME_DW2_OFFSET(frame_offset),
730 data[1]);
731 }
732 }
733 }
734
735 /**
736 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
737 * @skb: sk_buff pointer that contains data to be Txed
738 * @ndev: Pointer to net_device structure
739 *
740 * Return: 0 on success, -ENOSPC if FIFO is full.
741 */
742 static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
743 {
744 struct xcan_priv *priv = netdev_priv(ndev);
745 unsigned long flags;
746
747 /* Check if the TX buffer is full */
748 if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
749 XCAN_SR_TXFLL_MASK))
750 return -ENOSPC;
751
752 spin_lock_irqsave(&priv->tx_lock, flags);
753
754 xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
755
756 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
757 if (priv->tx_max > 1)
758 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
759
760 /* Check if the TX buffer is full */
761 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
762 netif_stop_queue(ndev);
763
764 spin_unlock_irqrestore(&priv->tx_lock, flags);
765
766 return 0;
767 }
768
769 /**
770 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
771 * @skb: sk_buff pointer that contains data to be Txed
772 * @ndev: Pointer to net_device structure
773 *
774 * Return: 0 on success, -ENOSPC if there is no space
775 */
776 static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
777 {
778 struct xcan_priv *priv = netdev_priv(ndev);
779 unsigned long flags;
780
781 if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
782 BIT(XCAN_TX_MAILBOX_IDX)))
783 return -ENOSPC;
784
785 spin_lock_irqsave(&priv->tx_lock, flags);
786
787 xcan_write_frame(ndev, skb,
788 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
789
790 /* Mark buffer as ready for transmit */
791 priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
792
793 netif_stop_queue(ndev);
794
795 spin_unlock_irqrestore(&priv->tx_lock, flags);
796
797 return 0;
798 }
799
800 /**
801 * xcan_start_xmit - Starts the transmission
802 * @skb: sk_buff pointer that contains data to be Txed
803 * @ndev: Pointer to net_device structure
804 *
805 * This function is invoked from upper layers to initiate transmission.
806 *
807 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
808 */
809 static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
810 {
811 struct xcan_priv *priv = netdev_priv(ndev);
812 int ret;
813
814 if (can_dev_dropped_skb(ndev, skb))
815 return NETDEV_TX_OK;
816
817 if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
818 ret = xcan_start_xmit_mailbox(skb, ndev);
819 else
820 ret = xcan_start_xmit_fifo(skb, ndev);
821
822 if (ret < 0) {
823 netdev_err(ndev, "BUG!, TX full when queue awake!\n");
824 netif_stop_queue(ndev);
825 return NETDEV_TX_BUSY;
826 }
827
828 return NETDEV_TX_OK;
829 }
830
831 /**
832 * xcan_rx - Called from the CAN ISR to complete received
833 * frame processing
834 * @ndev: Pointer to net_device structure
835 * @frame_base: Register offset to the frame to be read
836 *
837 * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
838 * does minimal processing and invokes "netif_receive_skb" to complete further
839 * processing.
840 * Return: 1 on success and 0 on failure.
841 */
842 static int xcan_rx(struct net_device *ndev, int frame_base)
843 {
844 struct xcan_priv *priv = netdev_priv(ndev);
845 struct net_device_stats *stats = &ndev->stats;
846 struct can_frame *cf;
847 struct sk_buff *skb;
848 u32 id_xcan, dlc, data[2] = {0, 0};
849
850 skb = alloc_can_skb(ndev, &cf);
851 if (unlikely(!skb)) {
852 stats->rx_dropped++;
853 return 0;
854 }
855
856 /* Read a frame from Xilinx zynq CANPS */
857 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
858 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
859 XCAN_DLCR_DLC_SHIFT;
860
861 /* Change Xilinx CAN data length format to socketCAN data format */
862 cf->len = can_cc_dlc2len(dlc);
863
864 /* Change Xilinx CAN ID format to socketCAN ID format */
865 if (id_xcan & XCAN_IDR_IDE_MASK) {
866 /* The received frame is an Extended format frame */
867 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
868 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
869 XCAN_IDR_ID2_SHIFT;
870 cf->can_id |= CAN_EFF_FLAG;
871 if (id_xcan & XCAN_IDR_RTR_MASK)
872 cf->can_id |= CAN_RTR_FLAG;
873 } else {
874 /* The received frame is a standard format frame */
875 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
876 XCAN_IDR_ID1_SHIFT;
877 if (id_xcan & XCAN_IDR_SRR_MASK)
878 cf->can_id |= CAN_RTR_FLAG;
879 }
880
881 /* DW1/DW2 must always be read to remove message from RXFIFO */
882 data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
883 data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));
884
885 if (!(cf->can_id & CAN_RTR_FLAG)) {
886 /* Change Xilinx CAN data format to socketCAN data format */
887 if (cf->len > 0)
888 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
889 if (cf->len > 4)
890 *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
891
892 stats->rx_bytes += cf->len;
893 }
894 stats->rx_packets++;
895
896 netif_receive_skb(skb);
897
898 return 1;
899 }
900
901 /**
902 * xcanfd_rx - Called from the CAN ISR to complete received
903 * frame processing
904 * @ndev: Pointer to net_device structure
905 * @frame_base: Register offset to the frame to be read
906 *
907 * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
908 * does minimal processing and invokes "netif_receive_skb" to complete further
909 * processing.
910 * Return: 1 on success and 0 on failure.
911 */
912 static int xcanfd_rx(struct net_device *ndev, int frame_base)
913 {
914 struct xcan_priv *priv = netdev_priv(ndev);
915 struct net_device_stats *stats = &ndev->stats;
916 struct canfd_frame *cf;
917 struct sk_buff *skb;
918 u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
919
920 id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
921 dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
922 if (dlc & XCAN_DLCR_EDL_MASK)
923 skb = alloc_canfd_skb(ndev, &cf);
924 else
925 skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
926
927 if (unlikely(!skb)) {
928 stats->rx_dropped++;
929 return 0;
930 }
931
932 /* Change Xilinx CANFD data length format to socketCAN data
933 * format
934 */
935 if (dlc & XCAN_DLCR_EDL_MASK)
936 cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
937 XCAN_DLCR_DLC_SHIFT);
938 else
939 cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
940 XCAN_DLCR_DLC_SHIFT);
941
942 /* Change Xilinx CAN ID format to socketCAN ID format */
943 if (id_xcan & XCAN_IDR_IDE_MASK) {
944 /* The received frame is an Extended format frame */
945 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
946 cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
947 XCAN_IDR_ID2_SHIFT;
948 cf->can_id |= CAN_EFF_FLAG;
949 if (id_xcan & XCAN_IDR_RTR_MASK)
950 cf->can_id |= CAN_RTR_FLAG;
951 } else {
952 /* The received frame is a standard format frame */
953 cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
954 XCAN_IDR_ID1_SHIFT;
955 if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
956 XCAN_IDR_SRR_MASK))
957 cf->can_id |= CAN_RTR_FLAG;
958 }
959
960 /* Check whether the received frame is FD or not */
961 if (dlc & XCAN_DLCR_EDL_MASK) {
962 for (i = 0; i < cf->len; i += 4) {
963 dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
964 (dwindex * XCANFD_DW_BYTES);
965 data[0] = priv->read_reg(priv, dw_offset);
966 *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
967 dwindex++;
968 }
969 } else {
970 for (i = 0; i < cf->len; i += 4) {
971 dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
972 data[0] = priv->read_reg(priv, dw_offset + i);
973 *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
974 }
975 }
976
977 if (!(cf->can_id & CAN_RTR_FLAG))
978 stats->rx_bytes += cf->len;
979 stats->rx_packets++;
980
981 netif_receive_skb(skb);
982
983 return 1;
984 }
985
986 /**
987 * xcan_current_error_state - Get current error state from HW
988 * @ndev: Pointer to net_device structure
989 *
990 * Checks the current CAN error state from the HW. Note that this
991 * only checks for ERROR_PASSIVE and ERROR_WARNING.
992 *
993 * Return:
994 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
995 * otherwise.
996 */
997 static enum can_state xcan_current_error_state(struct net_device *ndev)
998 {
999 struct xcan_priv *priv = netdev_priv(ndev);
1000 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
1001
1002 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
1003 return CAN_STATE_ERROR_PASSIVE;
1004 else if (status & XCAN_SR_ERRWRN_MASK)
1005 return CAN_STATE_ERROR_WARNING;
1006 else
1007 return CAN_STATE_ERROR_ACTIVE;
1008 }
1009
1010 /**
1011 * xcan_set_error_state - Set new CAN error state
1012 * @ndev: Pointer to net_device structure
1013 * @new_state: The new CAN state to be set
1014 * @cf: Error frame to be populated or NULL
1015 *
1016 * Set new CAN error state for the device, updating statistics and
1017 * populating the error frame if given.
1018 */
1019 static void xcan_set_error_state(struct net_device *ndev,
1020 enum can_state new_state,
1021 struct can_frame *cf)
1022 {
1023 struct xcan_priv *priv = netdev_priv(ndev);
1024 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
1025 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
1026 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
1027 enum can_state tx_state = txerr >= rxerr ? new_state : 0;
1028 enum can_state rx_state = txerr <= rxerr ? new_state : 0;
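/* Example: with txerr = 130 and rxerr = 40, only tx_state is set to
 * new_state, so can_change_state() attributes the state change to the
 * transmit side.
 */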
1029
1030 /* non-ERROR states are handled elsewhere */
1031 if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
1032 return;
1033
1034 can_change_state(ndev, cf, tx_state, rx_state);
1035
1036 if (cf) {
1037 cf->can_id |= CAN_ERR_CNT;
1038 cf->data[6] = txerr;
1039 cf->data[7] = rxerr;
1040 }
1041 }
1042
1043 /**
1044 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
1045 * @ndev: Pointer to net_device structure
1046 *
1047 * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
1048 * the performed RX/TX has caused it to drop to a lesser state and set
1049 * the interface state accordingly.
1050 */
1051 static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
1052 {
1053 struct xcan_priv *priv = netdev_priv(ndev);
1054 enum can_state old_state = priv->can.state;
1055 enum can_state new_state;
1056
1057 /* changing error state due to successful frame RX/TX can only
1058 * occur from these states
1059 */
1060 if (old_state != CAN_STATE_ERROR_WARNING &&
1061 old_state != CAN_STATE_ERROR_PASSIVE)
1062 return;
1063
1064 new_state = xcan_current_error_state(ndev);
1065
1066 if (new_state != old_state) {
1067 struct sk_buff *skb;
1068 struct can_frame *cf;
1069
1070 skb = alloc_can_err_skb(ndev, &cf);
1071
1072 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
1073
1074 if (skb)
1075 netif_rx(skb);
1076 }
1077 }
1078
1079 /**
1080 * xcan_err_interrupt - error frame Isr
1081 * @ndev: net_device pointer
1082 * @isr: interrupt status register value
1083 *
1084 * This is the CAN error interrupt and it will
1085 * check the type of error and forward the error
1086 * frame to upper layers.
1087 */
1088 static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1089 {
1090 struct xcan_priv *priv = netdev_priv(ndev);
1091 struct net_device_stats *stats = &ndev->stats;
1092 struct can_frame cf = { };
1093 u32 err_status;
1094
1095 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
1096 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
1097
1098 if (isr & XCAN_IXR_BSOFF_MASK) {
1099 priv->can.state = CAN_STATE_BUS_OFF;
1100 priv->can.can_stats.bus_off++;
1101 /* Leave device in Config Mode in bus-off state */
1102 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1103 can_bus_off(ndev);
1104 cf.can_id |= CAN_ERR_BUSOFF;
1105 } else {
1106 enum can_state new_state = xcan_current_error_state(ndev);
1107
1108 if (new_state != priv->can.state)
1109 xcan_set_error_state(ndev, new_state, &cf);
1110 }
1111
1112 /* Check for Arbitration lost interrupt */
1113 if (isr & XCAN_IXR_ARBLST_MASK) {
1114 priv->can.can_stats.arbitration_lost++;
1115 cf.can_id |= CAN_ERR_LOSTARB;
1116 cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
1117 }
1118
1119 /* Check for RX FIFO Overflow interrupt */
1120 if (isr & XCAN_IXR_RXOFLW_MASK) {
1121 stats->rx_over_errors++;
1122 stats->rx_errors++;
1123 cf.can_id |= CAN_ERR_CRTL;
1124 cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1125 }
1126
1127 /* Check for RX Match Not Finished interrupt */
1128 if (isr & XCAN_IXR_RXMNF_MASK) {
1129 stats->rx_dropped++;
1130 stats->rx_errors++;
1131 netdev_err(ndev, "RX match not finished, frame discarded\n");
1132 cf.can_id |= CAN_ERR_CRTL;
1133 cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
1134 }
1135
1136 /* Check for error interrupt */
1137 if (isr & XCAN_IXR_ERROR_MASK) {
1138 bool berr_reporting = false;
1139
1140 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
1141 berr_reporting = true;
1142 cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1143 }
1144
1145 /* Check for Ack error interrupt */
1146 if (err_status & XCAN_ESR_ACKER_MASK) {
1147 stats->tx_errors++;
1148 if (berr_reporting) {
1149 cf.can_id |= CAN_ERR_ACK;
1150 cf.data[3] = CAN_ERR_PROT_LOC_ACK;
1151 }
1152 }
1153
1154 /* Check for Bit error interrupt */
1155 if (err_status & XCAN_ESR_BERR_MASK) {
1156 stats->tx_errors++;
1157 if (berr_reporting) {
1158 cf.can_id |= CAN_ERR_PROT;
1159 cf.data[2] = CAN_ERR_PROT_BIT;
1160 }
1161 }
1162
1163 /* Check for Stuff error interrupt */
1164 if (err_status & XCAN_ESR_STER_MASK) {
1165 stats->rx_errors++;
1166 if (berr_reporting) {
1167 cf.can_id |= CAN_ERR_PROT;
1168 cf.data[2] = CAN_ERR_PROT_STUFF;
1169 }
1170 }
1171
1172 /* Check for Form error interrupt */
1173 if (err_status & XCAN_ESR_FMER_MASK) {
1174 stats->rx_errors++;
1175 if (berr_reporting) {
1176 cf.can_id |= CAN_ERR_PROT;
1177 cf.data[2] = CAN_ERR_PROT_FORM;
1178 }
1179 }
1180
1181 /* Check for CRC error interrupt */
1182 if (err_status & XCAN_ESR_CRCER_MASK) {
1183 stats->rx_errors++;
1184 if (berr_reporting) {
1185 cf.can_id |= CAN_ERR_PROT;
1186 cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
1187 }
1188 }
1189 priv->can.can_stats.bus_error++;
1190 }
1191
1192 if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
1193 u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
1194
1195 reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
1196 reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
1197 reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
1198
1199 /* The counter reaches its maximum at 0xffff and does not overflow.
1200 * Accept the small race window between reading and resetting ECC counters.
1201 */
1202 priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
1203 XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
1204
1205 u64_stats_update_begin(&priv->syncp);
1206
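/* Example: reg_rx_ecc = 0x00020003 decodes below to two 2-bit errors
 * (upper 16 bits) and three 1-bit errors (lower 16 bits).
 */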
1207 if (isr & XCAN_IXR_E2BERX_MASK) {
1208 u64_stats_add(&priv->ecc_rx_2_bit_errors,
1209 FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
1210 }
1211
1212 if (isr & XCAN_IXR_E1BERX_MASK) {
1213 u64_stats_add(&priv->ecc_rx_1_bit_errors,
1214 FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
1215 }
1216
1217 if (isr & XCAN_IXR_E2BETXOL_MASK) {
1218 u64_stats_add(&priv->ecc_txol_2_bit_errors,
1219 FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
1220 }
1221
1222 if (isr & XCAN_IXR_E1BETXOL_MASK) {
1223 u64_stats_add(&priv->ecc_txol_1_bit_errors,
1224 FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
1225 }
1226
1227 if (isr & XCAN_IXR_E2BETXTL_MASK) {
1228 u64_stats_add(&priv->ecc_txtl_2_bit_errors,
1229 FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
1230 }
1231
1232 if (isr & XCAN_IXR_E1BETXTL_MASK) {
1233 u64_stats_add(&priv->ecc_txtl_1_bit_errors,
1234 FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
1235 }
1236
1237 u64_stats_update_end(&priv->syncp);
1238 }
1239
1240 if (cf.can_id) {
1241 struct can_frame *skb_cf;
1242 struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
1243
1244 if (skb) {
1245 skb_cf->can_id |= cf.can_id;
1246 memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
1247 netif_rx(skb);
1248 }
1249 }
1250
1251 netdev_dbg(ndev, "%s: error status register:0x%x\n",
1252 __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
1253 }
1254
1255 /**
1256 * xcan_state_interrupt - It will check the state of the CAN device
1257 * @ndev: net_device pointer
1258 * @isr: interrupt status register value
1259 *
1260 * This checks the state of the CAN device
1261 * and puts the device into the appropriate state.
1262 */
1263 static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
1264 {
1265 struct xcan_priv *priv = netdev_priv(ndev);
1266
1267 /* Check for Sleep interrupt; if set, put CAN device in sleep state */
1268 if (isr & XCAN_IXR_SLP_MASK)
1269 priv->can.state = CAN_STATE_SLEEPING;
1270
1271 /* Check for Wake up interrupt; if set, put CAN device in Active state */
1272 if (isr & XCAN_IXR_WKUP_MASK)
1273 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1274 }
1275
1276 /**
1277 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1278 * @priv: Driver private data structure
1279 *
1280 * Return: Register offset of the next frame in RX FIFO, or -ENOENT if the FIFO is empty.
1281 */
1282 static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1283 {
1284 int offset;
1285
1286 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1287 u32 fsr, mask;
1288
1289 /* clear RXOK before the is-empty check so that any newly
1290 * received frame will reassert it without a race
1291 */
1292 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1293
1294 fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
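/* Example, assuming a CANFD 2.0 core: fsr = 0x00000305 means a fill
 * level of 3 and a read index of 5, so the next frame is read from
 * offset 0x2100 + 5 * 0x48 = 0x2268.
 */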
1295
1296 /* check if RX FIFO is empty */
1297 if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1298 mask = XCAN_2_FSR_FL_MASK;
1299 else
1300 mask = XCAN_FSR_FL_MASK;
1301
1302 if (!(fsr & mask))
1303 return -ENOENT;
1304
1305 if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1306 offset =
1307 XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1308 else
1309 offset =
1310 XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1311
1312 } else {
1313 /* check if RX FIFO is empty */
1314 if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1315 XCAN_IXR_RXNEMP_MASK))
1316 return -ENOENT;
1317
1318 /* frames are read from a static offset */
1319 offset = XCAN_RXFIFO_OFFSET;
1320 }
1321
1322 return offset;
1323 }
1324
1325 /**
1326 * xcan_rx_poll - Poll routine for rx packets (NAPI)
1327 * @napi: napi structure pointer
1328 * @quota: Max number of rx packets to be processed.
1329 *
1330 * This is the poll routine for the rx part.
1331 * It will process at most quota packets.
1332 *
1333 * Return: number of packets received
1334 */
1335 static int xcan_rx_poll(struct napi_struct *napi, int quota)
1336 {
1337 struct net_device *ndev = napi->dev;
1338 struct xcan_priv *priv = netdev_priv(ndev);
1339 u32 ier;
1340 int work_done = 0;
1341 int frame_offset;
1342
1343 while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
1344 (work_done < quota)) {
1345 if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
1346 work_done += xcanfd_rx(ndev, frame_offset);
1347 else
1348 work_done += xcan_rx(ndev, frame_offset);
1349
1350 if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
1351 /* increment read index */
1352 priv->write_reg(priv, XCAN_FSR_OFFSET,
1353 XCAN_FSR_IRI_MASK);
1354 else
1355 /* clear rx-not-empty (will actually clear only if
1356 * empty)
1357 */
1358 priv->write_reg(priv, XCAN_ICR_OFFSET,
1359 XCAN_IXR_RXNEMP_MASK);
1360 }
1361
1362 if (work_done)
1363 xcan_update_error_state_after_rxtx(ndev);
1364
1365 if (work_done < quota) {
1366 if (napi_complete_done(napi, work_done)) {
1367 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1368 ier |= xcan_rx_int_mask(priv);
1369 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1370 }
1371 }
1372 return work_done;
1373 }
1374
1375 /**
1376 * xcan_tx_interrupt - Tx Done Isr
1377 * @ndev: net_device pointer
1378 * @isr: Interrupt status register value
1379 */
1380 static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1381 {
1382 struct xcan_priv *priv = netdev_priv(ndev);
1383 struct net_device_stats *stats = &ndev->stats;
1384 unsigned int frames_in_fifo;
1385 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1386 unsigned long flags;
1387 int retries = 0;
1388
1389 /* Synchronize with xmit as we need to know the exact number
1390 * of frames in the FIFO to stay in sync due to the TXFEMP
1391 * handling.
1392 * This also prevents a race between netif_wake_queue() and
1393 * netif_stop_queue().
1394 */
1395 spin_lock_irqsave(&priv->tx_lock, flags);
1396
1397 frames_in_fifo = priv->tx_head - priv->tx_tail;
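/* tx_head counts frames handed to the HW, tx_tail counts frames
 * confirmed sent, e.g. tx_head = 7 and tx_tail = 5 means two frames
 * are still in the FIFO.
 */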
1398
1399 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1400 /* clear TXOK anyway to avoid getting back here */
1401 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1402 spin_unlock_irqrestore(&priv->tx_lock, flags);
1403 return;
1404 }
1405
1406 /* Check if 2 frames were sent (TXOK only means that at least 1
1407 * frame was sent).
1408 */
1409 if (frames_in_fifo > 1) {
1410 WARN_ON(frames_in_fifo > priv->tx_max);
1411
1412 /* Synchronize TXOK and isr so that after the loop:
1413 * (1) isr variable is up-to-date at least up to TXOK clear
1414 * time. This avoids us clearing a TXOK of a second frame
1415 * but not noticing that the FIFO is now empty and thus
1416 * marking only a single frame as sent.
1417 * (2) No TXOK is left. Having one could mean leaving a
1418 * stray TXOK as we might process the associated frame
1419 * via TXFEMP handling as we read TXFEMP *after* TXOK
1420 * clear to satisfy (1).
1421 */
1422 while ((isr & XCAN_IXR_TXOK_MASK) &&
1423 !WARN_ON(++retries == 100)) {
1424 priv->write_reg(priv, XCAN_ICR_OFFSET,
1425 XCAN_IXR_TXOK_MASK);
1426 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1427 }
1428
1429 if (isr & XCAN_IXR_TXFEMP_MASK) {
1430 /* nothing in FIFO anymore */
1431 frames_sent = frames_in_fifo;
1432 }
1433 } else {
1434 /* single frame in fifo, just clear TXOK */
1435 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1436 }
1437
1438 while (frames_sent--) {
1439 stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
1440 priv->tx_max, NULL);
1441 priv->tx_tail++;
1442 stats->tx_packets++;
1443 }
1444
1445 netif_wake_queue(ndev);
1446
1447 spin_unlock_irqrestore(&priv->tx_lock, flags);
1448
1449 xcan_update_error_state_after_rxtx(ndev);
1450 }
1451
1452 /**
1453 * xcan_interrupt - CAN Isr
1454 * @irq: irq number
1455 * @dev_id: device id pointer
1456 *
1457 * This is the Xilinx CAN ISR. It checks the type of interrupt
1458 * and invokes the corresponding handler.
1459 *
1460 * Return:
1461 * IRQ_NONE - If no interrupt is pending, IRQ_HANDLED otherwise
1462 */
1463 static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1464 {
1465 struct net_device *ndev = (struct net_device *)dev_id;
1466 struct xcan_priv *priv = netdev_priv(ndev);
1467 u32 isr_errors, mask;
1468 u32 isr, ier;
1469 u32 rx_int_mask = xcan_rx_int_mask(priv);
1470
1471 /* Get the interrupt status from Xilinx CAN */
1472 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1473 if (!isr)
1474 return IRQ_NONE;
1475
1476 /* Check for the type of interrupt and Processing it */
1477 if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
1478 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
1479 XCAN_IXR_WKUP_MASK));
1480 xcan_state_interrupt(ndev, isr);
1481 }
1482
1483 /* Check for Tx interrupt and Processing it */
1484 if (isr & XCAN_IXR_TXOK_MASK)
1485 xcan_tx_interrupt(ndev, isr);
1486
1487 mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1488 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
1489 XCAN_IXR_RXMNF_MASK;
1490
1491 if (priv->ecc_enable)
1492 mask |= XCAN_IXR_ECC_MASK;
1493
1494 /* Check for the type of error interrupt and Processing it */
1495 isr_errors = isr & mask;
1496 if (isr_errors) {
1497 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1498 xcan_err_interrupt(ndev, isr);
1499 }
1500
1501 /* Check for the type of receive interrupt and Processing it */
1502 if (isr & rx_int_mask) {
1503 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1504 ier &= ~rx_int_mask;
1505 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1506 napi_schedule(&priv->napi);
1507 }
1508 return IRQ_HANDLED;
1509 }
1510
1511 /**
1512 * xcan_chip_stop - Driver stop routine
1513 * @ndev: Pointer to net_device structure
1514 *
1515 * This is the driver's stop routine. It will disable the
1516 * interrupts and put the device into configuration mode.
1517 */
1518 static void xcan_chip_stop(struct net_device *ndev)
1519 {
1520 struct xcan_priv *priv = netdev_priv(ndev);
1521 int ret;
1522
1523 /* Disable interrupts and leave the can in configuration mode */
1524 ret = set_reset_mode(ndev);
1525 if (ret < 0)
1526 netdev_dbg(ndev, "set_reset_mode() Failed\n");
1527
1528 priv->can.state = CAN_STATE_STOPPED;
1529 }
1530
1531 /**
1532 * xcan_open - Driver open routine
1533 * @ndev: Pointer to net_device structure
1534 *
1535 * This is the driver open routine.
1536 * Return: 0 on success and failure value on error
1537 */
1538 static int xcan_open(struct net_device *ndev)
1539 {
1540 struct xcan_priv *priv = netdev_priv(ndev);
1541 int ret;
1542
1543 ret = phy_power_on(priv->transceiver);
1544 if (ret)
1545 return ret;
1546
1547 ret = pm_runtime_get_sync(priv->dev);
1548 if (ret < 0) {
1549 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1550 __func__, ret);
1551 goto err;
1552 }
1553
1554 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1555 ndev->name, ndev);
1556 if (ret < 0) {
1557 netdev_err(ndev, "irq allocation for CAN failed\n");
1558 goto err;
1559 }
1560
1561 /* Set chip into reset mode */
1562 ret = set_reset_mode(ndev);
1563 if (ret < 0) {
1564 netdev_err(ndev, "mode resetting failed!\n");
1565 goto err_irq;
1566 }
1567
1568 /* Common open */
1569 ret = open_candev(ndev);
1570 if (ret)
1571 goto err_irq;
1572
1573 ret = xcan_chip_start(ndev);
1574 if (ret < 0) {
1575 netdev_err(ndev, "xcan_chip_start failed!\n");
1576 goto err_candev;
1577 }
1578
1579 napi_enable(&priv->napi);
1580 netif_start_queue(ndev);
1581
1582 return 0;
1583
1584 err_candev:
1585 close_candev(ndev);
1586 err_irq:
1587 free_irq(ndev->irq, ndev);
1588 err:
1589 pm_runtime_put(priv->dev);
1590 phy_power_off(priv->transceiver);
1591
1592 return ret;
1593 }
1594
1595 /**
1596 * xcan_close - Driver close routine
1597 * @ndev: Pointer to net_device structure
1598 *
1599 * Return: 0 always
1600 */
1601 static int xcan_close(struct net_device *ndev)
1602 {
1603 struct xcan_priv *priv = netdev_priv(ndev);
1604
1605 netif_stop_queue(ndev);
1606 napi_disable(&priv->napi);
1607 xcan_chip_stop(ndev);
1608 free_irq(ndev->irq, ndev);
1609 close_candev(ndev);
1610
1611 pm_runtime_put(priv->dev);
1612 phy_power_off(priv->transceiver);
1613
1614 return 0;
1615 }
1616
1617 /**
1618 * xcan_get_berr_counter - error counter routine
1619 * @ndev: Pointer to net_device structure
1620 * @bec: Pointer to can_berr_counter structure
1621 *
1622 * This is the driver error counter routine.
1623 * Return: 0 on success and failure value on error
1624 */
1625 static int xcan_get_berr_counter(const struct net_device *ndev,
1626 struct can_berr_counter *bec)
1627 {
1628 struct xcan_priv *priv = netdev_priv(ndev);
1629 int ret;
1630
1631 ret = pm_runtime_get_sync(priv->dev);
1632 if (ret < 0) {
1633 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1634 __func__, ret);
1635 pm_runtime_put(priv->dev);
1636 return ret;
1637 }
1638
1639 bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1640 bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1641 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1642
1643 pm_runtime_put(priv->dev);
1644
1645 return 0;
1646 }
1647
1648 /**
1649 * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
1650 * @ndev: Pointer to net_device structure
1651 * @tdcv: Pointer to TDCV value
1652 *
1653 * Return: 0 on success
1654 */
1655 static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
1656 {
1657 struct xcan_priv *priv = netdev_priv(ndev);
1658
1659 *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
1660
1661 return 0;
1662 }
1663
1664 static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
1665 {
1666 switch (stringset) {
1667 case ETH_SS_STATS:
1668 memcpy(buf, &xcan_priv_flags_strings,
1669 sizeof(xcan_priv_flags_strings));
1670 }
1671 }
1672
1673 static int xcan_get_sset_count(struct net_device *netdev, int sset)
1674 {
1675 switch (sset) {
1676 case ETH_SS_STATS:
1677 return ARRAY_SIZE(xcan_priv_flags_strings);
1678 default:
1679 return -EOPNOTSUPP;
1680 }
1681 }
1682
1683 static void xcan_get_ethtool_stats(struct net_device *ndev,
1684 struct ethtool_stats *stats, u64 *data)
1685 {
1686 struct xcan_priv *priv = netdev_priv(ndev);
1687 unsigned int start;
1688
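/* Re-read the counters if xcan_err_interrupt() updated them concurrently,
 * so the six values below form a consistent snapshot.
 */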
	do {
		start = u64_stats_fetch_begin(&priv->syncp);

		data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
		data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
		data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
		data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
		data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
		data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
	} while (u64_stats_fetch_retry(&priv->syncp, start));
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open = xcan_open,
	.ndo_stop = xcan_close,
	.ndo_start_xmit = xcan_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops xcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
	.get_strings = xcan_get_strings,
	.get_sset_count = xcan_get_sset_count,
	.get_ethtool_stats = xcan_get_ethtool_stats,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev: Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev: Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable bus clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable CAN clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

/**
 * xcan_probe - Platform registration call
 * @pdev: Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	struct phy *transceiver;
	const struct xcan_devtype_data *devtype;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	devtype = device_get_match_data(&pdev->dev);

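	/* Mailbox-based cores describe their TX capacity with a different
	 * devicetree property than FIFO-based cores.
	 */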
	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_BERR_REPORTING;
	priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(priv->rstc)) {
		dev_err(&pdev->dev, "Cannot get CAN reset.\n");
		ret = PTR_ERR(priv->rstc);
		goto err_free;
	}

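	/* Put the controller into a known state before touching its
	 * registers; this is a no-op when no reset line is described.
	 */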
	ret = reset_control_reset(priv->rstc);
	if (ret)
		goto err_free;

	if (devtype->cantype == XAXI_CANFD) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;
		priv->can.tdc_const = &xcan_tdc_const_canfd;
	}

	if (devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;
		priv->can.tdc_const = &xcan_tdc_const_canfd2;
	}

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
						CAN_CTRLMODE_TDC_AUTO;
		priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
	}

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_reset;

	ndev->irq = ret;

	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;
	ndev->ethtool_ops = &xcan_ethtool_ops;

	/* Get the CAN clock (can_clk) */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
				    "device clock not found\n");
		goto err_reset;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
				    "bus clock not found\n");
		goto err_reset;
	}

	transceiver = devm_phy_optional_get(&pdev->dev, NULL);
	if (IS_ERR(transceiver)) {
		ret = PTR_ERR(transceiver);
		dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
		goto err_reset;
	}
	priv->transceiver = transceiver;

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

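	/* At this point the core is expected to be in configuration mode, so
	 * a little-endian read of the status register should return exactly
	 * the CONFIG bit; if it does not, assume the IP is wired big-endian
	 * and switch the register accessors.
	 */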
	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

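	/* The RX FIFO depth doubles as the NAPI poll budget. */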
	netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	of_can_transceiver(ndev);
	pm_runtime_put(&pdev->dev);

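	/* CAN FD 2.0 cores: clear the second acceptance filter's ID and mask
	 * (an all-zero mask compares no ID bits, i.e. it matches everything).
	 */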
	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	if (priv->ecc_enable) {
		/* Reset FIFO ECC counters */
		priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
				XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
	}
	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_reset:
	reset_control_assert(priv->rstc);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 */
static void xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_candev(ndev);
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove = xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");