1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // bxcan.c - STM32 Basic Extended CAN controller driver
4 //
5 // Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com>
6 //
7 // NOTE: The ST documentation uses the terms master/slave instead of
8 // primary/secondary.
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/bitfield.h>
13 #include <linux/can.h>
14 #include <linux/can/dev.h>
15 #include <linux/can/error.h>
16 #include <linux/can/rx-offload.h>
17 #include <linux/clk.h>
18 #include <linux/ethtool.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/iopoll.h>
22 #include <linux/kernel.h>
23 #include <linux/mfd/syscon.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/platform_device.h>
27 #include <linux/regmap.h>
28 
29 #define BXCAN_NAPI_WEIGHT 3
30 #define BXCAN_TIMEOUT_US 10000
31 
32 #define BXCAN_RX_MB_NUM 2
33 #define BXCAN_TX_MB_NUM 3
34 
35 /* Primary control register (MCR) bits */
36 #define BXCAN_MCR_RESET BIT(15)
37 #define BXCAN_MCR_TTCM BIT(7)
38 #define BXCAN_MCR_ABOM BIT(6)
39 #define BXCAN_MCR_AWUM BIT(5)
40 #define BXCAN_MCR_NART BIT(4)
41 #define BXCAN_MCR_RFLM BIT(3)
42 #define BXCAN_MCR_TXFP BIT(2)
43 #define BXCAN_MCR_SLEEP BIT(1)
44 #define BXCAN_MCR_INRQ BIT(0)
45 
46 /* Primary status register (MSR) bits */
47 #define BXCAN_MSR_ERRI BIT(2)
48 #define BXCAN_MSR_SLAK BIT(1)
49 #define BXCAN_MSR_INAK BIT(0)
50 
51 /* Transmit status register (TSR) bits */
52 #define BXCAN_TSR_RQCP2 BIT(16)
53 #define BXCAN_TSR_RQCP1 BIT(8)
54 #define BXCAN_TSR_RQCP0 BIT(0)
55 
56 /* Receive FIFO 0 register (RF0R) bits */
57 #define BXCAN_RF0R_RFOM0 BIT(5)
58 #define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)
59 
60 /* Interrupt enable register (IER) bits */
61 #define BXCAN_IER_SLKIE BIT(17)
62 #define BXCAN_IER_WKUIE BIT(16)
63 #define BXCAN_IER_ERRIE BIT(15)
64 #define BXCAN_IER_LECIE BIT(11)
65 #define BXCAN_IER_BOFIE BIT(10)
66 #define BXCAN_IER_EPVIE BIT(9)
67 #define BXCAN_IER_EWGIE BIT(8)
68 #define BXCAN_IER_FOVIE1 BIT(6)
69 #define BXCAN_IER_FFIE1 BIT(5)
70 #define BXCAN_IER_FMPIE1 BIT(4)
71 #define BXCAN_IER_FOVIE0 BIT(3)
72 #define BXCAN_IER_FFIE0 BIT(2)
73 #define BXCAN_IER_FMPIE0 BIT(1)
74 #define BXCAN_IER_TMEIE BIT(0)
75 
76 /* Error status register (ESR) bits */
77 #define BXCAN_ESR_REC_MASK GENMASK(31, 24)
78 #define BXCAN_ESR_TEC_MASK GENMASK(23, 16)
79 #define BXCAN_ESR_LEC_MASK GENMASK(6, 4)
80 #define BXCAN_ESR_BOFF BIT(2)
81 #define BXCAN_ESR_EPVF BIT(1)
82 #define BXCAN_ESR_EWGF BIT(0)
83 
84 /* Bit timing register (BTR) bits */
85 #define BXCAN_BTR_SILM BIT(31)
86 #define BXCAN_BTR_LBKM BIT(30)
87 #define BXCAN_BTR_SJW_MASK GENMASK(25, 24)
88 #define BXCAN_BTR_TS2_MASK GENMASK(22, 20)
89 #define BXCAN_BTR_TS1_MASK GENMASK(19, 16)
90 #define BXCAN_BTR_BRP_MASK GENMASK(9, 0)
91 
92 /* TX mailbox identifier register (TIxR, x = 0..2) bits */
93 #define BXCAN_TIxR_STID_MASK GENMASK(31, 21)
94 #define BXCAN_TIxR_EXID_MASK GENMASK(31, 3)
95 #define BXCAN_TIxR_IDE BIT(2)
96 #define BXCAN_TIxR_RTR BIT(1)
97 #define BXCAN_TIxR_TXRQ BIT(0)
98 
99 /* TX mailbox data length and timestamp register (TDTxR, x = 0..2) bits */
100 #define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)
101 
102 /* RX FIFO mailbox identifier register (RIxR, x = 0..1) bits */
103 #define BXCAN_RIxR_STID_MASK GENMASK(31, 21)
104 #define BXCAN_RIxR_EXID_MASK GENMASK(31, 3)
105 #define BXCAN_RIxR_IDE BIT(2)
106 #define BXCAN_RIxR_RTR BIT(1)
107 
108 /* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */
109 #define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16)
110 #define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)
111 
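/* Offsets of the acceptance filter registers. These live in the filter
 * bank block shared by both CAN instances, outside the per-instance
 * register window, and are accessed through the "st,gcan" syscon regmap
 * (priv->gcan) rather than through priv->regs.
 */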
112 #define BXCAN_FMR_REG 0x00
113 #define BXCAN_FM1R_REG 0x04
114 #define BXCAN_FS1R_REG 0x0c
115 #define BXCAN_FFA1R_REG 0x14
116 #define BXCAN_FA1R_REG 0x1c
117 #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
118 #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
119 
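/* Filter bank owned by this instance: bank 0 in single or dual-primary
 * configuration, bank 14 in the dual-secondary one. This matches the
 * CANSB value programmed in bxcan_enable_filters(), which assigns banks
 * 14 and above to the secondary controller.
 */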
120 #define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
121 
122 /* Filter primary register (FMR) bits */
123 #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
124 #define BXCAN_FMR_FINIT BIT(0)
125 
126 enum bxcan_lec_code {
127 	BXCAN_LEC_NO_ERROR = 0,
128 	BXCAN_LEC_STUFF_ERROR,
129 	BXCAN_LEC_FORM_ERROR,
130 	BXCAN_LEC_ACK_ERROR,
131 	BXCAN_LEC_BIT1_ERROR,
132 	BXCAN_LEC_BIT0_ERROR,
133 	BXCAN_LEC_CRC_ERROR,
134 	BXCAN_LEC_UNUSED
135 };
136 
137 enum bxcan_cfg {
138 	BXCAN_CFG_SINGLE = 0,
139 	BXCAN_CFG_DUAL_PRIMARY,
140 	BXCAN_CFG_DUAL_SECONDARY
141 };
142 
143 /* Structure of the message buffer */
144 struct bxcan_mb {
145 	u32 id;			/* can identifier */
146 	u32 dlc;		/* data length code and timestamp */
147 	u32 data[2];		/* data */
148 };
149 
150 /* Structure of the hardware registers */
151 struct bxcan_regs {
152 	u32 mcr;			/* 0x00 - primary control */
153 	u32 msr;			/* 0x04 - primary status */
154 	u32 tsr;			/* 0x08 - transmit status */
155 	u32 rf0r;			/* 0x0c - FIFO 0 */
156 	u32 rf1r;			/* 0x10 - FIFO 1 */
157 	u32 ier;			/* 0x14 - interrupt enable */
158 	u32 esr;			/* 0x18 - error status */
159 	u32 btr;			/* 0x1c - bit timing */
160 	u32 reserved0[88];		/* 0x20 */
161 	struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM];	/* 0x180 - tx mailbox */
162 	struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM];	/* 0x1b0 - rx mailbox */
163 };
164 
165 struct bxcan_priv {
166 	struct can_priv can;
167 	struct can_rx_offload offload;
168 	struct device *dev;
169 	struct net_device *ndev;
170 
171 	struct bxcan_regs __iomem *regs;
172 	struct regmap *gcan;
173 	int tx_irq;
174 	int sce_irq;
175 	enum bxcan_cfg cfg;
176 	struct clk *clk;
177 	spinlock_t rmw_lock;	/* lock for read-modify-write operations */
178 	unsigned int tx_head;
179 	unsigned int tx_tail;
180 	u32 timestamp;
181 };
182 
183 static const struct can_bittiming_const bxcan_bittiming_const = {
184 	.name = KBUILD_MODNAME,
185 	.tseg1_min = 1,
186 	.tseg1_max = 16,
187 	.tseg2_min = 1,
188 	.tseg2_max = 8,
189 	.sjw_max = 4,
190 	.brp_min = 1,
191 	.brp_max = 1024,
192 	.brp_inc = 1,
193 };
194 
195 static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
196 			     u32 clear, u32 set)
197 {
198 	unsigned long flags;
199 	u32 old, val;
200 
201 	spin_lock_irqsave(&priv->rmw_lock, flags);
202 	old = readl(addr);
203 	val = (old & ~clear) | set;
204 	if (val != old)
205 		writel(val, addr);
206 
207 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
208 }
209 
210 static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
211 {
212 	unsigned int fid = BXCAN_FILTER_ID(cfg);
213 	u32 fmask = BIT(fid);
214 
215 	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
216 }
217 
218 static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
219 {
220 	unsigned int fid = BXCAN_FILTER_ID(cfg);
221 	u32 fmask = BIT(fid);
222 
223 	/* Filter settings:
224 	 *
225 	 * Accept all messages.
226 	 * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier
227 	 * mask mode with 32 bits width.
228 	 */
229 
230 	/* Enter filter initialization mode and assign filters to CAN
231 	 * controllers.
232 	 */
233 	regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
234 			   BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT,
235 			   FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) |
236 			   BXCAN_FMR_FINIT);
237 
238 	/* Deactivate filter */
239 	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
240 
241 	/* Two 32-bit registers in identifier mask mode */
242 	regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0);
243 
244 	/* Single 32-bit scale configuration */
245 	regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask);
246 
247 	/* Assign filter to FIFO 0 */
248 	regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0);
249 
250 	/* Accept all messages */
251 	regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0);
252 	regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0);
253 
254 	/* Activate filter */
255 	regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask);
256 
257 	/* Exit filter initialization mode */
258 	regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0);
259 }
260 
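/* TX mailbox bookkeeping: tx_head is advanced in bxcan_start_xmit() and
 * tx_tail in bxcan_tx_isr(), so head - tail is the number of frames in
 * flight. Both counters are free running; only their difference and
 * their value modulo BXCAN_TX_MB_NUM matter, so unsigned wrap-around is
 * harmless.
 */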
261 static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv)
262 {
263 	return priv->tx_head % BXCAN_TX_MB_NUM;
264 }
265 
266 static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv)
267 {
268 	return priv->tx_tail % BXCAN_TX_MB_NUM;
269 }
270 
271 static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv)
272 {
273 	return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail);
274 }
275 
276 static bool bxcan_tx_busy(const struct bxcan_priv *priv)
277 {
278 	if (bxcan_get_tx_free(priv) > 0)
279 		return false;
280 
281 	netif_stop_queue(priv->ndev);
282 
283 	/* Memory barrier before checking tx_free (head and tail) */
284 	smp_mb();
285 
286 	if (bxcan_get_tx_free(priv) == 0) {
287 		netdev_dbg(priv->ndev,
288 			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
289 			   priv->tx_head, priv->tx_tail,
290 			   priv->tx_head - priv->tx_tail);
291 
292 		return true;
293 	}
294 
295 	netif_start_queue(priv->ndev);
296 
297 	return false;
298 }
299 
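/* Mode transitions (reset, init, sleep) are requested through the MCR
 * bits and only take effect once the hardware acknowledges them in MSR,
 * so the helpers below poll the corresponding acknowledge flag with a
 * timeout.
 */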
300 static int bxcan_chip_softreset(struct bxcan_priv *priv)
301 {
302 	struct bxcan_regs __iomem *regs = priv->regs;
303 	u32 value;
304 
305 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET);
306 	return readx_poll_timeout(readl, &regs->msr, value,
307 				  value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
308 				  USEC_PER_SEC);
309 }
310 
311 static int bxcan_enter_init_mode(struct bxcan_priv *priv)
312 {
313 	struct bxcan_regs __iomem *regs = priv->regs;
314 	u32 value;
315 
316 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ);
317 	return readx_poll_timeout(readl, &regs->msr, value,
318 				  value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US,
319 				  USEC_PER_SEC);
320 }
321 
322 static int bxcan_leave_init_mode(struct bxcan_priv *priv)
323 {
324 	struct bxcan_regs __iomem *regs = priv->regs;
325 	u32 value;
326 
327 	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0);
328 	return readx_poll_timeout(readl, &regs->msr, value,
329 				  !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US,
330 				  USEC_PER_SEC);
331 }
332 
333 static int bxcan_enter_sleep_mode(struct bxcan_priv *priv)
334 {
335 	struct bxcan_regs __iomem *regs = priv->regs;
336 	u32 value;
337 
338 	bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP);
339 	return readx_poll_timeout(readl, &regs->msr, value,
340 				  value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
341 				  USEC_PER_SEC);
342 }
343 
344 static int bxcan_leave_sleep_mode(struct bxcan_priv *priv)
345 {
346 	struct bxcan_regs __iomem *regs = priv->regs;
347 	u32 value;
348 
349 	bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0);
350 	return readx_poll_timeout(readl, &regs->msr, value,
351 				  !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US,
352 				  USEC_PER_SEC);
353 }
354 
355 static inline
356 struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
357 {
358 	return container_of(offload, struct bxcan_priv, offload);
359 }
360 
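/* rx-offload callback, called from NAPI context to read one frame at a
 * time from RX FIFO 0. The mailbox is released by setting RFOM0 in all
 * cases (frame read, empty FIFO, drop or allocation failure) so that
 * the FIFO can advance.
 */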
361 static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload,
362 					  unsigned int mbxno, u32 *timestamp,
363 					  bool drop)
364 {
365 	struct bxcan_priv *priv = rx_offload_to_priv(offload);
366 	struct bxcan_regs __iomem *regs = priv->regs;
367 	struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0];
368 	struct sk_buff *skb = NULL;
369 	struct can_frame *cf;
370 	u32 rf0r, id, dlc;
371 
372 	rf0r = readl(&regs->rf0r);
373 	if (unlikely(drop)) {
374 		skb = ERR_PTR(-ENOBUFS);
375 		goto mark_as_read;
376 	}
377 
378 	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
379 		goto mark_as_read;
380 
381 	skb = alloc_can_skb(offload->dev, &cf);
382 	if (unlikely(!skb)) {
383 		skb = ERR_PTR(-ENOMEM);
384 		goto mark_as_read;
385 	}
386 
387 	id = readl(&mb_regs->id);
388 	if (id & BXCAN_RIxR_IDE)
389 		cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG;
390 	else
391 		cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK;
392 
393 	dlc = readl(&mb_regs->dlc);
394 	priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc);
395 	cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc));
396 
397 	if (id & BXCAN_RIxR_RTR) {
398 		cf->can_id |= CAN_RTR_FLAG;
399 	} else {
400 		int i, j;
401 
402 		for (i = 0, j = 0; i < cf->len; i += 4, j++)
403 			*(u32 *)(cf->data + i) = readl(&mb_regs->data[j]);
404 	}
405 
406  mark_as_read:
407 	rf0r |= BXCAN_RF0R_RFOM0;
408 	writel(rf0r, &regs->rf0r);
409 	return skb;
410 }
411 
412 static irqreturn_t bxcan_rx_isr(int irq, void *dev_id)
413 {
414 	struct net_device *ndev = dev_id;
415 	struct bxcan_priv *priv = netdev_priv(ndev);
416 	struct bxcan_regs __iomem *regs = priv->regs;
417 	u32 rf0r;
418 
419 	rf0r = readl(&regs->rf0r);
420 	if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
421 		return IRQ_NONE;
422 
423 	can_rx_offload_irq_offload_fifo(&priv->offload);
424 	can_rx_offload_irq_finish(&priv->offload);
425 
426 	return IRQ_HANDLED;
427 }
428 
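/* TX completion interrupt. With TXFP set the mailboxes are expected to
 * complete in the order they were queued, so completed requests are
 * reaped from tx_tail upwards. Writing the read TSR value back clears
 * the set RQCP bits, and the queue is woken once a mailbox is free
 * again.
 */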
429 static irqreturn_t bxcan_tx_isr(int irq, void *dev_id)
430 {
431 	struct net_device *ndev = dev_id;
432 	struct bxcan_priv *priv = netdev_priv(ndev);
433 	struct bxcan_regs __iomem *regs = priv->regs;
434 	struct net_device_stats *stats = &ndev->stats;
435 	u32 tsr, rqcp_bit;
436 	int idx;
437 
438 	tsr = readl(&regs->tsr);
439 	if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2)))
440 		return IRQ_NONE;
441 
442 	while (priv->tx_head - priv->tx_tail > 0) {
443 		idx = bxcan_get_tx_tail(priv);
444 		rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3);
445 		if (!(tsr & rqcp_bit))
446 			break;
447 
448 		stats->tx_packets++;
449 		stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
450 		priv->tx_tail++;
451 	}
452 
453 	writel(tsr, &regs->tsr);
454 
455 	if (bxcan_get_tx_free(priv)) {
456 		/* Make sure that anybody stopping the queue after
457 		 * this sees the new tx_tail value.
458 		 */
459 		smp_mb();
460 		netif_wake_queue(ndev);
461 	}
462 
463 	return IRQ_HANDLED;
464 }
465 
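/* Translate the ESR error flags into a socketcan state change and, when
 * the state actually changed, queue an error frame carrying the new
 * state and error counters.
 */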
466 static void bxcan_handle_state_change(struct net_device *ndev, u32 esr)
467 {
468 	struct bxcan_priv *priv = netdev_priv(ndev);
469 	enum can_state new_state = priv->can.state;
470 	struct can_berr_counter bec;
471 	enum can_state rx_state, tx_state;
472 	struct sk_buff *skb;
473 	struct can_frame *cf;
474 
475 	/* Early exit if no error flag is set */
476 	if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF)))
477 		return;
478 
479 	bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
480 	bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
481 
482 	if (esr & BXCAN_ESR_BOFF)
483 		new_state = CAN_STATE_BUS_OFF;
484 	else if (esr & BXCAN_ESR_EPVF)
485 		new_state = CAN_STATE_ERROR_PASSIVE;
486 	else if (esr & BXCAN_ESR_EWGF)
487 		new_state = CAN_STATE_ERROR_WARNING;
488 
489 	/* state hasn't changed */
490 	if (unlikely(new_state == priv->can.state))
491 		return;
492 
493 	skb = alloc_can_err_skb(ndev, &cf);
494 
495 	tx_state = bec.txerr >= bec.rxerr ? new_state : 0;
496 	rx_state = bec.txerr <= bec.rxerr ? new_state : 0;
497 	can_change_state(ndev, cf, tx_state, rx_state);
498 
499 	if (new_state == CAN_STATE_BUS_OFF) {
500 		can_bus_off(ndev);
501 	} else if (skb) {
502 		cf->can_id |= CAN_ERR_CNT;
503 		cf->data[6] = bec.txerr;
504 		cf->data[7] = bec.rxerr;
505 	}
506 
507 	if (skb) {
508 		int err;
509 
510 		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
511 						     priv->timestamp);
512 		if (err)
513 			ndev->stats.rx_fifo_errors++;
514 	}
515 }
516 
517 static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr)
518 {
519 	struct bxcan_priv *priv = netdev_priv(ndev);
520 	enum bxcan_lec_code lec_code;
521 	struct can_frame *cf;
522 	struct sk_buff *skb;
523 
524 	lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr);
525 
526 	/* Early exit if no lec update or no error.
527 	 * No lec update means that no CAN bus event has been detected
528 	 * since the CPU wrote the BXCAN_LEC_UNUSED value to the status reg.
529 	 */
530 	if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR)
531 		return;
532 
533 	/* Common for all type of bus errors */
534 	priv->can.can_stats.bus_error++;
535 
536 	/* Propagate the error condition to the CAN stack */
537 	skb = alloc_can_err_skb(ndev, &cf);
538 	if (skb)
539 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
540 
541 	switch (lec_code) {
542 	case BXCAN_LEC_STUFF_ERROR:
543 		netdev_dbg(ndev, "Stuff error\n");
544 		ndev->stats.rx_errors++;
545 		if (skb)
546 			cf->data[2] |= CAN_ERR_PROT_STUFF;
547 		break;
548 
549 	case BXCAN_LEC_FORM_ERROR:
550 		netdev_dbg(ndev, "Form error\n");
551 		ndev->stats.rx_errors++;
552 		if (skb)
553 			cf->data[2] |= CAN_ERR_PROT_FORM;
554 		break;
555 
556 	case BXCAN_LEC_ACK_ERROR:
557 		netdev_dbg(ndev, "Ack error\n");
558 		ndev->stats.tx_errors++;
559 		if (skb) {
560 			cf->can_id |= CAN_ERR_ACK;
561 			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
562 		}
563 		break;
564 
565 	case BXCAN_LEC_BIT1_ERROR:
566 		netdev_dbg(ndev, "Bit error (recessive)\n");
567 		ndev->stats.tx_errors++;
568 		if (skb)
569 			cf->data[2] |= CAN_ERR_PROT_BIT1;
570 		break;
571 
572 	case BXCAN_LEC_BIT0_ERROR:
573 		netdev_dbg(ndev, "Bit error (dominant)\n");
574 		ndev->stats.tx_errors++;
575 		if (skb)
576 			cf->data[2] |= CAN_ERR_PROT_BIT0;
577 		break;
578 
579 	case BXCAN_LEC_CRC_ERROR:
580 		netdev_dbg(ndev, "CRC error\n");
581 		ndev->stats.rx_errors++;
582 		if (skb) {
583 			cf->data[2] |= CAN_ERR_PROT_BIT;
584 			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
585 		}
586 		break;
587 
588 	default:
589 		break;
590 	}
591 
592 	if (skb) {
593 		int err;
594 
595 		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
596 						     priv->timestamp);
597 		if (err)
598 			ndev->stats.rx_fifo_errors++;
599 	}
600 }
601 
602 static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id)
603 {
604 	struct net_device *ndev = dev_id;
605 	struct bxcan_priv *priv = netdev_priv(ndev);
606 	struct bxcan_regs __iomem *regs = priv->regs;
607 	u32 msr, esr;
608 
609 	msr = readl(&regs->msr);
610 	if (!(msr & BXCAN_MSR_ERRI))
611 		return IRQ_NONE;
612 
613 	esr = readl(&regs->esr);
614 	bxcan_handle_state_change(ndev, esr);
615 
616 	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
617 		bxcan_handle_bus_err(ndev, esr);
618 
619 	msr |= BXCAN_MSR_ERRI;
620 	writel(msr, &regs->msr);
621 	can_rx_offload_irq_finish(&priv->offload);
622 
623 	return IRQ_HANDLED;
624 }
625 
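/* Bring the controller up: soft reset, leave sleep mode, enter init
 * mode, program MCR, bit timing and the acceptance filter, leave init
 * mode and finally enable the interrupt sources. On failure the chip is
 * soft reset again so it is left in a known state.
 */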
626 static int bxcan_chip_start(struct net_device *ndev)
627 {
628 	struct bxcan_priv *priv = netdev_priv(ndev);
629 	struct bxcan_regs __iomem *regs = priv->regs;
630 	struct can_bittiming *bt = &priv->can.bittiming;
631 	u32 clr, set;
632 	int err;
633 
634 	err = bxcan_chip_softreset(priv);
635 	if (err) {
636 		netdev_err(ndev, "failed to reset chip, error %pe\n",
637 			   ERR_PTR(err));
638 		return err;
639 	}
640 
641 	err = bxcan_leave_sleep_mode(priv);
642 	if (err) {
643 		netdev_err(ndev, "failed to leave sleep mode, error %pe\n",
644 			   ERR_PTR(err));
645 		goto failed_leave_sleep;
646 	}
647 
648 	err = bxcan_enter_init_mode(priv);
649 	if (err) {
650 		netdev_err(ndev, "failed to enter init mode, error %pe\n",
651 			   ERR_PTR(err));
652 		goto failed_enter_init;
653 	}
654 
655 	/* MCR
656 	 *
657 	 * select request order priority
658 	 * enable time triggered mode
659 	 * bus-off state left on sw request
660 	 * sleep mode left on sw request
661 	 * retransmit automatically on error
662 	 * do not lock RX FIFO on overrun
663 	 */
664 	bxcan_rmw(priv, &regs->mcr,
665 		  BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART |
666 		  BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP);
667 
668 	/* Bit timing register settings */
669 	set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) |
670 		FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 +
671 			   bt->prop_seg - 1) |
672 		FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) |
673 		FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1);
674 
675 	/* loopback + silent mode puts the controller in test mode,
676 	 * useful for hot self-test
677 	 */
678 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
679 		set |= BXCAN_BTR_LBKM;
680 
681 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
682 		set |= BXCAN_BTR_SILM;
683 
684 	bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM |
685 		  BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
686 		  BXCAN_BTR_SJW_MASK, set);
687 
688 	bxcan_enable_filters(priv, priv->cfg);
689 
690 	/* Clear all internal status */
691 	priv->tx_head = 0;
692 	priv->tx_tail = 0;
693 
694 	err = bxcan_leave_init_mode(priv);
695 	if (err) {
696 		netdev_err(ndev, "failed to leave init mode, error %pe\n",
697 			   ERR_PTR(err));
698 		goto failed_leave_init;
699 	}
700 
701 	/* Set a `lec` value so that we can check for updates later */
702 	bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK,
703 		  FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED));
704 
705 	/* IER
706 	 *
707 	 * Enable interrupt for:
708 	 * bus-off
709 	 * passive error
710 	 * warning error
711 	 * last error code
712 	 * RX FIFO pending message
713 	 * TX mailbox empty
714 	 */
715 	clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 |
716 		BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
717 		BXCAN_IER_FFIE0;
718 	set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE |
719 		BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE;
720 
721 	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
722 		set |= BXCAN_IER_LECIE;
723 	else
724 		clr |= BXCAN_IER_LECIE;
725 
726 	bxcan_rmw(priv, &regs->ier, clr, set);
727 
728 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
729 	return 0;
730 
731 failed_leave_init:
732 failed_enter_init:
733 failed_leave_sleep:
734 	bxcan_chip_softreset(priv);
735 	return err;
736 }
737 
738 static int bxcan_open(struct net_device *ndev)
739 {
740 	struct bxcan_priv *priv = netdev_priv(ndev);
741 	int err;
742 
743 	err = clk_prepare_enable(priv->clk);
744 	if (err) {
745 		netdev_err(ndev, "failed to enable clock, error %pe\n",
746 			   ERR_PTR(err));
747 		return err;
748 	}
749 
750 	err = open_candev(ndev);
751 	if (err) {
752 		netdev_err(ndev, "open_candev() failed, error %pe\n",
753 			   ERR_PTR(err));
754 		goto out_disable_clock;
755 	}
756 
757 	can_rx_offload_enable(&priv->offload);
758 	err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name,
759 			  ndev);
760 	if (err) {
761 		netdev_err(ndev, "failed to register rx irq(%d), error %pe\n",
762 			   ndev->irq, ERR_PTR(err));
763 		goto out_close_candev;
764 	}
765 
766 	err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name,
767 			  ndev);
768 	if (err) {
769 		netdev_err(ndev, "failed to register tx irq(%d), error %pe\n",
770 			   priv->tx_irq, ERR_PTR(err));
771 		goto out_free_rx_irq;
772 	}
773 
774 	err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED,
775 			  ndev->name, ndev);
776 	if (err) {
777 		netdev_err(ndev, "failed to register sce irq(%d), error %pe\n",
778 			   priv->sce_irq, ERR_PTR(err));
779 		goto out_free_tx_irq;
780 	}
781 
782 	err = bxcan_chip_start(ndev);
783 	if (err)
784 		goto out_free_sce_irq;
785 
786 	netif_start_queue(ndev);
787 	return 0;
788 
789 out_free_sce_irq:
790 	free_irq(priv->sce_irq, ndev);
791 out_free_tx_irq:
792 	free_irq(priv->tx_irq, ndev);
793 out_free_rx_irq:
794 	free_irq(ndev->irq, ndev);
795 out_close_candev:
796 	can_rx_offload_disable(&priv->offload);
797 	close_candev(ndev);
798 out_disable_clock:
799 	clk_disable_unprepare(priv->clk);
800 	return err;
801 }
802 
803 static void bxcan_chip_stop(struct net_device *ndev)
804 {
805 	struct bxcan_priv *priv = netdev_priv(ndev);
806 	struct bxcan_regs __iomem *regs = priv->regs;
807 
808 	/* disable all interrupts */
809 	bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE |
810 		  BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE |
811 		  BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
812 		  BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
813 		  BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
814 	bxcan_disable_filters(priv, priv->cfg);
815 	bxcan_enter_sleep_mode(priv);
816 	priv->can.state = CAN_STATE_STOPPED;
817 }
818 
819 static int bxcan_stop(struct net_device *ndev)
820 {
821 	struct bxcan_priv *priv = netdev_priv(ndev);
822 
823 	netif_stop_queue(ndev);
824 	bxcan_chip_stop(ndev);
825 	free_irq(ndev->irq, ndev);
826 	free_irq(priv->tx_irq, ndev);
827 	free_irq(priv->sce_irq, ndev);
828 	can_rx_offload_disable(&priv->offload);
829 	close_candev(ndev);
830 	clk_disable_unprepare(priv->clk);
831 	return 0;
832 }
833 
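/* Queue one frame for transmission: pick the next free TX mailbox, stop
 * the queue if it was the last one, write identifier, payload and DLC,
 * stash the echo skb and finally set TXRQ to start the transfer.
 */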
834 static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
835 				    struct net_device *ndev)
836 {
837 	struct bxcan_priv *priv = netdev_priv(ndev);
838 	struct can_frame *cf = (struct can_frame *)skb->data;
839 	struct bxcan_regs __iomem *regs = priv->regs;
840 	struct bxcan_mb __iomem *mb_regs;
841 	unsigned int idx;
842 	u32 id;
843 	int i, j;
844 
845 	if (can_dropped_invalid_skb(ndev, skb))
846 		return NETDEV_TX_OK;
847 
848 	if (bxcan_tx_busy(priv))
849 		return NETDEV_TX_BUSY;
850 
851 	idx = bxcan_get_tx_head(priv);
852 	priv->tx_head++;
853 	if (bxcan_get_tx_free(priv) == 0)
854 		netif_stop_queue(ndev);
855 
856 	mb_regs = &regs->tx_mb[idx];
857 	if (cf->can_id & CAN_EFF_FLAG)
858 		id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) |
859 			BXCAN_TIxR_IDE;
860 	else
861 		id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id);
862 
863 	if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
864 		id |= BXCAN_TIxR_RTR;
865 	} else {
866 		for (i = 0, j = 0; i < cf->len; i += 4, j++)
867 			writel(*(u32 *)(cf->data + i), &mb_regs->data[j]);
868 	}
869 
870 	writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc);
871 
872 	can_put_echo_skb(skb, ndev, idx, 0);
873 
874 	/* Start transmission */
875 	writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id);
876 
877 	return NETDEV_TX_OK;
878 }
879 
880 static const struct net_device_ops bxcan_netdev_ops = {
881 	.ndo_open = bxcan_open,
882 	.ndo_stop = bxcan_stop,
883 	.ndo_start_xmit = bxcan_start_xmit,
884 	.ndo_change_mtu = can_change_mtu,
885 };
886 
887 static const struct ethtool_ops bxcan_ethtool_ops = {
888 	.get_ts_info = ethtool_op_get_ts_info,
889 };
890 
891 static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
892 {
893 	int err;
894 
895 	switch (mode) {
896 	case CAN_MODE_START:
897 		err = bxcan_chip_start(ndev);
898 		if (err)
899 			return err;
900 
901 		netif_wake_queue(ndev);
902 		break;
903 
904 	default:
905 		return -EOPNOTSUPP;
906 	}
907 
908 	return 0;
909 }
910 
911 static int bxcan_get_berr_counter(const struct net_device *ndev,
912 				  struct can_berr_counter *bec)
913 {
914 	struct bxcan_priv *priv = netdev_priv(ndev);
915 	struct bxcan_regs __iomem *regs = priv->regs;
916 	u32 esr;
917 	int err;
918 
919 	err = clk_prepare_enable(priv->clk);
920 	if (err)
921 		return err;
922 
923 	esr = readl(&regs->esr);
924 	bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
925 	bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
926 	clk_disable_unprepare(priv->clk);
927 	return 0;
928 }
929 
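/* Probe: map the per-instance registers, look up the shared filter block
 * via the "st,gcan" syscon phandle, derive the single/primary/secondary
 * role from the DT properties, fetch the clock and the rx0/tx/sce
 * interrupts, then register the candev with FIFO rx-offload.
 */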
930 static int bxcan_probe(struct platform_device *pdev)
931 {
932 	struct device_node *np = pdev->dev.of_node;
933 	struct device *dev = &pdev->dev;
934 	struct net_device *ndev;
935 	struct bxcan_priv *priv;
936 	struct clk *clk = NULL;
937 	void __iomem *regs;
938 	struct regmap *gcan;
939 	enum bxcan_cfg cfg;
940 	int err, rx_irq, tx_irq, sce_irq;
941 
942 	regs = devm_platform_ioremap_resource(pdev, 0);
943 	if (IS_ERR(regs)) {
944 		dev_err(dev, "failed to get base address\n");
945 		return PTR_ERR(regs);
946 	}
947 
948 	gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan");
949 	if (IS_ERR(gcan)) {
950 		dev_err(dev, "failed to get shared memory base address\n");
951 		return PTR_ERR(gcan);
952 	}
953 
954 	if (of_property_read_bool(np, "st,can-primary"))
955 		cfg = BXCAN_CFG_DUAL_PRIMARY;
956 	else if (of_property_read_bool(np, "st,can-secondary"))
957 		cfg = BXCAN_CFG_DUAL_SECONDARY;
958 	else
959 		cfg = BXCAN_CFG_SINGLE;
960 
961 	clk = devm_clk_get(dev, NULL);
962 	if (IS_ERR(clk)) {
963 		dev_err(dev, "failed to get clock\n");
964 		return PTR_ERR(clk);
965 	}
966 
967 	rx_irq = platform_get_irq_byname(pdev, "rx0");
968 	if (rx_irq < 0)
969 		return rx_irq;
970 
971 	tx_irq = platform_get_irq_byname(pdev, "tx");
972 	if (tx_irq < 0)
973 		return tx_irq;
974 
975 	sce_irq = platform_get_irq_byname(pdev, "sce");
976 	if (sce_irq < 0)
977 		return sce_irq;
978 
979 	ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM);
980 	if (!ndev) {
981 		dev_err(dev, "alloc_candev() failed\n");
982 		return -ENOMEM;
983 	}
984 
985 	priv = netdev_priv(ndev);
986 	platform_set_drvdata(pdev, ndev);
987 	SET_NETDEV_DEV(ndev, dev);
988 	ndev->netdev_ops = &bxcan_netdev_ops;
989 	ndev->ethtool_ops = &bxcan_ethtool_ops;
990 	ndev->irq = rx_irq;
991 	ndev->flags |= IFF_ECHO;
992 
993 	priv->dev = dev;
994 	priv->ndev = ndev;
995 	priv->regs = regs;
996 	priv->gcan = gcan;
997 	priv->clk = clk;
998 	priv->tx_irq = tx_irq;
999 	priv->sce_irq = sce_irq;
1000 	priv->cfg = cfg;
1001 	priv->can.clock.freq = clk_get_rate(clk);
1002 	spin_lock_init(&priv->rmw_lock);
1003 	priv->tx_head = 0;
1004 	priv->tx_tail = 0;
1005 	priv->can.bittiming_const = &bxcan_bittiming_const;
1006 	priv->can.do_set_mode = bxcan_do_set_mode;
1007 	priv->can.do_get_berr_counter = bxcan_get_berr_counter;
1008 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1009 		CAN_CTRLMODE_LISTENONLY	| CAN_CTRLMODE_BERR_REPORTING;
1010 
1011 	priv->offload.mailbox_read = bxcan_mailbox_read;
1012 	err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT);
1013 	if (err) {
1014 		dev_err(dev, "failed to add FIFO rx_offload\n");
1015 		goto out_free_candev;
1016 	}
1017 
1018 	err = register_candev(ndev);
1019 	if (err) {
1020 		dev_err(dev, "failed to register netdev\n");
1021 		goto out_can_rx_offload_del;
1022 	}
1023 
1024 	dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq,
1025 		 tx_irq, rx_irq, sce_irq);
1026 	return 0;
1027 
1028 out_can_rx_offload_del:
1029 	can_rx_offload_del(&priv->offload);
1030 out_free_candev:
1031 	free_candev(ndev);
1032 	return err;
1033 }
1034 
1035 static void bxcan_remove(struct platform_device *pdev)
1036 {
1037 	struct net_device *ndev = platform_get_drvdata(pdev);
1038 	struct bxcan_priv *priv = netdev_priv(ndev);
1039 
1040 	unregister_candev(ndev);
1041 	clk_disable_unprepare(priv->clk);
1042 	can_rx_offload_del(&priv->offload);
1043 	free_candev(ndev);
1044 }
1045 
1046 static int __maybe_unused bxcan_suspend(struct device *dev)
1047 {
1048 	struct net_device *ndev = dev_get_drvdata(dev);
1049 	struct bxcan_priv *priv = netdev_priv(ndev);
1050 
1051 	if (!netif_running(ndev))
1052 		return 0;
1053 
1054 	netif_stop_queue(ndev);
1055 	netif_device_detach(ndev);
1056 
1057 	bxcan_enter_sleep_mode(priv);
1058 	priv->can.state = CAN_STATE_SLEEPING;
1059 	clk_disable_unprepare(priv->clk);
1060 	return 0;
1061 }
1062 
1063 static int __maybe_unused bxcan_resume(struct device *dev)
1064 {
1065 	struct net_device *ndev = dev_get_drvdata(dev);
1066 	struct bxcan_priv *priv = netdev_priv(ndev);
1067 
1068 	if (!netif_running(ndev))
1069 		return 0;
1070 
1071 	clk_prepare_enable(priv->clk);
1072 	bxcan_leave_sleep_mode(priv);
1073 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1074 
1075 	netif_device_attach(ndev);
1076 	netif_start_queue(ndev);
1077 	return 0;
1078 }
1079 
1080 static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume);
1081 
1082 static const struct of_device_id bxcan_of_match[] = {
1083 	{.compatible = "st,stm32f4-bxcan"},
1084 	{ /* sentinel */ },
1085 };
1086 MODULE_DEVICE_TABLE(of, bxcan_of_match);
1087 
1088 static struct platform_driver bxcan_driver = {
1089 	.driver = {
1090 		.name = KBUILD_MODNAME,
1091 		.pm = &bxcan_pm_ops,
1092 		.of_match_table = bxcan_of_match,
1093 	},
1094 	.probe = bxcan_probe,
1095 	.remove = bxcan_remove,
1096 };
1097 
1098 module_platform_driver(bxcan_driver);
1099 
1100 MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
1101 MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver");
1102 MODULE_LICENSE("GPL");
1103