// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
 * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
 */

#include "esdacc.h"

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ktime.h>

/* esdACC ID register layout */
#define ACC_ID_ID_MASK GENMASK(28, 0)
#define ACC_ID_EFF_FLAG BIT(29)

/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
#define ACC_DLC_TXD_FLAG BIT(5)

/* The ECC value of the esdACC matches the layout of the SJA1000 ECC register */
#define ACC_ECC_SEG 0x1f
#define ACC_ECC_DIR 0x20
#define ACC_ECC_BIT 0x00
#define ACC_ECC_FORM 0x40
#define ACC_ECC_STUFF 0x80
#define ACC_ECC_MASK 0xc0

/* esdACC Status Register bits. Unused bits are not documented. */
#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)

/* esdACC Overview Module BM_IRQ_Mask register related defines */
/*   Two-bit wide command masks to mask or unmask a single core IRQ */
#define ACC_BM_IRQ_UNMASK BIT(0)
#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
/*   Command to unmask all IRQ sources. Created by shifting
 *   and OR'ing the two-bit wide ACC_BM_IRQ_UNMASK 16 times.
 */
#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
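
/* For illustration: 0x55555555 is the two-bit unmask command (01)
 * replicated into all 16 command fields. A hypothetical equivalent
 * construction, not part of the driver, would be:
 *
 *	u32 unmask_all = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < 16; i++)
 *		unmask_all |= ACC_BM_IRQ_UNMASK << (2 * i);
 *
 * which also yields 0x55555555.
 */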

static void acc_resetmode_enter(struct acc_core *core)
{
	acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
		     ACC_REG_CONTROL_MASK_MODE_RESETMODE);

	/* Read back reset mode bit to flush PCI write posting */
	acc_resetmode_entered(core);
}

static void acc_resetmode_leave(struct acc_core *core)
{
	acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
		       ACC_REG_CONTROL_MASK_MODE_RESETMODE);

	/* Read back reset mode bit to flush PCI write posting */
	acc_resetmode_entered(core);
}

static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
			const void *data)
{
	acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
			   *((const u32 *)(data + 4)));
	acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
			   *((const u32 *)data));
	acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
	/* The CAN ID must be written last because this write starts the TX. */
	acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
}

static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
{
	++tx_fifo_idx;
	if (tx_fifo_idx >= core->tx_fifo_size)
		tx_fifo_idx = 0U;
	return tx_fifo_idx;
}

/* Convert a timestamp from esdACC time stamp ticks to ns
 *
 * The conversion factor ts2ns from time stamp counts to ns is basically
 *	ts2ns = NSEC_PER_SEC / timestamp_frequency
 *
 * Only the fixed timestamp frequency of 80 MHz is handled here, which
 * gives a ts2ns factor of 12.5.
 *
 * To multiply by 12.5 without a floating point operation, the timestamp
 * is multiplied by 12 and half of the HW timestamp is added. This way
 * any overflow is avoided until ktime_t itself overflows.
 */
#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
#define ACC_TS_80MHZ_SHIFT 1

static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
{
	u64 ns;

	ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);

	return ns_to_ktime(ns);
}

#undef ACC_TS_FACTOR
#undef ACC_TS_80MHZ_SHIFT
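
/* Worked example (illustrative only): at 80 MHz one second corresponds
 * to ts = 80,000,000 ticks. ACC_TS_FACTOR truncates 12.5 down to 12,
 * so acc_ts2ktime() computes
 *
 *	80,000,000 * 12 + (80,000,000 >> 1) = 1,000,000,000 ns
 *
 * i.e. exactly one second, with the shift term recovering the 0.5 ns
 * per tick lost by the integer division.
 */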

void acc_init_ov(struct acc_ov *ov, struct device *dev)
{
	u32 temp;

	temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
	ov->version = temp;
	ov->features = (temp >> 16);

	temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
	ov->total_cores = temp;
	ov->active_cores = (temp >> 8);

	ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
	ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);

	/* Depending on the esdACC feature NEW_PSC either enable the new
	 * prescaler or adjust core_frequency to account for the implicit
	 * division by 2.
	 */
	if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
		acc_ov_set_bits(ov, ACC_OV_OF_MODE,
				ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
	} else {
		ov->core_frequency /= 2;
	}

	dev_dbg(dev,
		"esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
		ov->version, ov->core_frequency, ov->timestamp_frequency,
		ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
		ov->active_cores, ov->total_cores);
}

void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
{
	unsigned int u;

	/* DMA buffer layout as follows where N is the number of CAN cores
	 * implemented in the FPGA, i.e. N = ov->total_cores
	 *
	 *  Section Layout           Section size
	 * ----------------------------------------------
	 *  FIFO Card/Overview       ACC_CORE_DMABUF_SIZE
	 *  FIFO Core0               ACC_CORE_DMABUF_SIZE
	 *  ...                      ...
	 *  FIFO Core(N-1)           ACC_CORE_DMABUF_SIZE
	 *  irq_cnt Card/Overview    sizeof(u32)
	 *  irq_cnt Core0            sizeof(u32)
	 *  ...                      ...
	 *  irq_cnt Core(N-1)        sizeof(u32)
	 */
	ov->bmfifo.messages = mem;
	ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;

	for (u = 0U; u < ov->active_cores; u++) {
		struct acc_core *core = &cores[u];

		core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
		core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
	}
}
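
/* For illustration (values invented): with ov->total_cores == 2 and
 * ACC_CORE_DMABUF_SIZE == S, the pointers set up above resolve to
 *
 *	ov->bmfifo.messages      = mem          (overview FIFO)
 *	cores[0].bmfifo.messages = mem + 1 * S
 *	cores[1].bmfifo.messages = mem + 2 * S
 *	ov->bmfifo.irq_cnt       = mem + 3 * S  (start of the u32 array)
 *	cores[0].bmfifo.irq_cnt  = ov->bmfifo.irq_cnt + 1
 *	cores[1].bmfifo.irq_cnt  = ov->bmfifo.irq_cnt + 2
 */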

int acc_open(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;
	u32 tx_fifo_status;
	u32 ctrl_mode;
	int err;

	/* Retry to enter RESET mode if out of sync. */
	if (priv->can.state != CAN_STATE_STOPPED) {
		netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
			    __func__, can_get_state_str(priv->can.state));
		acc_resetmode_enter(core);
		priv->can.state = CAN_STATE_STOPPED;
	}

	err = open_candev(netdev);
	if (err)
		return err;

	ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
			ACC_REG_CONTROL_MASK_IE_TXERROR |
			ACC_REG_CONTROL_MASK_IE_ERRWARN |
			ACC_REG_CONTROL_MASK_IE_OVERRUN |
			ACC_REG_CONTROL_MASK_IE_ERRPASS;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;

	acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);

	acc_resetmode_leave(core);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Resync TX FIFO indices to HW state after (re-)start. */
	tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
	core->tx_fifo_head = tx_fifo_status & 0xff;
	core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;

	netif_start_queue(netdev);
	return 0;
}

int acc_close(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;

	acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
		       ACC_REG_CONTROL_MASK_IE_RXTX |
		       ACC_REG_CONTROL_MASK_IE_TXERROR |
		       ACC_REG_CONTROL_MASK_IE_ERRWARN |
		       ACC_REG_CONTROL_MASK_IE_OVERRUN |
		       ACC_REG_CONTROL_MASK_IE_ERRPASS |
		       ACC_REG_CONTROL_MASK_IE_BUSERR);

	netif_stop_queue(netdev);
	acc_resetmode_enter(core);
	priv->can.state = CAN_STATE_STOPPED;

	/* Mark pending TX requests to be aborted after controller restart. */
	acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);

	/* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
	acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
		       ACC_REG_CONTROL_MASK_MODE_LOM);

	close_candev(netdev);
	return 0;
}

netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 tx_fifo_head = core->tx_fifo_head;
	int fifo_usage;
	u32 acc_id;
	u8 acc_dlc;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* Access core->tx_fifo_tail only once because it may be changed
	 * from the interrupt level.
	 */
	fifo_usage = tx_fifo_head - core->tx_fifo_tail;
	if (fifo_usage < 0)
		fifo_usage += core->tx_fifo_size;

	if (fifo_usage >= core->tx_fifo_size - 1) {
		netdev_err(core->netdev,
			   "BUG: TX ring full when queue awake!\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* Stop the queue if this frame occupies the last usable entry;
	 * one entry is always kept free to tell a full ring from an
	 * empty one.
	 */
	if (fifo_usage == core->tx_fifo_size - 2)
		netif_stop_queue(netdev);

	acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
	if (cf->can_id & CAN_RTR_FLAG)
		acc_dlc |= ACC_DLC_RTR_FLAG;

	if (cf->can_id & CAN_EFF_FLAG) {
		acc_id = cf->can_id & CAN_EFF_MASK;
		acc_id |= ACC_ID_EFF_FLAG;
	} else {
		acc_id = cf->can_id & CAN_SFF_MASK;
	}

	can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);

	core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);

	acc_txq_put(core, acc_id, acc_dlc, cf->data);

	return NETDEV_TX_OK;
}

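/* Ring arithmetic above, worked through (illustrative values): with
 * tx_fifo_size == 8, head == 1 and tail == 6 the difference 1 - 6 == -5
 * is negative, so the wrap correction yields -5 + 8 == 3 frames in
 * flight. With head == 7 and tail == 1 the usage is 6 == 8 - 2, so the
 * queue is stopped right after this frame is queued.
 */
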
int acc_get_berr_counter(const struct net_device *netdev,
			 struct can_berr_counter *bec)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);

	bec->txerr = (core_status >> 8) & 0xff;
	bec->rxerr = core_status & 0xff;

	return 0;
}

int acc_set_mode(struct net_device *netdev, enum can_mode mode)
{
	struct acc_net_priv *priv = netdev_priv(netdev);

	switch (mode) {
	case CAN_MODE_START:
		/* Paranoid FIFO index check. */
		{
			const u32 tx_fifo_status =
				acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
			const u8 hw_fifo_head = tx_fifo_status;

			if (hw_fifo_head != priv->core->tx_fifo_head ||
			    hw_fifo_head != priv->core->tx_fifo_tail) {
				netdev_warn(netdev,
					    "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
					    priv->core->tx_fifo_tail,
					    priv->core->tx_fifo_head,
					    tx_fifo_status);
			}
		}
		acc_resetmode_leave(priv->core);
		/* To leave the bus-off state the esdACC controller begins
		 * a grace period here, counting 128 "idle conditions" (each
		 * being 11 consecutive recessive bits) on the bus as required
		 * by the CAN spec.
		 *
		 * During this time the TX FIFO may still contain already
		 * aborted "zombie" frames that are only drained from the FIFO
		 * at the end of the grace period.
		 *
		 * To avoid interfering with this drain process we don't
		 * call netif_wake_queue() here. When the controller reaches
		 * the error-active state again, it informs us about that
		 * with an acc_bmmsg_errstatechange message. Then
		 * netif_wake_queue() is called from
		 * handle_core_msg_errstatechange() instead.
		 */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

int acc_set_bittiming(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 brp;
	u32 btr;

	if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
		u32 fbtr = 0;	/* Data phase bit timing (DBTR), still unused */

		netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
			   bt->brp, bt->prop_seg,
			   bt->phase_seg1, bt->phase_seg2, bt->sjw);

		brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);

		btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);

		/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
		acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
		acc_write32(priv->core, ACC_CORE_OF_BTR, btr);

		netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
			   brp, btr, fbtr);
	} else {
		netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
			   bt->brp, bt->prop_seg,
			   bt->phase_seg1, bt->phase_seg2, bt->sjw);

		brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);

		btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);

		/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
		acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
		acc_write32(priv->core, ACC_CORE_OF_BTR, btr);

		netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
	}

	return 0;
}

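/* Worked example (illustrative values, not taken from the driver):
 * assuming an 80 MHz core frequency, brp = 8, prop_seg = 5,
 * phase_seg1 = 2 and phase_seg2 = 2 give 1 + 5 + 2 + 2 = 10 time
 * quanta per bit, i.e. 80 MHz / (8 * 10) = 1 Mbit/s. The register
 * fields hold the values minus one: BRP = 7, TSEG1 = 5 + 2 - 1 = 6,
 * TSEG2 = 1.
 */
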
static void handle_core_msg_rxtxdone(struct acc_core *core,
				     const struct acc_bmmsg_rxtxdone *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct sk_buff *skb;

	if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
		u8 tx_fifo_tail = core->tx_fifo_tail;

		if (core->tx_fifo_head == tx_fifo_tail) {
			netdev_warn(core->netdev,
				    "TX interrupt, but queue is empty!?\n");
			return;
		}

		/* Access the echo skb directly to attach the HW timestamp. */
		skb = priv->can.echo_skb[tx_fifo_tail];
		if (skb) {
			skb_hwtstamps(skb)->hwtstamp =
				acc_ts2ktime(priv->ov, msg->ts);
		}

		stats->tx_packets++;
		stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
						    NULL);

		core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);

		netif_wake_queue(core->netdev);

	} else {
		struct can_frame *cf;

		skb = alloc_can_skb(core->netdev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return;
		}

		cf->can_id = msg->id & ACC_ID_ID_MASK;
		if (msg->id & ACC_ID_EFF_FLAG)
			cf->can_id |= CAN_EFF_FLAG;

		can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
				     priv->can.ctrlmode);

		if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
			cf->can_id |= CAN_RTR_FLAG;
		} else {
			memcpy(cf->data, msg->data, cf->len);
			stats->rx_bytes += cf->len;
		}
		stats->rx_packets++;

		skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

		netif_rx(skb);
	}
}

static void handle_core_msg_txabort(struct acc_core *core,
				    const struct acc_bmmsg_txabort *msg)
{
	struct net_device_stats *stats = &core->netdev->stats;
	u8 tx_fifo_tail = core->tx_fifo_tail;
	u32 abort_mask = msg->abort_mask;   /* widen to u32 to avoid warnings below */

	/* The abort_mask shows which frames were aborted in esdACC's FIFO. */
	while (tx_fifo_tail != core->tx_fifo_head && abort_mask) {
		const u32 tail_mask = (1U << tx_fifo_tail);

		if (!(abort_mask & tail_mask))
			break;
		abort_mask &= ~tail_mask;

		can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
		stats->tx_dropped++;
		stats->tx_aborted_errors++;

		tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
	}
	core->tx_fifo_tail = tx_fifo_tail;
	if (abort_mask)
		netdev_warn(core->netdev, "Unhandled aborted messages\n");

	if (!acc_resetmode_entered(core))
		netif_wake_queue(core->netdev);
}

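/* Walk-through of the abort loop (values invented): with tail == 2,
 * head == 5 and abort_mask == 0b01100 (frames 2 and 3 aborted), the
 * loop frees echo skbs 2 and 3 and clears their mask bits; it then
 * stops at index 4 because bit 4 is not set. tail ends up at 4 and
 * abort_mask is 0, so no warning is printed.
 */
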
static void handle_core_msg_overrun(struct acc_core *core,
				    const struct acc_bmmsg_overrun *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* lost_cnt may be 0 if not supported by esdACC version */
	if (msg->lost_cnt) {
		stats->rx_errors += msg->lost_cnt;
		stats->rx_over_errors += msg->lost_cnt;
	} else {
		stats->rx_errors++;
		stats->rx_over_errors++;
	}

	skb = alloc_can_err_skb(core->netdev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

	netif_rx(skb);
}

static void handle_core_msg_buserr(struct acc_core *core,
				   const struct acc_bmmsg_buserr *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	const u32 reg_status = msg->reg_status;
	const u8 rxerr = reg_status;
	const u8 txerr = (reg_status >> 8);
	u8 can_err_prot_type = 0U;

	priv->can.can_stats.bus_error++;

	/* The ECC DIR flag is set if the error occurred during reception */
	if (msg->ecc & ACC_ECC_DIR) {
		stats->rx_errors++;
	} else {
		can_err_prot_type |= CAN_ERR_PROT_TX;
		stats->tx_errors++;
	}
	/* Determine error type */
	switch (msg->ecc & ACC_ECC_MASK) {
	case ACC_ECC_BIT:
		can_err_prot_type |= CAN_ERR_PROT_BIT;
		break;
	case ACC_ECC_FORM:
		can_err_prot_type |= CAN_ERR_PROT_FORM;
		break;
	case ACC_ECC_STUFF:
		can_err_prot_type |= CAN_ERR_PROT_STUFF;
		break;
	default:
		can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
		break;
	}

	skb = alloc_can_err_skb(core->netdev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;

	/* Set protocol error type */
	cf->data[2] = can_err_prot_type;
	/* Set error location */
	cf->data[3] = msg->ecc & ACC_ECC_SEG;

	/* Insert CAN TX and RX error counters. */
	cf->data[6] = txerr;
	cf->data[7] = rxerr;

	skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

	netif_rx(skb);
}

static void
handle_core_msg_errstatechange(struct acc_core *core,
			       const struct acc_bmmsg_errstatechange *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct can_frame *cf = NULL;
	struct sk_buff *skb;
	const u32 reg_status = msg->reg_status;
	const u8 rxerr = reg_status;
	const u8 txerr = (reg_status >> 8);
	enum can_state new_state;

	if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
		new_state = CAN_STATE_BUS_OFF;
	} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
		new_state = CAN_STATE_ERROR_PASSIVE;
	} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
		new_state = CAN_STATE_ERROR_WARNING;
	} else {
		new_state = CAN_STATE_ERROR_ACTIVE;
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/* See comment in acc_set_mode() for CAN_MODE_START */
			netif_wake_queue(core->netdev);
		}
	}

	skb = alloc_can_err_skb(core->netdev, &cf);

	if (new_state != priv->can.state) {
		enum can_state tx_state, rx_state;

		tx_state = (txerr >= rxerr) ?
			new_state : CAN_STATE_ERROR_ACTIVE;
		rx_state = (rxerr >= txerr) ?
			new_state : CAN_STATE_ERROR_ACTIVE;

		/* Always call can_change_state() to update the state
		 * even if alloc_can_err_skb() may have failed.
		 * can_change_state() can cope with a NULL cf pointer.
		 */
		can_change_state(core->netdev, cf, tx_state, rx_state);
	}

	if (skb) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;

		skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

		netif_rx(skb);
	}

	if (new_state == CAN_STATE_BUS_OFF) {
		acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
		can_bus_off(core->netdev);
	}
}

static void handle_core_interrupt(struct acc_core *core)
{
	u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;

	while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
		const union acc_bmmsg *msg =
			&core->bmfifo.messages[core->bmfifo.msg_fifo_tail];

		switch (msg->msg_id) {
		case BM_MSG_ID_RXTXDONE:
			handle_core_msg_rxtxdone(core, &msg->rxtxdone);
			break;

		case BM_MSG_ID_TXABORT:
			handle_core_msg_txabort(core, &msg->txabort);
			break;

		case BM_MSG_ID_OVERRUN:
			handle_core_msg_overrun(core, &msg->overrun);
			break;

		case BM_MSG_ID_BUSERR:
			handle_core_msg_buserr(core, &msg->buserr);
			break;

		case BM_MSG_ID_ERRPASSIVE:
		case BM_MSG_ID_ERRWARN:
			handle_core_msg_errstatechange(core,
						       &msg->errstatechange);
			break;

		default:
			/* Ignore all other BM messages (like the CAN-FD messages) */
			break;
		}

		core->bmfifo.msg_fifo_tail =
				(core->bmfifo.msg_fifo_tail + 1) & 0xff;
	}
}

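/* Note on the masking above (inferred from the code, not separately
 * documented): the busmaster message FIFO appears to be 256 entries
 * deep, so the low byte of the DMA-updated IRQ counter serves directly
 * as the FIFO head index and the tail wraps with (tail + 1) & 0xff.
 */
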
/**
 * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
 *
 * @ov: overview module structure
 * @cores: array of core structures
 *
 * This function handles all interrupts pending for the overview module and the
 * CAN cores of the esdACC FPGA.
 *
 * For each core (the overview module core and the CAN cores) it compares
 * bmfifo.irq_cnt with the previously saved bmfifo.local_irq_cnt. An IRQ is
 * pending if they differ. The esdACC FPGA updates the bmfifo.irq_cnt values
 * by DMA.
 *
 * The pending interrupts are masked by writing to the IRQ mask register at
 * ACC_OV_OF_BM_IRQ_MASK. This register has a two bit command field for each
 * core, evaluated as follows:
 *
 * Define,   bit pattern: meaning
 *                    00: no action
 * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
 * ACC_BM_IRQ_MASK,   10: mask interrupt
 *                    11: no action
 *
 * For each CAN core with a pending IRQ, handle_core_interrupt() handles all
 * busmaster messages from the message FIFO. The index of the last handled
 * message is written to the CAN core to acknowledge its handling.
 *
 * The last step is to unmask all interrupts in the FPGA using
 * ACC_BM_IRQ_UNMASK_ALL.
 *
 * Return:
 *	IRQ_HANDLED, if the card generated an interrupt that was handled
 *	IRQ_NONE, if the interrupt is not ours
 */
irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
{
	u32 irqmask;
	int i;

	/* First, determine for whom interrupts are pending: the card/overview
	 * module or any of the cores. Two bits in irqmask are used for each
	 * unit; a two bit field is set to ACC_BM_IRQ_MASK if an IRQ is
	 * pending for that unit.
	 */
	irqmask = 0U;
	if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
		irqmask |= ACC_BM_IRQ_MASK;
		ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
	}

	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
			irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
			core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
		}
	}

	if (!irqmask)
		return IRQ_NONE;

	/* Second, tell the card we're working on these interrupts by writing
	 * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
	 * interrupts by writing irq_cnt:
	 */
	acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);

	if (irqmask & ACC_BM_IRQ_MASK) {
		/* handle_ov_interrupt(); - no use yet. */
		acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
			       ov->bmfifo.local_irq_cnt);
	}

	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
			handle_core_interrupt(core);
			acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
				    core->bmfifo.local_irq_cnt);
		}
	}

	acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);

	return IRQ_HANDLED;
}

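/* Example irqmask value (hypothetical): on a card with two active cores
 * where the overview module and core 1 have pending IRQs, the loop above
 * builds
 *
 *	irqmask = ACC_BM_IRQ_MASK             (bits 1:0 = 10, overview)
 *	        | ACC_BM_IRQ_MASK << 4        (bits 5:4 = 10, core 1)
 *	        = 0x00000022
 *
 * Core 0 (bits 3:2) stays 00, i.e. "no action", so it is neither masked
 * nor acknowledged in this pass.
 */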