1 // SPDX-License-Identifier: GPL-2.0
2 // CAN bus driver for Bosch M_CAN controller
3 // Copyright (C) 2014 Freescale Semiconductor, Inc.
4 //      Dong Aisheng <aisheng.dong@nxp.com>
5 // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
6 
7 /* Bosch M_CAN user manual can be obtained from:
8  * https://github.com/linux-can/can-doc/tree/master/m_can
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/can/dev.h>
13 #include <linux/ethtool.h>
14 #include <linux/hrtimer.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/netdevice.h>
21 #include <linux/of.h>
22 #include <linux/phy/phy.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/reset.h>
27 
28 #include "m_can.h"
29 
30 /* registers definition */
31 enum m_can_reg {
32 	M_CAN_CREL	= 0x0,
33 	M_CAN_ENDN	= 0x4,
34 	M_CAN_CUST	= 0x8,
35 	M_CAN_DBTP	= 0xc,
36 	M_CAN_TEST	= 0x10,
37 	M_CAN_RWD	= 0x14,
38 	M_CAN_CCCR	= 0x18,
39 	M_CAN_NBTP	= 0x1c,
40 	M_CAN_TSCC	= 0x20,
41 	M_CAN_TSCV	= 0x24,
42 	M_CAN_TOCC	= 0x28,
43 	M_CAN_TOCV	= 0x2c,
44 	M_CAN_ECR	= 0x40,
45 	M_CAN_PSR	= 0x44,
46 	/* TDCR Register only available for version >=3.1.x */
47 	M_CAN_TDCR	= 0x48,
48 	M_CAN_IR	= 0x50,
49 	M_CAN_IE	= 0x54,
50 	M_CAN_ILS	= 0x58,
51 	M_CAN_ILE	= 0x5c,
52 	M_CAN_GFC	= 0x80,
53 	M_CAN_SIDFC	= 0x84,
54 	M_CAN_XIDFC	= 0x88,
55 	M_CAN_XIDAM	= 0x90,
56 	M_CAN_HPMS	= 0x94,
57 	M_CAN_NDAT1	= 0x98,
58 	M_CAN_NDAT2	= 0x9c,
59 	M_CAN_RXF0C	= 0xa0,
60 	M_CAN_RXF0S	= 0xa4,
61 	M_CAN_RXF0A	= 0xa8,
62 	M_CAN_RXBC	= 0xac,
63 	M_CAN_RXF1C	= 0xb0,
64 	M_CAN_RXF1S	= 0xb4,
65 	M_CAN_RXF1A	= 0xb8,
66 	M_CAN_RXESC	= 0xbc,
67 	M_CAN_TXBC	= 0xc0,
68 	M_CAN_TXFQS	= 0xc4,
69 	M_CAN_TXESC	= 0xc8,
70 	M_CAN_TXBRP	= 0xcc,
71 	M_CAN_TXBAR	= 0xd0,
72 	M_CAN_TXBCR	= 0xd4,
73 	M_CAN_TXBTO	= 0xd8,
74 	M_CAN_TXBCF	= 0xdc,
75 	M_CAN_TXBTIE	= 0xe0,
76 	M_CAN_TXBCIE	= 0xe4,
77 	M_CAN_TXEFC	= 0xf0,
78 	M_CAN_TXEFS	= 0xf4,
79 	M_CAN_TXEFA	= 0xf8,
80 };
81 
82 /* message ram configuration data length */
83 #define MRAM_CFG_LEN	8
84 
85 /* Core Release Register (CREL) */
86 #define CREL_REL_MASK		GENMASK(31, 28)
87 #define CREL_STEP_MASK		GENMASK(27, 24)
88 #define CREL_SUBSTEP_MASK	GENMASK(23, 20)
89 
90 /* Data Bit Timing & Prescaler Register (DBTP) */
91 #define DBTP_TDC		BIT(23)
92 #define DBTP_DBRP_MASK		GENMASK(20, 16)
93 #define DBTP_DTSEG1_MASK	GENMASK(12, 8)
94 #define DBTP_DTSEG2_MASK	GENMASK(7, 4)
95 #define DBTP_DSJW_MASK		GENMASK(3, 0)
96 
97 /* Transmitter Delay Compensation Register (TDCR) */
98 #define TDCR_TDCO_MASK		GENMASK(14, 8)
99 #define TDCR_TDCF_MASK		GENMASK(6, 0)
100 
101 /* Test Register (TEST) */
102 #define TEST_LBCK		BIT(4)
103 
104 /* CC Control Register (CCCR) */
105 #define CCCR_TXP		BIT(14)
106 #define CCCR_TEST		BIT(7)
107 #define CCCR_DAR		BIT(6)
108 #define CCCR_MON		BIT(5)
109 #define CCCR_CSR		BIT(4)
110 #define CCCR_CSA		BIT(3)
111 #define CCCR_ASM		BIT(2)
112 #define CCCR_CCE		BIT(1)
113 #define CCCR_INIT		BIT(0)
114 /* for version 3.0.x */
115 #define CCCR_CMR_MASK		GENMASK(11, 10)
116 #define CCCR_CMR_CANFD		0x1
117 #define CCCR_CMR_CANFD_BRS	0x2
118 #define CCCR_CMR_CAN		0x3
119 #define CCCR_CME_MASK		GENMASK(9, 8)
120 #define CCCR_CME_CAN		0
121 #define CCCR_CME_CANFD		0x1
122 #define CCCR_CME_CANFD_BRS	0x2
123 /* for version >=3.1.x */
124 #define CCCR_EFBI		BIT(13)
125 #define CCCR_PXHD		BIT(12)
126 #define CCCR_BRSE		BIT(9)
127 #define CCCR_FDOE		BIT(8)
128 /* for version >=3.2.x */
129 #define CCCR_NISO		BIT(15)
130 /* for version >=3.3.x */
131 #define CCCR_WMM		BIT(11)
132 #define CCCR_UTSU		BIT(10)
133 
134 /* Nominal Bit Timing & Prescaler Register (NBTP) */
135 #define NBTP_NSJW_MASK		GENMASK(31, 25)
136 #define NBTP_NBRP_MASK		GENMASK(24, 16)
137 #define NBTP_NTSEG1_MASK	GENMASK(15, 8)
138 #define NBTP_NTSEG2_MASK	GENMASK(6, 0)
139 
140 /* Timestamp Counter Configuration Register (TSCC) */
141 #define TSCC_TCP_MASK		GENMASK(19, 16)
142 #define TSCC_TSS_MASK		GENMASK(1, 0)
143 #define TSCC_TSS_DISABLE	0x0
144 #define TSCC_TSS_INTERNAL	0x1
145 #define TSCC_TSS_EXTERNAL	0x2
146 
147 /* Timestamp Counter Value Register (TSCV) */
148 #define TSCV_TSC_MASK		GENMASK(15, 0)
149 
150 /* Error Counter Register (ECR) */
151 #define ECR_RP			BIT(15)
152 #define ECR_REC_MASK		GENMASK(14, 8)
153 #define ECR_TEC_MASK		GENMASK(7, 0)
154 
155 /* Protocol Status Register (PSR) */
156 #define PSR_BO		BIT(7)
157 #define PSR_EW		BIT(6)
158 #define PSR_EP		BIT(5)
159 #define PSR_LEC_MASK	GENMASK(2, 0)
160 #define PSR_DLEC_MASK	GENMASK(10, 8)
161 
162 /* Interrupt Register (IR) */
163 #define IR_ALL_INT	0xffffffff
164 
165 /* Renamed bits for versions > 3.1.x */
166 #define IR_ARA		BIT(29)
167 #define IR_PED		BIT(28)
168 #define IR_PEA		BIT(27)
169 
170 /* Bits for version 3.0.x */
171 #define IR_STE		BIT(31)
172 #define IR_FOE		BIT(30)
173 #define IR_ACKE		BIT(29)
174 #define IR_BE		BIT(28)
175 #define IR_CRCE		BIT(27)
176 #define IR_WDI		BIT(26)
177 #define IR_BO		BIT(25)
178 #define IR_EW		BIT(24)
179 #define IR_EP		BIT(23)
180 #define IR_ELO		BIT(22)
181 #define IR_BEU		BIT(21)
182 #define IR_BEC		BIT(20)
183 #define IR_DRX		BIT(19)
184 #define IR_TOO		BIT(18)
185 #define IR_MRAF		BIT(17)
186 #define IR_TSW		BIT(16)
187 #define IR_TEFL		BIT(15)
188 #define IR_TEFF		BIT(14)
189 #define IR_TEFW		BIT(13)
190 #define IR_TEFN		BIT(12)
191 #define IR_TFE		BIT(11)
192 #define IR_TCF		BIT(10)
193 #define IR_TC		BIT(9)
194 #define IR_HPM		BIT(8)
195 #define IR_RF1L		BIT(7)
196 #define IR_RF1F		BIT(6)
197 #define IR_RF1W		BIT(5)
198 #define IR_RF1N		BIT(4)
199 #define IR_RF0L		BIT(3)
200 #define IR_RF0F		BIT(2)
201 #define IR_RF0W		BIT(1)
202 #define IR_RF0N		BIT(0)
203 #define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
204 
205 /* Interrupts for version 3.0.x */
206 #define IR_ERR_LEC_30X	(IR_STE	| IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
207 #define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
208 			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
209 			 IR_RF0L)
210 #define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)
211 
212 /* Interrupts for version >= 3.1.x */
213 #define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
214 #define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
215 			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
216 			 IR_RF0L)
217 #define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)
218 
219 /* Interrupt Line Select (ILS) */
220 #define ILS_ALL_INT0	0x0
221 #define ILS_ALL_INT1	0xFFFFFFFF
222 
223 /* Interrupt Line Enable (ILE) */
224 #define ILE_EINT1	BIT(1)
225 #define ILE_EINT0	BIT(0)
226 
227 /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
228 #define RXFC_FWM_MASK	GENMASK(30, 24)
229 #define RXFC_FS_MASK	GENMASK(22, 16)
230 
231 /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
232 #define RXFS_RFL	BIT(25)
233 #define RXFS_FF		BIT(24)
234 #define RXFS_FPI_MASK	GENMASK(21, 16)
235 #define RXFS_FGI_MASK	GENMASK(13, 8)
236 #define RXFS_FFL_MASK	GENMASK(6, 0)
237 
238 /* Rx Buffer / FIFO Element Size Configuration (RXESC) */
239 #define RXESC_RBDS_MASK		GENMASK(10, 8)
240 #define RXESC_F1DS_MASK		GENMASK(6, 4)
241 #define RXESC_F0DS_MASK		GENMASK(2, 0)
242 #define RXESC_64B		0x7
243 
244 /* Tx Buffer Configuration (TXBC) */
245 #define TXBC_TFQS_MASK		GENMASK(29, 24)
246 #define TXBC_NDTB_MASK		GENMASK(21, 16)
247 
248 /* Tx FIFO/Queue Status (TXFQS) */
249 #define TXFQS_TFQF		BIT(21)
250 #define TXFQS_TFQPI_MASK	GENMASK(20, 16)
251 #define TXFQS_TFGI_MASK		GENMASK(12, 8)
252 #define TXFQS_TFFL_MASK		GENMASK(5, 0)
253 
254 /* Tx Buffer Element Size Configuration (TXESC) */
255 #define TXESC_TBDS_MASK		GENMASK(2, 0)
256 #define TXESC_TBDS_64B		0x7
257 
258 /* Tx Event FIFO Configuration (TXEFC) */
259 #define TXEFC_EFWM_MASK		GENMASK(29, 24)
260 #define TXEFC_EFS_MASK		GENMASK(21, 16)
261 
262 /* Tx Event FIFO Status (TXEFS) */
263 #define TXEFS_TEFL		BIT(25)
264 #define TXEFS_EFF		BIT(24)
265 #define TXEFS_EFGI_MASK		GENMASK(12, 8)
266 #define TXEFS_EFFL_MASK		GENMASK(5, 0)
267 
268 /* Tx Event FIFO Acknowledge (TXEFA) */
269 #define TXEFA_EFAI_MASK		GENMASK(4, 0)
270 
271 /* Message RAM Configuration (in bytes) */
272 #define SIDF_ELEMENT_SIZE	4
273 #define XIDF_ELEMENT_SIZE	8
274 #define RXF0_ELEMENT_SIZE	72
275 #define RXF1_ELEMENT_SIZE	72
276 #define RXB_ELEMENT_SIZE	72
277 #define TXE_ELEMENT_SIZE	8
278 #define TXB_ELEMENT_SIZE	72
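/* Note: 72 bytes per Rx/Tx buffer element = 8 bytes of header (two 32-bit
 * words, ID + DLC) followed by up to 64 bytes of CAN FD payload.
 */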
279 
280 /* Message RAM Elements */
281 #define M_CAN_FIFO_ID		0x0
282 #define M_CAN_FIFO_DLC		0x4
283 #define M_CAN_FIFO_DATA		0x8
284 
285 /* Rx Buffer Element */
286 /* R0 */
287 #define RX_BUF_ESI		BIT(31)
288 #define RX_BUF_XTD		BIT(30)
289 #define RX_BUF_RTR		BIT(29)
290 /* R1 */
291 #define RX_BUF_ANMF		BIT(31)
292 #define RX_BUF_FDF		BIT(21)
293 #define RX_BUF_BRS		BIT(20)
294 #define RX_BUF_RXTS_MASK	GENMASK(15, 0)
295 
296 /* Tx Buffer Element */
297 /* T0 */
298 #define TX_BUF_ESI		BIT(31)
299 #define TX_BUF_XTD		BIT(30)
300 #define TX_BUF_RTR		BIT(29)
301 /* T1 */
302 #define TX_BUF_EFC		BIT(23)
303 #define TX_BUF_FDF		BIT(21)
304 #define TX_BUF_BRS		BIT(20)
305 #define TX_BUF_MM_MASK		GENMASK(31, 24)
306 #define TX_BUF_DLC_MASK		GENMASK(19, 16)
307 
308 /* Tx event FIFO Element */
309 /* E1 */
310 #define TX_EVENT_MM_MASK	GENMASK(31, 24)
311 #define TX_EVENT_TXTS_MASK	GENMASK(15, 0)
312 
313 /* Hrtimer polling interval */
314 #define HRTIMER_POLL_INTERVAL_MS		1
315 
316 /* The ID and DLC registers are adjacent in M_CAN FIFO memory,
317  * and we can save a (potentially slow) bus round trip by combining
318  * reads and writes to them.
319  */
320 struct id_and_dlc {
321 	u32 id;
322 	u32 dlc;
323 };
324 
325 struct m_can_fifo_element {
326 	u32 id;
327 	u32 dlc;
328 	u8 data[CANFD_MAX_DLEN];
329 };
330 
331 static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
332 {
333 	return cdev->ops->read_reg(cdev, reg);
334 }
335 
336 static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
337 			       u32 val)
338 {
339 	cdev->ops->write_reg(cdev, reg, val);
340 }
341 
342 static int
343 m_can_fifo_read(struct m_can_classdev *cdev,
344 		u32 fgi, unsigned int offset, void *val, size_t val_count)
345 {
346 	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
347 		offset;
348 
349 	if (val_count == 0)
350 		return 0;
351 
352 	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
353 }
354 
355 static int
356 m_can_fifo_write(struct m_can_classdev *cdev,
357 		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
358 {
359 	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
360 		offset;
361 
362 	if (val_count == 0)
363 		return 0;
364 
365 	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
366 }
367 
368 static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
369 					  u32 fpi, u32 val)
370 {
371 	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
372 }
373 
374 static int
375 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
376 {
377 	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
378 		offset;
379 
380 	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
381 }
382 
383 static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
384 {
385 	u32 val_before = m_can_read(cdev, M_CAN_CCCR);
386 	u32 val_after = (val_before & ~mask) | val;
387 	size_t tries = 10;
388 
389 	if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
390 		netdev_err(cdev->net,
391 			   "refusing to configure device when in normal mode\n");
392 		return -EBUSY;
393 	}
394 
395 	/* The chip should be in standby mode when changing the CCCR register,
396 	 * and some chips set the CSR and CSA bits when in standby. Furthermore,
397 	 * the CSR and CSA bits should be written as zeros, even when they read
398 	 * ones.
399 	 */
400 	val_after &= ~(CCCR_CSR | CCCR_CSA);
401 
402 	while (tries--) {
403 		u32 val_read;
404 
405 		/* Write the desired value in each try, as setting some bits in
406 		 * the CCCR register require other bits to be set first. E.g.
407 		 * setting the NISO bit requires setting the CCE bit first.
408 		 */
409 		m_can_write(cdev, M_CAN_CCCR, val_after);
410 
411 		val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
412 
413 		if (val_read == val_after)
414 			return 0;
415 
416 		usleep_range(1, 5);
417 	}
418 
419 	return -ETIMEDOUT;
420 }
421 
422 static int m_can_config_enable(struct m_can_classdev *cdev)
423 {
424 	int err;
425 
426 	/* CCCR_INIT must be set in order to set CCCR_CCE, but access to
427 	 * configuration registers should only be enabled when in standby mode,
428 	 * where CCCR_INIT is always set.
429 	 */
430 	err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
431 	if (err)
432 		netdev_err(cdev->net, "failed to enable configuration mode\n");
433 
434 	return err;
435 }
436 
437 static int m_can_config_disable(struct m_can_classdev *cdev)
438 {
439 	int err;
440 
441 	/* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
442 	 * standby mode
443 	 */
444 	err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
445 	if (err)
446 		netdev_err(cdev->net, "failed to disable configuration registers\n");
447 
448 	return err;
449 }
450 
451 static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
452 {
453 	if (cdev->active_interrupts == interrupts)
454 		return;
455 	m_can_write(cdev, M_CAN_IE, interrupts);
456 	cdev->active_interrupts = interrupts;
457 }
458 
459 static void m_can_coalescing_disable(struct m_can_classdev *cdev)
460 {
461 	u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
462 
463 	if (!cdev->net->irq)
464 		return;
465 
466 	hrtimer_cancel(&cdev->hrtimer);
467 	m_can_interrupt_enable(cdev, new_interrupts);
468 }
469 
470 static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
471 {
472 	if (!cdev->net->irq) {
473 		netdev_dbg(cdev->net, "Start hrtimer\n");
474 		hrtimer_start(&cdev->hrtimer,
475 			      ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
476 			      HRTIMER_MODE_REL_PINNED);
477 	}
478 
479 	/* Only interrupt line 0 is used in this driver */
480 	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
481 }
482 
483 static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
484 {
485 	m_can_coalescing_disable(cdev);
486 	m_can_write(cdev, M_CAN_ILE, 0x0);
487 
488 	if (!cdev->net->irq) {
489 		netdev_dbg(cdev->net, "Stop hrtimer\n");
490 		hrtimer_try_to_cancel(&cdev->hrtimer);
491 	}
492 }
493 
494 /* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
495  * width.
496  */
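/* The hardware counter is only 16 bits wide; shifting it into the upper half
 * of a 32-bit value keeps it directly comparable with the Rx and Tx event
 * timestamps, which are shifted the same way before being handed to
 * rx-offload.
 */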
497 static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
498 {
499 	u32 tscv;
500 	u32 tsc;
501 
502 	tscv = m_can_read(cdev, M_CAN_TSCV);
503 	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);
504 
505 	return (tsc << 16);
506 }
507 
508 static void m_can_clean(struct net_device *net)
509 {
510 	struct m_can_classdev *cdev = netdev_priv(net);
511 	unsigned long irqflags;
512 
513 	if (cdev->tx_ops) {
514 		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
515 			if (!cdev->tx_ops[i].skb)
516 				continue;
517 
518 			net->stats.tx_errors++;
519 			cdev->tx_ops[i].skb = NULL;
520 		}
521 	}
522 
523 	for (int i = 0; i != cdev->can.echo_skb_max; ++i)
524 		can_free_echo_skb(cdev->net, i, NULL);
525 
526 	netdev_reset_queue(cdev->net);
527 
528 	spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
529 	cdev->tx_fifo_in_flight = 0;
530 	spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
531 }
532 
533 /* For peripherals, pass skb to rx-offload, which will push skb from
534  * napi. For non-peripherals, RX is done in napi already, so push
535  * directly. timestamp is used to ensure good skb ordering in
536  * rx-offload and is ignored for non-peripherals.
537  */
538 static void m_can_receive_skb(struct m_can_classdev *cdev,
539 			      struct sk_buff *skb,
540 			      u32 timestamp)
541 {
542 	if (cdev->is_peripheral) {
543 		struct net_device_stats *stats = &cdev->net->stats;
544 		int err;
545 
546 		err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
547 						     timestamp);
548 		if (err)
549 			stats->rx_fifo_errors++;
550 	} else {
551 		netif_receive_skb(skb);
552 	}
553 }
554 
555 static int m_can_read_fifo(struct net_device *dev, u32 fgi)
556 {
557 	struct net_device_stats *stats = &dev->stats;
558 	struct m_can_classdev *cdev = netdev_priv(dev);
559 	struct canfd_frame *cf;
560 	struct sk_buff *skb;
561 	struct id_and_dlc fifo_header;
562 	u32 timestamp = 0;
563 	int err;
564 
565 	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
566 	if (err)
567 		goto out_fail;
568 
569 	if (fifo_header.dlc & RX_BUF_FDF)
570 		skb = alloc_canfd_skb(dev, &cf);
571 	else
572 		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
573 	if (!skb) {
574 		stats->rx_dropped++;
575 		return 0;
576 	}
577 
578 	if (fifo_header.dlc & RX_BUF_FDF)
579 		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
580 	else
581 		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);
582 
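	/* In the Rx element an 11-bit standard ID occupies bits 28:18 of the
	 * ID word, while extended frames use the full 29 bits; this mirrors
	 * the "<< 18" shift on the transmit path.
	 */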
583 	if (fifo_header.id & RX_BUF_XTD)
584 		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
585 	else
586 		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;
587 
588 	if (fifo_header.id & RX_BUF_ESI) {
589 		cf->flags |= CANFD_ESI;
590 		netdev_dbg(dev, "ESI Error\n");
591 	}
592 
593 	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
594 		cf->can_id |= CAN_RTR_FLAG;
595 	} else {
596 		if (fifo_header.dlc & RX_BUF_BRS)
597 			cf->flags |= CANFD_BRS;
598 
599 		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
600 				      cf->data, DIV_ROUND_UP(cf->len, 4));
601 		if (err)
602 			goto out_free_skb;
603 
604 		stats->rx_bytes += cf->len;
605 	}
606 	stats->rx_packets++;
607 
608 	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
609 
610 	m_can_receive_skb(cdev, skb, timestamp);
611 
612 	return 0;
613 
614 out_free_skb:
615 	kfree_skb(skb);
616 out_fail:
617 	netdev_err(dev, "FIFO read returned %d\n", err);
618 	return err;
619 }
620 
621 static int m_can_do_rx_poll(struct net_device *dev, int quota)
622 {
623 	struct m_can_classdev *cdev = netdev_priv(dev);
624 	u32 pkts = 0;
625 	u32 rxfs;
626 	u32 rx_count;
627 	u32 fgi;
628 	int ack_fgi = -1;
629 	int i;
630 	int err = 0;
631 
632 	rxfs = m_can_read(cdev, M_CAN_RXF0S);
633 	if (!(rxfs & RXFS_FFL_MASK)) {
634 		netdev_dbg(dev, "no messages in fifo0\n");
635 		return 0;
636 	}
637 
638 	rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
639 	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
640 
641 	for (i = 0; i < rx_count && quota > 0; ++i) {
642 		err = m_can_read_fifo(dev, fgi);
643 		if (err)
644 			break;
645 
646 		quota--;
647 		pkts++;
648 		ack_fgi = fgi;
649 		fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
650 	}
651 
652 	if (ack_fgi != -1)
653 		m_can_write(cdev, M_CAN_RXF0A, ack_fgi);
654 
655 	if (err)
656 		return err;
657 
658 	return pkts;
659 }
660 
661 static int m_can_handle_lost_msg(struct net_device *dev)
662 {
663 	struct m_can_classdev *cdev = netdev_priv(dev);
664 	struct net_device_stats *stats = &dev->stats;
665 	struct sk_buff *skb;
666 	struct can_frame *frame;
667 	u32 timestamp = 0;
668 
669 	netdev_dbg(dev, "msg lost in rxf0\n");
670 
671 	stats->rx_errors++;
672 	stats->rx_over_errors++;
673 
674 	skb = alloc_can_err_skb(dev, &frame);
675 	if (unlikely(!skb))
676 		return 0;
677 
678 	frame->can_id |= CAN_ERR_CRTL;
679 	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
680 
681 	if (cdev->is_peripheral)
682 		timestamp = m_can_get_timestamp(cdev);
683 
684 	m_can_receive_skb(cdev, skb, timestamp);
685 
686 	return 1;
687 }
688 
689 static int m_can_handle_lec_err(struct net_device *dev,
690 				enum m_can_lec_type lec_type)
691 {
692 	struct m_can_classdev *cdev = netdev_priv(dev);
693 	struct net_device_stats *stats = &dev->stats;
694 	struct can_frame *cf;
695 	struct sk_buff *skb;
696 	u32 timestamp = 0;
697 
698 	cdev->can.can_stats.bus_error++;
699 
700 	/* propagate the error condition to the CAN stack */
701 	skb = alloc_can_err_skb(dev, &cf);
702 
703 	/* check for 'last error code' which tells us the
704 	 * type of the last error to occur on the CAN bus
705 	 */
706 	if (likely(skb))
707 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
708 
709 	switch (lec_type) {
710 	case LEC_STUFF_ERROR:
711 		netdev_dbg(dev, "stuff error\n");
712 		stats->rx_errors++;
713 		if (likely(skb))
714 			cf->data[2] |= CAN_ERR_PROT_STUFF;
715 		break;
716 	case LEC_FORM_ERROR:
717 		netdev_dbg(dev, "form error\n");
718 		stats->rx_errors++;
719 		if (likely(skb))
720 			cf->data[2] |= CAN_ERR_PROT_FORM;
721 		break;
722 	case LEC_ACK_ERROR:
723 		netdev_dbg(dev, "ack error\n");
724 		stats->tx_errors++;
725 		if (likely(skb))
726 			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
727 		break;
728 	case LEC_BIT1_ERROR:
729 		netdev_dbg(dev, "bit1 error\n");
730 		stats->tx_errors++;
731 		if (likely(skb))
732 			cf->data[2] |= CAN_ERR_PROT_BIT1;
733 		break;
734 	case LEC_BIT0_ERROR:
735 		netdev_dbg(dev, "bit0 error\n");
736 		stats->tx_errors++;
737 		if (likely(skb))
738 			cf->data[2] |= CAN_ERR_PROT_BIT0;
739 		break;
740 	case LEC_CRC_ERROR:
741 		netdev_dbg(dev, "CRC error\n");
742 		stats->rx_errors++;
743 		if (likely(skb))
744 			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
745 		break;
746 	default:
747 		break;
748 	}
749 
750 	if (unlikely(!skb))
751 		return 0;
752 
753 	if (cdev->is_peripheral)
754 		timestamp = m_can_get_timestamp(cdev);
755 
756 	m_can_receive_skb(cdev, skb, timestamp);
757 
758 	return 1;
759 }
760 
761 static int __m_can_get_berr_counter(const struct net_device *dev,
762 				    struct can_berr_counter *bec)
763 {
764 	struct m_can_classdev *cdev = netdev_priv(dev);
765 	unsigned int ecr;
766 
767 	ecr = m_can_read(cdev, M_CAN_ECR);
768 	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
769 	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);
770 
771 	return 0;
772 }
773 
774 static int m_can_clk_start(struct m_can_classdev *cdev)
775 {
776 	if (cdev->pm_clock_support == 0)
777 		return 0;
778 
779 	return pm_runtime_resume_and_get(cdev->dev);
780 }
781 
782 static void m_can_clk_stop(struct m_can_classdev *cdev)
783 {
784 	if (cdev->pm_clock_support)
785 		pm_runtime_put_sync(cdev->dev);
786 }
787 
788 static int m_can_get_berr_counter(const struct net_device *dev,
789 				  struct can_berr_counter *bec)
790 {
791 	struct m_can_classdev *cdev = netdev_priv(dev);
792 	int err;
793 
794 	/* Avoid waking up the controller if the interface is down */
795 	if (!(dev->flags & IFF_UP))
796 		return 0;
797 
798 	err = m_can_clk_start(cdev);
799 	if (err)
800 		return err;
801 
802 	__m_can_get_berr_counter(dev, bec);
803 
804 	m_can_clk_stop(cdev);
805 
806 	return 0;
807 }
808 
809 static int m_can_handle_state_change(struct net_device *dev,
810 				     enum can_state new_state)
811 {
812 	struct m_can_classdev *cdev = netdev_priv(dev);
813 	struct can_frame *cf;
814 	struct sk_buff *skb;
815 	struct can_berr_counter bec;
816 	unsigned int ecr;
817 	u32 timestamp = 0;
818 
819 	switch (new_state) {
820 	case CAN_STATE_ERROR_ACTIVE:
821 		cdev->can.state = CAN_STATE_ERROR_ACTIVE;
822 		break;
823 	case CAN_STATE_ERROR_WARNING:
824 		/* error warning state */
825 		cdev->can.can_stats.error_warning++;
826 		cdev->can.state = CAN_STATE_ERROR_WARNING;
827 		break;
828 	case CAN_STATE_ERROR_PASSIVE:
829 		/* error passive state */
830 		cdev->can.can_stats.error_passive++;
831 		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
832 		break;
833 	case CAN_STATE_BUS_OFF:
834 		/* bus-off state */
835 		cdev->can.state = CAN_STATE_BUS_OFF;
836 		m_can_disable_all_interrupts(cdev);
837 		cdev->can.can_stats.bus_off++;
838 		can_bus_off(dev);
839 		break;
840 	default:
841 		break;
842 	}
843 
844 	/* propagate the error condition to the CAN stack */
845 	skb = alloc_can_err_skb(dev, &cf);
846 	if (unlikely(!skb))
847 		return 0;
848 
849 	__m_can_get_berr_counter(dev, &bec);
850 
851 	switch (new_state) {
852 	case CAN_STATE_ERROR_ACTIVE:
853 		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
854 		cf->data[1] = CAN_ERR_CRTL_ACTIVE;
855 		cf->data[6] = bec.txerr;
856 		cf->data[7] = bec.rxerr;
857 		break;
858 	case CAN_STATE_ERROR_WARNING:
859 		/* error warning state */
860 		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
861 		cf->data[1] = (bec.txerr > bec.rxerr) ?
862 			CAN_ERR_CRTL_TX_WARNING :
863 			CAN_ERR_CRTL_RX_WARNING;
864 		cf->data[6] = bec.txerr;
865 		cf->data[7] = bec.rxerr;
866 		break;
867 	case CAN_STATE_ERROR_PASSIVE:
868 		/* error passive state */
869 		cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
870 		ecr = m_can_read(cdev, M_CAN_ECR);
871 		if (ecr & ECR_RP)
872 			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
873 		if (bec.txerr > 127)
874 			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
875 		cf->data[6] = bec.txerr;
876 		cf->data[7] = bec.rxerr;
877 		break;
878 	case CAN_STATE_BUS_OFF:
879 		/* bus-off state */
880 		cf->can_id |= CAN_ERR_BUSOFF;
881 		break;
882 	default:
883 		break;
884 	}
885 
886 	if (cdev->is_peripheral)
887 		timestamp = m_can_get_timestamp(cdev);
888 
889 	m_can_receive_skb(cdev, skb, timestamp);
890 
891 	return 1;
892 }
893 
894 static enum can_state
895 m_can_state_get_by_psr(struct m_can_classdev *cdev)
896 {
897 	u32 reg_psr;
898 
899 	reg_psr = m_can_read(cdev, M_CAN_PSR);
900 
901 	if (reg_psr & PSR_BO)
902 		return CAN_STATE_BUS_OFF;
903 	if (reg_psr & PSR_EP)
904 		return CAN_STATE_ERROR_PASSIVE;
905 	if (reg_psr & PSR_EW)
906 		return CAN_STATE_ERROR_WARNING;
907 
908 	return CAN_STATE_ERROR_ACTIVE;
909 }
910 
911 static int m_can_handle_state_errors(struct net_device *dev)
912 {
913 	struct m_can_classdev *cdev = netdev_priv(dev);
914 	enum can_state new_state;
915 
916 	new_state = m_can_state_get_by_psr(cdev);
917 	if (new_state == cdev->can.state)
918 		return 0;
919 
920 	return m_can_handle_state_change(dev, new_state);
921 }
922 
923 static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
924 {
925 	if (irqstatus & IR_WDI)
926 		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
927 	if (irqstatus & IR_BEU)
928 		netdev_err(dev, "Bit Error Uncorrected\n");
929 	if (irqstatus & IR_BEC)
930 		netdev_err(dev, "Bit Error Corrected\n");
931 	if (irqstatus & IR_TOO)
932 		netdev_err(dev, "Timeout reached\n");
933 	if (irqstatus & IR_MRAF)
934 		netdev_err(dev, "Message RAM access failure occurred\n");
935 }
936 
937 static inline bool is_lec_err(u8 lec)
938 {
939 	return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
940 }
941 
942 static inline bool m_can_is_protocol_err(u32 irqstatus)
943 {
944 	return irqstatus & IR_ERR_LEC_31X;
945 }
946 
947 static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
948 {
949 	struct net_device_stats *stats = &dev->stats;
950 	struct m_can_classdev *cdev = netdev_priv(dev);
951 	struct can_frame *cf;
952 	struct sk_buff *skb;
953 	u32 timestamp = 0;
954 
955 	/* propagate the error condition to the CAN stack */
956 	skb = alloc_can_err_skb(dev, &cf);
957 
958 	/* update tx error stats since there is protocol error */
959 	stats->tx_errors++;
960 
961 	/* update arbitration lost status */
962 	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
963 		netdev_dbg(dev, "Protocol error in Arbitration fail\n");
964 		cdev->can.can_stats.arbitration_lost++;
965 		if (skb) {
966 			cf->can_id |= CAN_ERR_LOSTARB;
967 			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
968 		}
969 	}
970 
971 	if (unlikely(!skb)) {
972 		netdev_dbg(dev, "allocation of skb failed\n");
973 		return 0;
974 	}
975 
976 	if (cdev->is_peripheral)
977 		timestamp = m_can_get_timestamp(cdev);
978 
979 	m_can_receive_skb(cdev, skb, timestamp);
980 
981 	return 1;
982 }
983 
984 static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
985 				   u32 psr)
986 {
987 	struct m_can_classdev *cdev = netdev_priv(dev);
988 	int work_done = 0;
989 
990 	if (irqstatus & IR_RF0L)
991 		work_done += m_can_handle_lost_msg(dev);
992 
993 	/* handle lec errors on the bus */
994 	if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
995 		u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
996 		u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);
997 
998 		if (is_lec_err(lec)) {
999 			netdev_dbg(dev, "Arbitration phase error detected\n");
1000 			work_done += m_can_handle_lec_err(dev, lec);
1001 		}
1002 
1003 		if (is_lec_err(dlec)) {
1004 			netdev_dbg(dev, "Data phase error detected\n");
1005 			work_done += m_can_handle_lec_err(dev, dlec);
1006 		}
1007 	}
1008 
1009 	/* handle protocol errors in arbitration phase */
1010 	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
1011 	    m_can_is_protocol_err(irqstatus))
1012 		work_done += m_can_handle_protocol_error(dev, irqstatus);
1013 
1014 	/* other unprocessed error interrupts */
1015 	m_can_handle_other_err(dev, irqstatus);
1016 
1017 	return work_done;
1018 }
1019 
1020 static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
1021 {
1022 	struct m_can_classdev *cdev = netdev_priv(dev);
1023 	int rx_work_or_err;
1024 	int work_done = 0;
1025 
1026 	if (!irqstatus)
1027 		goto end;
1028 
1029 	/* Errata workaround for issue "Needless activation of MRAF irq"
1030 	 * During frame reception while the MCAN is in Error Passive state
1031 	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
1032 	 * it may happen that MCAN_IR.MRAF is set although there was no
1033 	 * Message RAM access failure.
1034 	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
1035 	 * The Message RAM Access Failure interrupt routine needs to check
1036 	 * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
1037 	 * In this case, reset MCAN_IR.MRAF. No further action is required.
1038 	 */
1039 	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
1040 	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
1041 		struct can_berr_counter bec;
1042 
1043 		__m_can_get_berr_counter(dev, &bec);
1044 		if (bec.rxerr == 127) {
1045 			m_can_write(cdev, M_CAN_IR, IR_MRAF);
1046 			irqstatus &= ~IR_MRAF;
1047 		}
1048 	}
1049 
1050 	if (irqstatus & IR_ERR_STATE)
1051 		work_done += m_can_handle_state_errors(dev);
1052 
1053 	if (irqstatus & IR_ERR_BUS_30X)
1054 		work_done += m_can_handle_bus_errors(dev, irqstatus,
1055 						     m_can_read(cdev, M_CAN_PSR));
1056 
1057 	if (irqstatus & IR_RF0N) {
1058 		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
1059 		if (rx_work_or_err < 0)
1060 			return rx_work_or_err;
1061 
1062 		work_done += rx_work_or_err;
1063 	}
1064 end:
1065 	return work_done;
1066 }
1067 
1068 static int m_can_poll(struct napi_struct *napi, int quota)
1069 {
1070 	struct net_device *dev = napi->dev;
1071 	struct m_can_classdev *cdev = netdev_priv(dev);
1072 	int work_done;
1073 	u32 irqstatus;
1074 
1075 	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
1076 
1077 	work_done = m_can_rx_handler(dev, quota, irqstatus);
1078 
1079 	/* Don't re-enable interrupts if the driver had a fatal error
1080 	 * (e.g., FIFO read failure).
1081 	 */
1082 	if (work_done >= 0 && work_done < quota) {
1083 		napi_complete_done(napi, work_done);
1084 		m_can_enable_all_interrupts(cdev);
1085 	}
1086 
1087 	return work_done;
1088 }
1089 
1090 /* Echo tx skb and update net stats. Peripherals use rx-offload for
1091  * echo. timestamp is used for peripherals to ensure correct ordering
1092  * by rx-offload, and is ignored for non-peripherals.
1093  */
1094 static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
1095 					  unsigned int msg_mark, u32 timestamp)
1096 {
1097 	struct net_device *dev = cdev->net;
1098 	struct net_device_stats *stats = &dev->stats;
1099 	unsigned int frame_len;
1100 
1101 	if (cdev->is_peripheral)
1102 		stats->tx_bytes +=
1103 			can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
1104 								    msg_mark,
1105 								    timestamp,
1106 								    &frame_len);
1107 	else
1108 		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
1109 
1110 	stats->tx_packets++;
1111 
1112 	return frame_len;
1113 }
1114 
1115 static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
1116 			    unsigned int transmitted_frame_len)
1117 {
1118 	unsigned long irqflags;
1119 
1120 	netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
1121 
1122 	spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
1123 	if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
1124 		netif_wake_queue(cdev->net);
1125 	cdev->tx_fifo_in_flight -= transmitted;
1126 	spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
1127 }
1128 
1129 static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
1130 {
1131 	unsigned long irqflags;
1132 	int tx_fifo_in_flight;
1133 
1134 	spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
1135 	tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
1136 	if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
1137 		netif_stop_queue(cdev->net);
1138 		if (tx_fifo_in_flight > cdev->tx_fifo_size) {
1139 			netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
1140 			spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
1141 			return NETDEV_TX_BUSY;
1142 		}
1143 	}
1144 	cdev->tx_fifo_in_flight = tx_fifo_in_flight;
1145 	spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
1146 
1147 	return NETDEV_TX_OK;
1148 }
1149 
1150 static int m_can_echo_tx_event(struct net_device *dev)
1151 {
1152 	u32 txe_count = 0;
1153 	u32 m_can_txefs;
1154 	u32 fgi = 0;
1155 	int ack_fgi = -1;
1156 	int i = 0;
1157 	int err = 0;
1158 	unsigned int msg_mark;
1159 	int processed = 0;
1160 	unsigned int processed_frame_len = 0;
1161 
1162 	struct m_can_classdev *cdev = netdev_priv(dev);
1163 
1164 	/* read tx event fifo status */
1165 	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
1166 
1167 	/* Get Tx Event fifo element count */
1168 	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
1169 	fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);
1170 
1171 	/* Get and process all sent elements */
1172 	for (i = 0; i < txe_count; i++) {
1173 		u32 txe, timestamp = 0;
1174 
1175 		/* get message marker, timestamp */
1176 		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
1177 		if (err) {
1178 			netdev_err(dev, "TXE FIFO read returned %d\n", err);
1179 			break;
1180 		}
1181 
1182 		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
1183 		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
1184 
1185 		ack_fgi = fgi;
1186 		fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
1187 
1188 		/* update stats */
1189 		processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
1190 							     timestamp);
1191 
1192 		++processed;
1193 	}
1194 
1195 	if (ack_fgi != -1)
1196 		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
1197 							  ack_fgi));
1198 
1199 	m_can_finish_tx(cdev, processed, processed_frame_len);
1200 
1201 	return err;
1202 }
1203 
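/* While interrupt coalescing is in effect, mask the "new entry" interrupts
 * (RF0N/TEFN) and rely on the FIFO watermark interrupts plus the hrtimer
 * started below to pick up any remaining entries.
 */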
1204 static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
1205 {
1206 	u32 new_interrupts = cdev->active_interrupts;
1207 	bool enable_rx_timer = false;
1208 	bool enable_tx_timer = false;
1209 
1210 	if (!cdev->net->irq)
1211 		return;
1212 
1213 	if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
1214 		enable_rx_timer = true;
1215 		new_interrupts &= ~IR_RF0N;
1216 	}
1217 	if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
1218 		enable_tx_timer = true;
1219 		new_interrupts &= ~IR_TEFN;
1220 	}
1221 	if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
1222 		new_interrupts |= IR_RF0N;
1223 	if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
1224 		new_interrupts |= IR_TEFN;
1225 
1226 	m_can_interrupt_enable(cdev, new_interrupts);
1227 	if (enable_rx_timer | enable_tx_timer)
1228 		hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
1229 			      HRTIMER_MODE_REL);
1230 }
1231 
1232 /* This interrupt handler is called either from the interrupt thread or a
1233  * hrtimer. One implication is that blocking cancellation of the timer is
1234  * not possible here (hence hrtimer_try_to_cancel()).
1235  */
1236 static int m_can_interrupt_handler(struct m_can_classdev *cdev)
1237 {
1238 	struct net_device *dev = cdev->net;
1239 	u32 ir = 0, ir_read;
1240 	int ret;
1241 
1242 	if (pm_runtime_suspended(cdev->dev))
1243 		return IRQ_NONE;
1244 
1245 	/* The m_can controller signals its interrupt status as a level, but
1246 	 * depending on the integration, the CPU may interpret the signal as
1247 	 * edge-triggered (for example with m_can_pci). For these
1248 	 * edge-triggered integrations, we must observe that IR is 0 at least
1249 	 * once to be sure that the next interrupt will generate an edge.
1250 	 */
1251 	while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
1252 		ir |= ir_read;
1253 
1254 		/* ACK all irqs */
1255 		m_can_write(cdev, M_CAN_IR, ir);
1256 
1257 		if (!cdev->irq_edge_triggered)
1258 			break;
1259 	}
1260 
1261 	m_can_coalescing_update(cdev, ir);
1262 	if (!ir)
1263 		return IRQ_NONE;
1264 
1265 	if (cdev->ops->clear_interrupts)
1266 		cdev->ops->clear_interrupts(cdev);
1267 
1268 	/* schedule NAPI in case of
1269 	 * - rx IRQ
1270 	 * - state change IRQ
1271 	 * - bus error IRQ and bus error reporting
1272 	 */
1273 	if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
1274 		cdev->irqstatus = ir;
1275 		if (!cdev->is_peripheral) {
1276 			m_can_disable_all_interrupts(cdev);
1277 			napi_schedule(&cdev->napi);
1278 		} else {
1279 			ret = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, ir);
1280 			if (ret < 0)
1281 				return ret;
1282 		}
1283 	}
1284 
1285 	if (cdev->version == 30) {
1286 		if (ir & IR_TC) {
1287 			/* Transmission Complete Interrupt */
1288 			u32 timestamp = 0;
1289 			unsigned int frame_len;
1290 
1291 			if (cdev->is_peripheral)
1292 				timestamp = m_can_get_timestamp(cdev);
1293 			frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
1294 			m_can_finish_tx(cdev, 1, frame_len);
1295 		}
1296 	} else  {
1297 		if (ir & (IR_TEFN | IR_TEFW)) {
1298 			/* New TX FIFO Element arrived */
1299 			ret = m_can_echo_tx_event(dev);
1300 			if (ret != 0)
1301 				return ret;
1302 		}
1303 	}
1304 
1305 	if (cdev->is_peripheral)
1306 		can_rx_offload_threaded_irq_finish(&cdev->offload);
1307 
1308 	return IRQ_HANDLED;
1309 }
1310 
1311 static irqreturn_t m_can_isr(int irq, void *dev_id)
1312 {
1313 	struct net_device *dev = (struct net_device *)dev_id;
1314 	struct m_can_classdev *cdev = netdev_priv(dev);
1315 	int ret;
1316 
1317 	ret =  m_can_interrupt_handler(cdev);
1318 	if (ret < 0) {
1319 		m_can_disable_all_interrupts(cdev);
1320 		return IRQ_HANDLED;
1321 	}
1322 
1323 	return ret;
1324 }
1325 
1326 static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
1327 {
1328 	struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
1329 
1330 	if (cdev->can.state == CAN_STATE_BUS_OFF ||
1331 	    cdev->can.state == CAN_STATE_STOPPED)
1332 		return HRTIMER_NORESTART;
1333 
1334 	irq_wake_thread(cdev->net->irq, cdev->net);
1335 
1336 	return HRTIMER_NORESTART;
1337 }
1338 
1339 static const struct can_bittiming_const m_can_bittiming_const_30X = {
1340 	.name = KBUILD_MODNAME,
1341 	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
1342 	.tseg1_max = 64,
1343 	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
1344 	.tseg2_max = 16,
1345 	.sjw_max = 16,
1346 	.brp_min = 1,
1347 	.brp_max = 1024,
1348 	.brp_inc = 1,
1349 };
1350 
1351 static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
1352 	.name = KBUILD_MODNAME,
1353 	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
1354 	.tseg1_max = 16,
1355 	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
1356 	.tseg2_max = 8,
1357 	.sjw_max = 4,
1358 	.brp_min = 1,
1359 	.brp_max = 32,
1360 	.brp_inc = 1,
1361 };
1362 
1363 static const struct can_bittiming_const m_can_bittiming_const_31X = {
1364 	.name = KBUILD_MODNAME,
1365 	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
1366 	.tseg1_max = 256,
1367 	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
1368 	.tseg2_max = 128,
1369 	.sjw_max = 128,
1370 	.brp_min = 1,
1371 	.brp_max = 512,
1372 	.brp_inc = 1,
1373 };
1374 
1375 static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
1376 	.name = KBUILD_MODNAME,
1377 	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
1378 	.tseg1_max = 32,
1379 	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
1380 	.tseg2_max = 16,
1381 	.sjw_max = 16,
1382 	.brp_min = 1,
1383 	.brp_max = 32,
1384 	.brp_inc = 1,
1385 };
1386 
1387 static int m_can_init_ram(struct m_can_classdev *cdev)
1388 {
1389 	int end, i, start;
1390 	int err = 0;
1391 
1392 	/* initialize the entire Message RAM in use to avoid possible
1393 	 * ECC/parity checksum errors when reading an uninitialized buffer
1394 	 */
1395 	start = cdev->mcfg[MRAM_SIDF].off;
1396 	end = cdev->mcfg[MRAM_TXB].off +
1397 		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
1398 
1399 	for (i = start; i < end; i += 4) {
1400 		err = m_can_fifo_write_no_off(cdev, i, 0x0);
1401 		if (err)
1402 			break;
1403 	}
1404 
1405 	return err;
1406 }
1407 
1408 static int m_can_set_bittiming(struct net_device *dev)
1409 {
1410 	struct m_can_classdev *cdev = netdev_priv(dev);
1411 	const struct can_bittiming *bt = &cdev->can.bittiming;
1412 	const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
1413 	u16 brp, sjw, tseg1, tseg2;
1414 	u32 reg_btp;
1415 
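	/* The NBTP/DBTP register fields encode each bit-timing parameter as
	 * the programmed value minus one.
	 */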
1416 	brp = bt->brp - 1;
1417 	sjw = bt->sjw - 1;
1418 	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
1419 	tseg2 = bt->phase_seg2 - 1;
1420 	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
1421 		  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
1422 		  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
1423 		  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
1424 	m_can_write(cdev, M_CAN_NBTP, reg_btp);
1425 
1426 	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
1427 		reg_btp = 0;
1428 		brp = dbt->brp - 1;
1429 		sjw = dbt->sjw - 1;
1430 		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
1431 		tseg2 = dbt->phase_seg2 - 1;
1432 
1433 		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
1434 		 * This is mentioned in the "Bit Time Requirements for CAN FD"
1435 		 * paper presented at the International CAN Conference 2013
1436 		 */
1437 		if (dbt->bitrate > 2500000) {
1438 			u32 tdco, ssp;
1439 
1440 			/* Use the same value of secondary sampling point
1441 			 * as the data sampling point
1442 			 */
1443 			ssp = dbt->sample_point;
1444 
1445 			/* Equation based on Bosch's M_CAN User Manual's
1446 			 * Transmitter Delay Compensation Section
1447 			 */
1448 			tdco = (cdev->can.clock.freq / 1000) *
1449 				ssp / dbt->bitrate;
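			/* Worked example with hypothetical values: a 40 MHz CAN
			 * clock, an 87.5 % secondary sample point (ssp = 875) and
			 * a 4 Mbit/s data bitrate give
			 * tdco = 40000 * 875 / 4000000 = 8 clock periods.
			 */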
1450 
1451 			/* Max valid TDCO value is 127 */
1452 			if (tdco > 127) {
1453 				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
1454 					    tdco);
1455 				tdco = 127;
1456 			}
1457 
1458 			reg_btp |= DBTP_TDC;
1459 			m_can_write(cdev, M_CAN_TDCR,
1460 				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
1461 		}
1462 
1463 		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
1464 			FIELD_PREP(DBTP_DSJW_MASK, sjw) |
1465 			FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
1466 			FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
1467 
1468 		m_can_write(cdev, M_CAN_DBTP, reg_btp);
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 /* Configure M_CAN chip:
1475  * - set rx buffer/fifo element size
1476  * - configure rx fifo
1477  * - accept non-matching frame into fifo 0
1478  * - configure tx buffer
1479  *		- >= v3.1.x: TX FIFO is used
1480  * - configure mode
1481  * - setup bittiming
1482  * - configure timestamp generation
1483  */
1484 static int m_can_chip_config(struct net_device *dev)
1485 {
1486 	struct m_can_classdev *cdev = netdev_priv(dev);
1487 	u32 interrupts = IR_ALL_INT;
1488 	u32 cccr, test;
1489 	int err;
1490 
1491 	err = m_can_init_ram(cdev);
1492 	if (err) {
1493 		netdev_err(dev, "Message RAM configuration failed\n");
1494 		return err;
1495 	}
1496 
1497 	/* Disable unused interrupts */
1498 	interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
1499 			IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F |
1500 			IR_TSW);
1501 
1502 	err = m_can_config_enable(cdev);
1503 	if (err)
1504 		return err;
1505 
1506 	/* RX Buffer/FIFO Element Size 64 bytes data field */
1507 	m_can_write(cdev, M_CAN_RXESC,
1508 		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
1509 		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
1510 		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));
1511 
1512 	/* Accept Non-matching Frames Into FIFO 0 */
1513 	m_can_write(cdev, M_CAN_GFC, 0x0);
1514 
1515 	if (cdev->version == 30) {
1516 		/* only support one Tx Buffer currently */
1517 		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
1518 			    cdev->mcfg[MRAM_TXB].off);
1519 	} else {
1520 		/* TX FIFO is used for newer IP Core versions */
1521 		m_can_write(cdev, M_CAN_TXBC,
1522 			    FIELD_PREP(TXBC_TFQS_MASK,
1523 				       cdev->mcfg[MRAM_TXB].num) |
1524 			    cdev->mcfg[MRAM_TXB].off);
1525 	}
1526 
1527 	/* support 64 bytes payload */
1528 	m_can_write(cdev, M_CAN_TXESC,
1529 		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));
1530 
1531 	/* TX Event FIFO */
1532 	if (cdev->version == 30) {
1533 		m_can_write(cdev, M_CAN_TXEFC,
1534 			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
1535 			    cdev->mcfg[MRAM_TXE].off);
1536 	} else {
1537 		/* Full TX Event FIFO is used */
1538 		m_can_write(cdev, M_CAN_TXEFC,
1539 			    FIELD_PREP(TXEFC_EFWM_MASK,
1540 				       cdev->tx_max_coalesced_frames_irq) |
1541 			    FIELD_PREP(TXEFC_EFS_MASK,
1542 				       cdev->mcfg[MRAM_TXE].num) |
1543 			    cdev->mcfg[MRAM_TXE].off);
1544 	}
1545 
1546 	/* RX FIFO 0 and 1 configuration: blocking mode, watermark and sizes from the Message RAM layout */
1547 	m_can_write(cdev, M_CAN_RXF0C,
1548 		    FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
1549 		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
1550 		    cdev->mcfg[MRAM_RXF0].off);
1551 
1552 	m_can_write(cdev, M_CAN_RXF1C,
1553 		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
1554 		    cdev->mcfg[MRAM_RXF1].off);
1555 
1556 	cccr = m_can_read(cdev, M_CAN_CCCR);
1557 	test = m_can_read(cdev, M_CAN_TEST);
1558 	test &= ~TEST_LBCK;
1559 	if (cdev->version == 30) {
1560 		/* Version 3.0.x */
1561 
1562 		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
1563 			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
1564 			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));
1565 
1566 		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
1567 			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);
1568 
1569 	} else {
1570 		/* Version 3.1.x or 3.2.x */
1571 		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
1572 			  CCCR_NISO | CCCR_DAR);
1573 
1574 		/* Only 3.2.x has NISO Bit implemented */
1575 		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
1576 			cccr |= CCCR_NISO;
1577 
1578 		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
1579 			cccr |= (CCCR_BRSE | CCCR_FDOE);
1580 	}
1581 
1582 	/* Loopback Mode */
1583 	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
1584 		cccr |= CCCR_TEST | CCCR_MON;
1585 		test |= TEST_LBCK;
1586 	}
1587 
1588 	/* Enable Monitoring (all versions) */
1589 	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
1590 		cccr |= CCCR_MON;
1591 
1592 	/* Disable Auto Retransmission (all versions) */
1593 	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
1594 		cccr |= CCCR_DAR;
1595 
1596 	/* Write config */
1597 	m_can_write(cdev, M_CAN_CCCR, cccr);
1598 	m_can_write(cdev, M_CAN_TEST, test);
1599 
1600 	/* Enable interrupts */
1601 	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
1602 		if (cdev->version == 30)
1603 			interrupts &= ~(IR_ERR_LEC_30X);
1604 		else
1605 			interrupts &= ~(IR_ERR_LEC_31X);
1606 	}
1607 	cdev->active_interrupts = 0;
1608 	m_can_interrupt_enable(cdev, interrupts);
1609 
1610 	/* route all interrupts to INT0 */
1611 	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
1612 
1613 	/* set bittiming params */
1614 	m_can_set_bittiming(dev);
1615 
1616 	/* enable internal timestamp generation, with a prescaler of 16. The
1617 	 * prescaler is applied to the nominal bit timing
1618 	 */
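	/* Per the M_CAN user manual, TSCC.TCP programs the prescaler as the
	 * field value plus one, so 0xf selects divide-by-16.
	 */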
1619 	m_can_write(cdev, M_CAN_TSCC,
1620 		    FIELD_PREP(TSCC_TCP_MASK, 0xf) |
1621 		    FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
1622 
1623 	err = m_can_config_disable(cdev);
1624 	if (err)
1625 		return err;
1626 
1627 	if (cdev->ops->init)
1628 		cdev->ops->init(cdev);
1629 
1630 	return 0;
1631 }
1632 
1633 static int m_can_start(struct net_device *dev)
1634 {
1635 	struct m_can_classdev *cdev = netdev_priv(dev);
1636 	int ret;
1637 
1638 	/* basic m_can configuration */
1639 	ret = m_can_chip_config(dev);
1640 	if (ret)
1641 		return ret;
1642 
1643 	netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
1644 				       cdev->tx_max_coalesced_frames);
1645 
1646 	cdev->can.state = m_can_state_get_by_psr(cdev);
1647 
1648 	m_can_enable_all_interrupts(cdev);
1649 
1650 	if (cdev->version > 30)
1651 		cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
1652 						 m_can_read(cdev, M_CAN_TXFQS));
1653 
1654 	ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
1655 	if (ret)
1656 		netdev_err(dev, "failed to enter normal mode\n");
1657 
1658 	return ret;
1659 }
1660 
1661 static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
1662 {
1663 	switch (mode) {
1664 	case CAN_MODE_START:
1665 		m_can_clean(dev);
1666 		m_can_start(dev);
1667 		netif_wake_queue(dev);
1668 		break;
1669 	default:
1670 		return -EOPNOTSUPP;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 /* Checks core release number of M_CAN
1677  * returns 0 if an unsupported device is detected
1678  * else it returns the release and step coded as:
1679  * return value = 10 * <release> + 1 * <step>
1680  */
1681 static int m_can_check_core_release(struct m_can_classdev *cdev)
1682 {
1683 	u32 crel_reg;
1684 	u8 rel;
1685 	u8 step;
1686 	int res;
1687 
1688 	/* Read Core Release Version and split into version number
1689 	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
1690 	 */
1691 	crel_reg = m_can_read(cdev, M_CAN_CREL);
1692 	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
1693 	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
1694 
1695 	if (rel == 3) {
1696 		/* M_CAN v3.x.y: create return value */
1697 		res = 30 + step;
1698 	} else {
1699 		/* Unsupported M_CAN version */
1700 		res = 0;
1701 	}
1702 
1703 	return res;
1704 }
1705 
1706 /* Selectable Non ISO support only in version 3.2.x
1707  * Return 1 if the bit is writable, 0 if it is not, or negative on error.
1708  */
1709 static int m_can_niso_supported(struct m_can_classdev *cdev)
1710 {
1711 	int ret, niso;
1712 
1713 	ret = m_can_config_enable(cdev);
1714 	if (ret)
1715 		return ret;
1716 
1717 	/* First try to set the NISO bit. */
1718 	niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
1719 
1720 	/* Then clear it again. */
1721 	ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
1722 	if (ret) {
1723 		netdev_err(cdev->net, "failed to revert the NON-ISO bit in CCCR\n");
1724 		return ret;
1725 	}
1726 
1727 	ret = m_can_config_disable(cdev);
1728 	if (ret)
1729 		return ret;
1730 
1731 	return niso == 0;
1732 }
1733 
1734 static int m_can_dev_setup(struct m_can_classdev *cdev)
1735 {
1736 	struct net_device *dev = cdev->net;
1737 	int m_can_version, err, niso;
1738 
1739 	m_can_version = m_can_check_core_release(cdev);
1740 	/* return if unsupported version */
1741 	if (!m_can_version) {
1742 		netdev_err(cdev->net, "Unsupported version number: %2d",
1743 			   m_can_version);
1744 		return -EINVAL;
1745 	}
1746 
1747 	/* Write the INIT bit, in case no hardware reset has happened before
1748 	 * the probe (for example, it was observed that the Intel Elkhart Lake
1749 	 * SoCs do not properly reset the CAN controllers on reboot)
1750 	 */
1751 	err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
1752 	if (err)
1753 		return err;
1754 
1755 	if (!cdev->is_peripheral)
1756 		netif_napi_add(dev, &cdev->napi, m_can_poll);
1757 
1758 	/* Shared properties of all M_CAN versions */
1759 	cdev->version = m_can_version;
1760 	cdev->can.do_set_mode = m_can_set_mode;
1761 	cdev->can.do_get_berr_counter = m_can_get_berr_counter;
1762 
1763 	/* Set M_CAN supported operations */
1764 	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1765 		CAN_CTRLMODE_LISTENONLY |
1766 		CAN_CTRLMODE_BERR_REPORTING |
1767 		CAN_CTRLMODE_FD |
1768 		CAN_CTRLMODE_ONE_SHOT;
1769 
1770 	/* Set properties depending on M_CAN version */
1771 	switch (cdev->version) {
1772 	case 30:
1773 		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
1774 		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
1775 		if (err)
1776 			return err;
1777 		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
1778 		cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
1779 		break;
1780 	case 31:
1781 		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
1782 		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
1783 		if (err)
1784 			return err;
1785 		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
1786 		cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
1787 		break;
1788 	case 32:
1789 	case 33:
1790 		/* Support both MCAN version v3.2.x and v3.3.0 */
1791 		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
1792 		cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
1793 
1794 		niso = m_can_niso_supported(cdev);
1795 		if (niso < 0)
1796 			return niso;
1797 		if (niso)
1798 			cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
1799 		break;
1800 	default:
1801 		netdev_err(cdev->net, "Unsupported version number: %2d",
1802 			   cdev->version);
1803 		return -EINVAL;
1804 	}
1805 
1806 	return 0;
1807 }
1808 
1809 static void m_can_stop(struct net_device *dev)
1810 {
1811 	struct m_can_classdev *cdev = netdev_priv(dev);
1812 	int ret;
1813 
1814 	/* disable all interrupts */
1815 	m_can_disable_all_interrupts(cdev);
1816 
1817 	/* Set init mode to disengage from the network */
1818 	ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
1819 	if (ret)
1820 		netdev_err(dev, "failed to enter standby mode: %pe\n",
1821 			   ERR_PTR(ret));
1822 
1823 	/* set the state as STOPPED */
1824 	cdev->can.state = CAN_STATE_STOPPED;
1825 
1826 	if (cdev->ops->deinit) {
1827 		ret = cdev->ops->deinit(cdev);
1828 		if (ret)
1829 			netdev_err(dev, "failed to deinitialize: %pe\n",
1830 				   ERR_PTR(ret));
1831 	}
1832 }
1833 
1834 static int m_can_close(struct net_device *dev)
1835 {
1836 	struct m_can_classdev *cdev = netdev_priv(dev);
1837 
1838 	netif_stop_queue(dev);
1839 
1840 	m_can_stop(dev);
1841 	if (dev->irq)
1842 		free_irq(dev->irq, dev);
1843 
1844 	m_can_clean(dev);
1845 
1846 	if (cdev->is_peripheral) {
1847 		destroy_workqueue(cdev->tx_wq);
1848 		cdev->tx_wq = NULL;
1849 		can_rx_offload_disable(&cdev->offload);
1850 	} else {
1851 		napi_disable(&cdev->napi);
1852 	}
1853 
1854 	close_candev(dev);
1855 
1856 	reset_control_assert(cdev->rst);
1857 	m_can_clk_stop(cdev);
1858 	phy_power_off(cdev->transceiver);
1859 
1860 	return 0;
1861 }
1862 
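/* Write one frame into the controller's TX message RAM. M_CAN v3.0.x only
 * uses a single dedicated TX buffer (the queue is stopped for every frame),
 * while v3.1.x and later use the TX FIFO: the put index doubles as the
 * message marker so the TX event FIFO can match completions to the echoed
 * skbs.
 */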
1863 static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
1864 				    struct sk_buff *skb)
1865 {
1866 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1867 	u8 len_padded = DIV_ROUND_UP(cf->len, 4);
1868 	struct m_can_fifo_element fifo_element;
1869 	struct net_device *dev = cdev->net;
1870 	u32 cccr, fdflags;
1871 	int err;
1872 	u32 putidx;
1873 	unsigned int frame_len = can_skb_get_frame_len(skb);
1874 
1875 	/* Generate ID field for TX buffer Element */
1876 	/* Common to all supported M_CAN versions */
1877 	if (cf->can_id & CAN_EFF_FLAG) {
1878 		fifo_element.id = cf->can_id & CAN_EFF_MASK;
1879 		fifo_element.id |= TX_BUF_XTD;
1880 	} else {
1881 		fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
1882 	}
1883 
1884 	if (cf->can_id & CAN_RTR_FLAG)
1885 		fifo_element.id |= TX_BUF_RTR;
1886 
1887 	if (cdev->version == 30) {
1888 		netif_stop_queue(dev);
1889 
1890 		fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
1891 
1892 		/* Write the frame ID, DLC, and payload to the FIFO element. */
1893 		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
1894 		if (err)
1895 			goto out_fail;
1896 
1897 		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
1898 				       cf->data, len_padded);
1899 		if (err)
1900 			goto out_fail;
1901 
1902 		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
1903 			cccr = m_can_read(cdev, M_CAN_CCCR);
1904 			cccr &= ~CCCR_CMR_MASK;
1905 			if (can_is_canfd_skb(skb)) {
1906 				if (cf->flags & CANFD_BRS)
1907 					cccr |= FIELD_PREP(CCCR_CMR_MASK,
1908 							   CCCR_CMR_CANFD_BRS);
1909 				else
1910 					cccr |= FIELD_PREP(CCCR_CMR_MASK,
1911 							   CCCR_CMR_CANFD);
1912 			} else {
1913 				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
1914 			}
1915 			m_can_write(cdev, M_CAN_CCCR, cccr);
1916 		}
1917 		m_can_write(cdev, M_CAN_TXBTIE, 0x1);
1918 
1919 		can_put_echo_skb(skb, dev, 0, frame_len);
1920 
1921 		m_can_write(cdev, M_CAN_TXBAR, 0x1);
1922 		/* End of xmit function for version 3.0.x */
1923 	} else {
1924 		/* Transmit routine for version >= v3.1.x */
1925 
1926 		/* get put index for frame */
1927 		putidx = cdev->tx_fifo_putidx;
1928 
1929 		/* Construct the DLC field, including the CAN FD configuration.
1930 		 * The FIFO put index is used as the message marker, which the
1931 		 * TX interrupt uses to echo the correct frame.
1932 		 */
1933 
1934 		/* get CAN FD configuration of frame */
1935 		fdflags = 0;
1936 		if (can_is_canfd_skb(skb)) {
1937 			fdflags |= TX_BUF_FDF;
1938 			if (cf->flags & CANFD_BRS)
1939 				fdflags |= TX_BUF_BRS;
1940 		}
1941 
1942 		fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
1943 			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
1944 			fdflags | TX_BUF_EFC;
1945 
1946 		memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
1947 			       cf->len, 0);
1948 
1949 		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
1950 				       &fifo_element, 2 + len_padded);
1951 		if (err)
1952 			goto out_fail;
1953 
1954 		/* Push loopback echo.
1955 		 * Will be looped back on TX interrupt based on message marker
1956 		 */
1957 		can_put_echo_skb(skb, dev, putidx, frame_len);
1958 
1959 		if (cdev->is_peripheral) {
1960 			/* Delay enabling TX FIFO element */
1961 			cdev->tx_peripheral_submit |= BIT(putidx);
1962 		} else {
1963 			/* Enable TX FIFO element to start transfer  */
1964 			m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
1965 		}
1966 		cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
1967 					0 : cdev->tx_fifo_putidx);
1968 	}
1969 
1970 	return NETDEV_TX_OK;
1971 
1972 out_fail:
1973 	netdev_err(dev, "FIFO write returned %d\n", err);
1974 	m_can_disable_all_interrupts(cdev);
1975 	return NETDEV_TX_BUSY;
1976 }
1977 
1978 static void m_can_tx_submit(struct m_can_classdev *cdev)
1979 {
1980 	m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
1981 	cdev->tx_peripheral_submit = 0;
1982 }
1983 
1984 static void m_can_tx_work_queue(struct work_struct *ws)
1985 {
1986 	struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
1987 	struct m_can_classdev *cdev = op->cdev;
1988 	struct sk_buff *skb = op->skb;
1989 
1990 	op->skb = NULL;
1991 	m_can_tx_handler(cdev, skb);
1992 	if (op->submit)
1993 		m_can_tx_submit(cdev);
1994 }
1995 
1996 static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
1997 			       bool submit)
1998 {
1999 	cdev->tx_ops[cdev->next_tx_op].skb = skb;
2000 	cdev->tx_ops[cdev->next_tx_op].submit = submit;
2001 	queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
2002 
2003 	++cdev->next_tx_op;
2004 	if (cdev->next_tx_op >= cdev->tx_fifo_size)
2005 		cdev->next_tx_op = 0;
2006 }
2007 
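/* For peripheral devices the TXBAR write that actually starts the transfer
 * is batched: frames are queued to the TX workqueue and the doorbell is only
 * rung once tx_max_coalesced_frames frames have been queued, or once the
 * networking stack signals via netdev_xmit_more() that no further frames are
 * pending.
 */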
2008 static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
2009 					       struct sk_buff *skb)
2010 {
2011 	bool submit;
2012 
2013 	++cdev->nr_txs_without_submit;
2014 	if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
2015 	    !netdev_xmit_more()) {
2016 		cdev->nr_txs_without_submit = 0;
2017 		submit = true;
2018 	} else {
2019 		submit = false;
2020 	}
2021 	m_can_tx_queue_skb(cdev, skb, submit);
2022 
2023 	return NETDEV_TX_OK;
2024 }
2025 
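/* Main transmit entry point. Frames are dropped early when the controller is
 * bus-off. BQL accounting is opened with netdev_sent_queue() and rolled back
 * with netdev_completed_queue() if the frame cannot be handed to the
 * hardware. Peripheral (e.g. SPI attached) controllers defer the FIFO write
 * to the TX workqueue, memory mapped ones write the frame directly.
 */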
2026 static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
2027 				    struct net_device *dev)
2028 {
2029 	struct m_can_classdev *cdev = netdev_priv(dev);
2030 	unsigned int frame_len;
2031 	netdev_tx_t ret;
2032 
2033 	if (can_dev_dropped_skb(dev, skb))
2034 		return NETDEV_TX_OK;
2035 
2036 	frame_len = can_skb_get_frame_len(skb);
2037 
2038 	if (cdev->can.state == CAN_STATE_BUS_OFF) {
2039 		m_can_clean(cdev->net);
2040 		return NETDEV_TX_OK;
2041 	}
2042 
2043 	ret = m_can_start_tx(cdev);
2044 	if (ret != NETDEV_TX_OK)
2045 		return ret;
2046 
2047 	netdev_sent_queue(dev, frame_len);
2048 
2049 	if (cdev->is_peripheral)
2050 		ret = m_can_start_peripheral_xmit(cdev, skb);
2051 	else
2052 		ret = m_can_tx_handler(cdev, skb);
2053 
2054 	if (ret != NETDEV_TX_OK)
2055 		netdev_completed_queue(dev, 1, frame_len);
2056 
2057 	return ret;
2058 }
2059 
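/* Polling mode is used when no interrupt line is wired up (net->irq == 0).
 * The hrtimer periodically runs the regular interrupt handler and re-arms
 * itself every HRTIMER_POLL_INTERVAL_MS until the device is stopped, goes
 * bus-off or NAPI takes over the RX processing.
 */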
2060 static enum hrtimer_restart m_can_polling_timer(struct hrtimer *timer)
2061 {
2062 	struct m_can_classdev *cdev = container_of(timer, struct
2063 						   m_can_classdev, hrtimer);
2064 	int ret;
2065 
2066 	if (cdev->can.state == CAN_STATE_BUS_OFF ||
2067 	    cdev->can.state == CAN_STATE_STOPPED)
2068 		return HRTIMER_NORESTART;
2069 
2070 	ret = m_can_interrupt_handler(cdev);
2071 
2072 	/* On error or if napi is scheduled to read, stop the timer */
2073 	if (ret < 0 || napi_is_scheduled(&cdev->napi))
2074 		return HRTIMER_NORESTART;
2075 
2076 	hrtimer_forward_now(timer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS));
2077 
2078 	return HRTIMER_RESTART;
2079 }
2080 
2081 static int m_can_open(struct net_device *dev)
2082 {
2083 	struct m_can_classdev *cdev = netdev_priv(dev);
2084 	int err;
2085 
2086 	err = phy_power_on(cdev->transceiver);
2087 	if (err)
2088 		return err;
2089 
2090 	err = m_can_clk_start(cdev);
2091 	if (err)
2092 		goto out_phy_power_off;
2093 
2094 	err = reset_control_deassert(cdev->rst);
2095 	if (err)
2096 		goto exit_disable_clks;
2097 
2098 	/* open the can device */
2099 	err = open_candev(dev);
2100 	if (err) {
2101 		netdev_err(dev, "failed to open can device\n");
2102 		goto out_reset_control_assert;
2103 	}
2104 
2105 	if (cdev->is_peripheral)
2106 		can_rx_offload_enable(&cdev->offload);
2107 	else
2108 		napi_enable(&cdev->napi);
2109 
2110 	/* register interrupt handler */
2111 	if (cdev->is_peripheral) {
2112 		cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
2113 						      WQ_FREEZABLE | WQ_MEM_RECLAIM);
2114 		if (!cdev->tx_wq) {
2115 			err = -ENOMEM;
2116 			goto out_wq_fail;
2117 		}
2118 
2119 		for (int i = 0; i != cdev->tx_fifo_size; ++i) {
2120 			cdev->tx_ops[i].cdev = cdev;
2121 			INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
2122 		}
2123 
2124 		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
2125 					   IRQF_ONESHOT,
2126 					   dev->name, dev);
2127 	} else if (dev->irq) {
2128 		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
2129 				  dev);
2130 	}
2131 
2132 	if (err < 0) {
2133 		netdev_err(dev, "failed to request interrupt\n");
2134 		goto exit_irq_fail;
2135 	}
2136 
2137 	/* start the m_can controller */
2138 	err = m_can_start(dev);
2139 	if (err)
2140 		goto exit_start_fail;
2141 
2142 	netif_start_queue(dev);
2143 
2144 	return 0;
2145 
2146 exit_start_fail:
2147 	if (cdev->is_peripheral || dev->irq)
2148 		free_irq(dev->irq, dev);
2149 exit_irq_fail:
2150 	if (cdev->is_peripheral)
2151 		destroy_workqueue(cdev->tx_wq);
2152 out_wq_fail:
2153 	if (cdev->is_peripheral)
2154 		can_rx_offload_disable(&cdev->offload);
2155 	else
2156 		napi_disable(&cdev->napi);
2157 	close_candev(dev);
2158 out_reset_control_assert:
2159 	reset_control_assert(cdev->rst);
2160 exit_disable_clks:
2161 	m_can_clk_stop(cdev);
2162 out_phy_power_off:
2163 	phy_power_off(cdev->transceiver);
2164 	return err;
2165 }
2166 
2167 static const struct net_device_ops m_can_netdev_ops = {
2168 	.ndo_open = m_can_open,
2169 	.ndo_stop = m_can_close,
2170 	.ndo_start_xmit = m_can_start_xmit,
2171 };
2172 
2173 static int m_can_get_coalesce(struct net_device *dev,
2174 			      struct ethtool_coalesce *ec,
2175 			      struct kernel_ethtool_coalesce *kec,
2176 			      struct netlink_ext_ack *ext_ack)
2177 {
2178 	struct m_can_classdev *cdev = netdev_priv(dev);
2179 
2180 	ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
2181 	ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
2182 	ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
2183 	ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
2184 	ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
2185 
2186 	return 0;
2187 }
2188 
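/* Validate and apply the ethtool IRQ coalescing settings. The device must be
 * down, rx-frames-irq/rx-usecs-irq and tx-frames-irq/tx-usecs-irq must be
 * set (or cleared) in pairs, and the two usecs values must match when both
 * are enabled. Illustrative example (the interface name and values are
 * assumptions; the frame limits depend on the Message RAM layout):
 *
 *   ip link set can0 down
 *   ethtool -C can0 rx-frames-irq 8 rx-usecs-irq 500 \
 *                   tx-frames-irq 8 tx-usecs-irq 500 tx-frames 8
 */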
2189 static int m_can_set_coalesce(struct net_device *dev,
2190 			      struct ethtool_coalesce *ec,
2191 			      struct kernel_ethtool_coalesce *kec,
2192 			      struct netlink_ext_ack *ext_ack)
2193 {
2194 	struct m_can_classdev *cdev = netdev_priv(dev);
2195 
2196 	if (cdev->can.state != CAN_STATE_STOPPED) {
2197 		netdev_err(dev, "Device is in use, please shut it down first\n");
2198 		return -EBUSY;
2199 	}
2200 
2201 	if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
2202 		netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
2203 			   ec->rx_max_coalesced_frames_irq,
2204 			   cdev->mcfg[MRAM_RXF0].num);
2205 		return -EINVAL;
2206 	}
2207 	if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
2208 		netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
2209 		return -EINVAL;
2210 	}
2211 	if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
2212 		netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
2213 			   ec->tx_max_coalesced_frames_irq,
2214 			   cdev->mcfg[MRAM_TXE].num);
2215 		return -EINVAL;
2216 	}
2217 	if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
2218 		netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
2219 			   ec->tx_max_coalesced_frames_irq,
2220 			   cdev->mcfg[MRAM_TXB].num);
2221 		return -EINVAL;
2222 	}
2223 	if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
2224 		netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
2225 		return -EINVAL;
2226 	}
2227 	if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
2228 		netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
2229 			   ec->tx_max_coalesced_frames,
2230 			   cdev->mcfg[MRAM_TXE].num);
2231 		return -EINVAL;
2232 	}
2233 	if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
2234 		netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
2235 			   ec->tx_max_coalesced_frames,
2236 			   cdev->mcfg[MRAM_TXB].num);
2237 		return -EINVAL;
2238 	}
2239 	if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
2240 	    ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
2241 		netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
2242 			   ec->rx_coalesce_usecs_irq,
2243 			   ec->tx_coalesce_usecs_irq);
2244 		return -EINVAL;
2245 	}
2246 
2247 	cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
2248 	cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
2249 	cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
2250 	cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
2251 	cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
2252 
2253 	if (cdev->rx_coalesce_usecs_irq)
2254 		cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
2255 	else
2256 		cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
2257 
2258 	return 0;
2259 }
2260 
2261 static void m_can_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2262 {
2263 	struct m_can_classdev *cdev = netdev_priv(dev);
2264 
2265 	wol->supported = device_can_wakeup(cdev->dev) ? WAKE_PHY : 0;
2266 	wol->wolopts = device_may_wakeup(cdev->dev) ? WAKE_PHY : 0;
2267 }
2268 
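/* Wake-on-LAN handling: only WAKE_PHY (wake on bus activity) is supported,
 * and only if the platform marked the device as wakeup capable. From user
 * space it can be toggled with ethtool, e.g. (the interface name is an
 * assumption):
 *
 *   ethtool -s can0 wol p    # enable wake on PHY/bus activity
 *   ethtool -s can0 wol d    # disable wake-up
 */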
2269 static int m_can_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2270 {
2271 	struct m_can_classdev *cdev = netdev_priv(dev);
2272 	bool wol_enable = !!(wol->wolopts & WAKE_PHY);
2273 	int ret;
2274 
2275 	if (wol->wolopts & ~WAKE_PHY)
2276 		return -EINVAL;
2277 
2278 	if (wol_enable == device_may_wakeup(cdev->dev))
2279 		return 0;
2280 
2281 	ret = device_set_wakeup_enable(cdev->dev, wol_enable);
2282 	if (ret) {
2283 		netdev_err(cdev->net, "Failed to set wakeup enable %pe\n",
2284 			   ERR_PTR(ret));
2285 		return ret;
2286 	}
2287 
2288 	if (!IS_ERR_OR_NULL(cdev->pinctrl_state_wakeup)) {
2289 		if (wol_enable)
2290 			ret = pinctrl_select_state(cdev->pinctrl, cdev->pinctrl_state_wakeup);
2291 		else
2292 			ret = pinctrl_pm_select_default_state(cdev->dev);
2293 
2294 		if (ret) {
2295 			netdev_err(cdev->net, "Failed to select pinctrl state %pe\n",
2296 				   ERR_PTR(ret));
2297 			goto err_wakeup_enable;
2298 		}
2299 	}
2300 
2301 	return 0;
2302 
2303 err_wakeup_enable:
2304 	/* Revert wakeup enable */
2305 	device_set_wakeup_enable(cdev->dev, !wol_enable);
2306 
2307 	return ret;
2308 }
2309 
2310 static const struct ethtool_ops m_can_ethtool_ops_coalescing = {
2311 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
2312 		ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
2313 		ETHTOOL_COALESCE_TX_USECS_IRQ |
2314 		ETHTOOL_COALESCE_TX_MAX_FRAMES |
2315 		ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
2316 	.get_ts_info = ethtool_op_get_ts_info,
2317 	.get_coalesce = m_can_get_coalesce,
2318 	.set_coalesce = m_can_set_coalesce,
2319 	.get_wol = m_can_get_wol,
2320 	.set_wol = m_can_set_wol,
2321 };
2322 
2323 static const struct ethtool_ops m_can_ethtool_ops = {
2324 	.get_ts_info = ethtool_op_get_ts_info,
2325 	.get_wol = m_can_get_wol,
2326 	.set_wol = m_can_set_wol,
2327 };
2328 
2329 static int register_m_can_dev(struct m_can_classdev *cdev)
2330 {
2331 	struct net_device *dev = cdev->net;
2332 
2333 	dev->flags |= IFF_ECHO;	/* we support local echo */
2334 	dev->netdev_ops = &m_can_netdev_ops;
2335 	if (dev->irq && cdev->is_peripheral)
2336 		dev->ethtool_ops = &m_can_ethtool_ops_coalescing;
2337 	else
2338 		dev->ethtool_ops = &m_can_ethtool_ops;
2339 
2340 	return register_candev(dev);
2341 }
2342 
2343 int m_can_check_mram_cfg(struct m_can_classdev *cdev, u32 mram_max_size)
2344 {
2345 	u32 total_size;
2346 
2347 	total_size = cdev->mcfg[MRAM_TXB].off - cdev->mcfg[MRAM_SIDF].off +
2348 			cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
2349 	if (total_size > mram_max_size) {
2350 		netdev_err(cdev->net, "Total size of mram config(%u) exceeds mram(%u)\n",
2351 			   total_size, mram_max_size);
2352 		return -EINVAL;
2353 	}
2354 
2355 	return 0;
2356 }
2357 EXPORT_SYMBOL_GPL(m_can_check_mram_cfg);
2358 
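/* Split the Message RAM between the different element types. The eight cells
 * of the "bosch,mram-cfg" firmware property are, in order: offset, number of
 * 11-bit filter, 29-bit filter, RX FIFO 0, RX FIFO 1, RX buffer, TX event
 * FIFO and TX buffer elements. A minimal sketch of a devicetree entry (the
 * values are an illustrative assumption, not a recommendation):
 *
 *   bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
 */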
2359 static void m_can_of_parse_mram(struct m_can_classdev *cdev,
2360 				const u32 *mram_config_vals)
2361 {
2362 	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
2363 	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
2364 	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
2365 		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
2366 	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
2367 	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
2368 		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
2369 	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
2370 		FIELD_MAX(RXFC_FS_MASK);
2371 	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
2372 		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
2373 	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
2374 		FIELD_MAX(RXFC_FS_MASK);
2375 	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
2376 		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
2377 	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
2378 	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
2379 		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
2380 	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
2381 	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
2382 		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
2383 	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
2384 		FIELD_MAX(TXBC_NDTB_MASK);
2385 
2386 	netdev_dbg(cdev->net,
2387 		   "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
2388 		   cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
2389 		   cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
2390 		   cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
2391 		   cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
2392 		   cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
2393 		   cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
2394 		   cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
2395 }
2396 
2397 int m_can_class_get_clocks(struct m_can_classdev *cdev)
2398 {
2399 	int ret = 0;
2400 
2401 	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
2402 	cdev->cclk = devm_clk_get(cdev->dev, "cclk");
2403 
2404 	if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
2405 		netdev_err(cdev->net, "no clock found\n");
2406 		ret = -ENODEV;
2407 	}
2408 
2409 	return ret;
2410 }
2411 EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
2412 
2413 static bool m_can_class_wakeup_pinctrl_enabled(struct m_can_classdev *class_dev)
2414 {
2415 	return device_may_wakeup(class_dev->dev) && class_dev->pinctrl_state_wakeup;
2416 }
2417 
2418 static int m_can_class_parse_pinctrl(struct m_can_classdev *class_dev)
2419 {
2420 	struct device *dev = class_dev->dev;
2421 	int ret;
2422 
2423 	class_dev->pinctrl = devm_pinctrl_get(dev);
2424 	if (IS_ERR(class_dev->pinctrl)) {
2425 		ret = PTR_ERR(class_dev->pinctrl);
2426 		class_dev->pinctrl = NULL;
2427 
2428 		if (ret == -ENODEV)
2429 			return 0;
2430 
2431 		return dev_err_probe(dev, ret, "Failed to get pinctrl\n");
2432 	}
2433 
2434 	class_dev->pinctrl_state_wakeup =
2435 		pinctrl_lookup_state(class_dev->pinctrl, "wakeup");
2436 	if (IS_ERR(class_dev->pinctrl_state_wakeup)) {
2437 		ret = PTR_ERR(class_dev->pinctrl_state_wakeup);
2438 		class_dev->pinctrl_state_wakeup = NULL;
2439 
2440 		if (ret == -ENODEV)
2441 			return 0;
2442 
2443 		return dev_err_probe(dev, ret, "Failed to lookup pinctrl wakeup state\n");
2444 	}
2445 
2446 	return 0;
2447 }
2448 
2449 struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
2450 						int sizeof_priv)
2451 {
2452 	struct m_can_classdev *class_dev = NULL;
2453 	u32 mram_config_vals[MRAM_CFG_LEN];
2454 	struct net_device *net_dev;
2455 	u32 tx_fifo_size;
2456 	int ret;
2457 
2458 	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
2459 					     "bosch,mram-cfg",
2460 					     mram_config_vals,
2461 					     sizeof(mram_config_vals) / 4);
2462 	if (ret) {
2463 		dev_err(dev, "Could not get Message RAM configuration.");
2464 		return ERR_PTR(ret);
2465 	}
2466 
2467 	if (dev->of_node && of_property_read_bool(dev->of_node, "wakeup-source"))
2468 		device_set_wakeup_capable(dev, true);
2469 
2470 	/* Get TX FIFO size
2471 	 * Defines the total amount of echo buffers for loopback
2472 	 */
2473 	tx_fifo_size = mram_config_vals[7];
2474 
2475 	/* allocate the m_can device */
2476 	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
2477 	if (!net_dev) {
2478 		dev_err(dev, "Failed to allocate CAN device");
2479 		return ERR_PTR(-ENOMEM);
2480 	}
2481 
2482 	class_dev = netdev_priv(net_dev);
2483 	class_dev->net = net_dev;
2484 	class_dev->dev = dev;
2485 	SET_NETDEV_DEV(net_dev, dev);
2486 
2487 	m_can_of_parse_mram(class_dev, mram_config_vals);
2488 	spin_lock_init(&class_dev->tx_handling_spinlock);
2489 
2490 	ret = m_can_class_parse_pinctrl(class_dev);
2491 	if (ret)
2492 		goto err_free_candev;
2493 
2494 	return class_dev;
2495 
2496 err_free_candev:
2497 	free_candev(net_dev);
2498 	return ERR_PTR(ret);
2499 }
2500 EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
2501 
2502 void m_can_class_free_dev(struct net_device *net)
2503 {
2504 	free_candev(net);
2505 }
2506 EXPORT_SYMBOL_GPL(m_can_class_free_dev);
2507 
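/* Register an M_CAN instance with the networking core. A glue driver is
 * expected to allocate the class device with m_can_class_allocate_dev(),
 * fill in at least cdev->ops, cdev->is_peripheral and the register/clock
 * resources, and then call this function; teardown is the reverse via
 * m_can_class_unregister() and m_can_class_free_dev(). (This is a sketch of
 * the expected call sequence derived from the exported API, not a complete
 * recipe.)
 */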
2508 int m_can_class_register(struct m_can_classdev *cdev)
2509 {
2510 	int ret;
2511 
2512 	cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
2513 					cdev->mcfg[MRAM_TXE].num));
2514 	if (cdev->is_peripheral) {
2515 		cdev->tx_ops =
2516 			devm_kzalloc(cdev->dev,
2517 				     cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
2518 				     GFP_KERNEL);
2519 		if (!cdev->tx_ops)
2520 			return -ENOMEM;
2521 	}
2522 
2523 	cdev->rst = devm_reset_control_get_optional_shared(cdev->dev, NULL);
2524 	if (IS_ERR(cdev->rst))
2525 		return dev_err_probe(cdev->dev, PTR_ERR(cdev->rst),
2526 				     "Failed to get reset line\n");
2527 
2528 	ret = m_can_clk_start(cdev);
2529 	if (ret)
2530 		return ret;
2531 
2532 	ret = reset_control_deassert(cdev->rst);
2533 	if (ret)
2534 		goto clk_disable;
2535 
2536 	if (cdev->is_peripheral) {
2537 		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
2538 						NAPI_POLL_WEIGHT);
2539 		if (ret)
2540 			goto out_reset_control_assert;
2541 	}
2542 
2543 	if (!cdev->net->irq) {
2544 		netdev_dbg(cdev->net, "Polling enabled, initialize hrtimer");
2545 		hrtimer_setup(&cdev->hrtimer, m_can_polling_timer, CLOCK_MONOTONIC,
2546 			      HRTIMER_MODE_REL_PINNED);
2547 	} else {
2548 		hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
2549 			      HRTIMER_MODE_REL);
2550 	}
2551 
2552 	ret = m_can_dev_setup(cdev);
2553 	if (ret)
2554 		goto rx_offload_del;
2555 
2556 	ret = register_m_can_dev(cdev);
2557 	if (ret) {
2558 		netdev_err(cdev->net, "registering %s failed (err=%d)\n",
2559 			   cdev->net->name, ret);
2560 		goto rx_offload_del;
2561 	}
2562 
2563 	of_can_transceiver(cdev->net);
2564 
2565 	netdev_info(cdev->net, "device registered (irq=%d, version=%d)\n",
2566 		    cdev->net->irq, cdev->version);
2567 
2568 	/* Probe finished
2569 	 * Assert reset and stop clocks.
2570 	 * They will be reactivated once the M_CAN device is opened
2571 	 */
2572 	reset_control_assert(cdev->rst);
2573 	m_can_clk_stop(cdev);
2574 
2575 	return 0;
2576 
2577 rx_offload_del:
2578 	if (cdev->is_peripheral)
2579 		can_rx_offload_del(&cdev->offload);
2580 out_reset_control_assert:
2581 	reset_control_assert(cdev->rst);
2582 clk_disable:
2583 	m_can_clk_stop(cdev);
2584 
2585 	return ret;
2586 }
2587 EXPORT_SYMBOL_GPL(m_can_class_register);
2588 
2589 void m_can_class_unregister(struct m_can_classdev *cdev)
2590 {
2591 	unregister_candev(cdev->net);
2592 	if (cdev->is_peripheral)
2593 		can_rx_offload_del(&cdev->offload);
2594 }
2595 EXPORT_SYMBOL_GPL(m_can_class_unregister);
2596 
2597 int m_can_class_suspend(struct device *dev)
2598 {
2599 	struct m_can_classdev *cdev = dev_get_drvdata(dev);
2600 	struct net_device *ndev = cdev->net;
2601 	int ret = 0;
2602 
2603 	if (netif_running(ndev)) {
2604 		netif_stop_queue(ndev);
2605 		netif_device_detach(ndev);
2606 
2607 		/* Leave the chip running with the RX interrupt enabled if it is
2608 		 * used as a wake-up source. Coalescing then needs to be reset:
2609 		 * the timer is cancelled here, interrupts are restored in resume.
2610 		 */
2611 		if (cdev->pm_wake_source) {
2612 			hrtimer_cancel(&cdev->hrtimer);
2613 			m_can_write(cdev, M_CAN_IE, IR_RF0N);
2614 
2615 			if (cdev->ops->deinit)
2616 				ret = cdev->ops->deinit(cdev);
2617 		} else {
2618 			m_can_stop(ndev);
2619 		}
2620 
2621 		m_can_clk_stop(cdev);
2622 		cdev->can.state = CAN_STATE_SLEEPING;
2623 	}
2624 
2625 	if (!m_can_class_wakeup_pinctrl_enabled(cdev))
2626 		pinctrl_pm_select_sleep_state(dev);
2627 
2628 	return ret;
2629 }
2630 EXPORT_SYMBOL_GPL(m_can_class_suspend);
2631 
2632 int m_can_class_resume(struct device *dev)
2633 {
2634 	struct m_can_classdev *cdev = dev_get_drvdata(dev);
2635 	struct net_device *ndev = cdev->net;
2636 	int ret = 0;
2637 
2638 	if (!m_can_class_wakeup_pinctrl_enabled(cdev))
2639 		pinctrl_pm_select_default_state(dev);
2640 
2641 	if (netif_running(ndev)) {
2642 		ret = m_can_clk_start(cdev);
2643 		if (ret)
2644 			return ret;
2645 
2646 		if (cdev->pm_wake_source) {
2647 			/* Restore active interrupts but disable coalescing as
2648 			 * we may have missed important waterlevel interrupts
2649 			 * between suspend and resume. Timers are already
2650 			 * stopped in suspend. Here we enable all interrupts
2651 			 * again.
2652 			 */
2653 			cdev->active_interrupts |= IR_RF0N | IR_TEFN;
2654 
2655 			if (cdev->ops->init)
2656 				ret = cdev->ops->init(cdev);
2657 
2658 			cdev->can.state = m_can_state_get_by_psr(cdev);
2659 
2660 			m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
2661 		} else {
2662 			ret = m_can_start(ndev);
2663 			if (ret) {
2664 				m_can_clk_stop(cdev);
2665 				return ret;
2666 			}
2667 		}
2668 
2669 		netif_device_attach(ndev);
2670 		netif_start_queue(ndev);
2671 	}
2672 
2673 	return ret;
2674 }
2675 EXPORT_SYMBOL_GPL(m_can_class_resume);
2676 
2677 MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
2678 MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
2679 MODULE_LICENSE("GPL v2");
2680 MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
2681