// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 *  - Kvaser linux pciefd driver (version 5.42)
 *  - PEAK linux canfd driver
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
#define KVASER_PCIEFD_DMA_COUNT 2U

#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
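/* Two DMA buffers of KVASER_PCIEFD_DMA_SIZE bytes each back the shared
 * receive buffer (SRB); see kvaser_pciefd_setup_dma().
 */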

#define KVASER_PCIEFD_VENDOR 0x1a07
/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011

/* SmartFusion2 based devices */
#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016

/* Xilinx based devices */
#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017

/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)

/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)

/* Xilinx SerDes LSB address translation mask */
#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)

/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
/* Shared receive buffer FIFO registers */
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_CMD_REG 0x0
#define KVASER_PCIEFD_SRB_IEN_REG 0x04
#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
#define KVASER_PCIEFD_SRB_STAT_REG 0x10
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
#define KVASER_PCIEFD_SRB_CTRL_REG 0x18

/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)

/* Device has DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)

/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* KCAN CTRL packet types */
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5

/* Command sequence number */
#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
/* Command bits */
#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)

/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)

/* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)

/* Current status packet sequence number */
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller has CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
/* Controller has one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
	(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
	 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)

/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)

/* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)

/* PWM Control fields */
#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)

/* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9

/* Common KCAN packet definitions, second word */
#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)

/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8)

/* KCAN Transmit acknowledge packet, first word */
#define KVASER_PCIEFD_APACKET_NACK BIT(11)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_FLU BIT(8)

/* KCAN Status packet, first word */
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8)
#define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0)
/* KCAN Status packet, second word */
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)

/* KCAN Error detected packet, second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)

/* Macros for calculating addresses of registers */
#define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \
	((pcie)->reg_base + (pcie)->driver_data->address_offset->block)
#define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien))
#define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq))
#define KVASER_PCIEFD_SERDES_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes))
#define KVASER_PCIEFD_SYSID_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid))
#define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback))
#define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo))
#define KVASER_PCIEFD_SRB_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb))
#define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0))
#define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \
	(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1))
#define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \
	(KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)))
#define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \
	(KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie)))
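
/* Only the ch0 and ch1 offsets are stored per hardware variant. The
 * register window for channel i is derived as ch0 + i * (ch1 - ch0),
 * which assumes the channel register blocks are equally spaced.
 */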

struct kvaser_pciefd;
static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index);

struct kvaser_pciefd_address_offset {
	u32 serdes;
	u32 pci_ien;
	u32 pci_irq;
	u32 sysid;
	u32 loopback;
	u32 kcan_srb_fifo;
	u32 kcan_srb;
	u32 kcan_ch0;
	u32 kcan_ch1;
};

struct kvaser_pciefd_dev_ops {
	void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index);
};

struct kvaser_pciefd_irq_mask {
	u32 kcan_rx0;
	u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	u32 all;
};

struct kvaser_pciefd_driver_data {
	const struct kvaser_pciefd_address_offset *address_offset;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	const struct kvaser_pciefd_dev_ops *ops;
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = {
	.serdes = 0x1000,
	.pci_ien = 0x50,
	.pci_irq = 0x40,
	.sysid = 0x1f020,
	.loopback = 0x1f000,
	.kcan_srb_fifo = 0x1f200,
	.kcan_srb = 0x1f400,
	.kcan_ch0 = 0x10000,
	.kcan_ch1 = 0x11000,
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = {
	.serdes = 0x280c8,
	.pci_ien = 0x102004,
	.pci_irq = 0x102008,
	.sysid = 0x100000,
	.loopback = 0x103000,
	.kcan_srb_fifo = 0x120000,
	.kcan_srb = 0x121000,
	.kcan_ch0 = 0x140000,
	.kcan_ch1 = 0x142000,
};

static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
	.serdes = 0x00208,
	.pci_ien = 0x102004,
	.pci_irq = 0x102008,
	.sysid = 0x100000,
	.loopback = 0x103000,
	.kcan_srb_fifo = 0x120000,
	.kcan_srb = 0x121000,
	.kcan_ch0 = 0x140000,
	.kcan_ch1 = 0x142000,
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
	.all = GENMASK(4, 0),
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
	.all = GENMASK(19, 16) | BIT(4),
};

static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
	.kcan_rx0 = BIT(4),
	.kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
	.all = GENMASK(19, 16) | BIT(4),
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};

static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
	.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
	.address_offset = &kvaser_pciefd_altera_address_offset,
	.irq_mask = &kvaser_pciefd_altera_irq_mask,
	.ops = &kvaser_pciefd_altera_dev_ops,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
	.address_offset = &kvaser_pciefd_sf2_address_offset,
	.irq_mask = &kvaser_pciefd_sf2_irq_mask,
	.ops = &kvaser_pciefd_sf2_dev_ops,
};

static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
	.address_offset = &kvaser_pciefd_xilinx_address_offset,
	.irq_mask = &kvaser_pciefd_xilinx_irq_mask,
	.ops = &kvaser_pciefd_xilinx_dev_ops,
};

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	const struct kvaser_pciefd_driver_data *driver_data;
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};
453 
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
	},
	{
		0,
	},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

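/* KCAN commands carry an incrementing sequence number. The controller
 * echoes the number back in the STAT register and in status packets,
 * which lets the driver match a status response to the command that
 * triggered it; see kvaser_pciefd_handle_status_packet().
 */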
static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
	iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
		  FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
		  can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}

static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}

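/* Convert a raw hardware timestamp to nanoseconds: freq_to_ticks_div
 * holds the CAN controller clock in MHz (set in
 * kvaser_pciefd_setup_board()), so ticks * 1000 / freq_to_ticks_div
 * gives nanoseconds.
 */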
static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
						   struct sk_buff *skb, u64 timestamp)
{
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
	else
		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		kvaser_pciefd_abort_flush_reset(can);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);
	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
	/* Set duty cycle to zero */
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

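/* A sketch of the PWM arithmetic below: the counter period is top + 1
 * bus clock cycles, and the factor of two in the frequency calculation
 * suggests the output toggles once per counter wrap. The trigger value
 * approximates a 95% duty cycle; the + 50 rounds the division by 100
 * to the nearest integer.
 */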
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);
	/* Set frequency to 500 kHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95% */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);

	return ret;
}

static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
					   can_fd_len2dlc(cf->len));
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	} else {
		p->header[1] |=
			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
	}

	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

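/* Tx path note: the sequence number placed in each transmit packet
 * equals the echo slot holding the skb, so the acknowledge packet can
 * locate and release the matching echo skb in
 * kvaser_pciefd_handle_ack_packet().
 */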
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nr_words;
	u8 count;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nr_words) {
		u32 data_last = ((u32 *)packet.data)[nr_words - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

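/* BTRN (nominal phase) and BTRD (data phase) share the same field
 * layout, so one helper programs either register depending on the
 * 'data' argument.
 */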
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq_flags);

	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;

	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};

static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_nr_packets_max;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		tx_nr_packets_max =
			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);

		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO |
					      CAN_CTRLMODE_CC_LEN8_DLC;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected, channel %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;
		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 word1, word2;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT;
	word2 = addr >> 32;
#else
	word1 = addr;
	word2 = 0;
#endif
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(word1, serdes_base);
	iowrite32(word2, serdes_base + 0x4);
}

static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
	u32 msb = 0x0;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	msb = addr >> 32;
#endif
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
	iowrite32(lsb, serdes_base);
	iowrite32(msb, serdes_base + 0x4);
}

static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
	u32 msb = 0x0;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	msb = addr >> 32;
#endif
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(msb, serdes_base);
	iowrite32(lsb, serdes_base + 0x4);
}

static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}
		pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	/* Empty Rx FIFO */
	srb_packet_count =
		FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
			  ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
				   KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 version, srb_status, build;

	version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
	pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
				FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));

	build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;
	/* Turn off all loopback functionality */
	iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));

	return 0;
}

static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
	u8 dlc;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		cf->len = can_fd_dlc2len(dlc);
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
	}

	cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);
		priv->dev->stats.rx_bytes += cf->len;
	}
	priv->dev->stats.rx_packets++;
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

	return netif_rx(skb);
}

static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
		/* Prevent the CAN controller from automatically recovering from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

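/* Map status packet flags and error counters to a CAN state, using the
 * standard CAN error counter thresholds (96 warning, 128 passive, 255
 * bus off) in addition to the explicit flags. tx_state and rx_state
 * tell can_change_state() which side caused the transition; 0 marks
 * the side whose counter is not dominant.
 */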
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		ndev->stats.tx_errors++;
	else
		ndev->stats.rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);

	return 0;
}

static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		kvaser_pciefd_abort_flush_reset(can);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packets are in the FIFO */
		u8 count;

		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
		if (!count)
			iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
					     KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);
	can->can.dev->stats.tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		netif_rx(skb);
	} else {
		can->can.dev->stats.rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}

static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

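/* Layout of a received packet, as parsed below: one 32-bit size word
 * (counting all words in the packet, including itself), two header
 * words, a 64-bit timestamp and the payload rounded up to whole 32-bit
 * words. A size word of zero marks the end of valid data.
 */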
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the packet,
	 * corrupted packet size?
	 */
1617 	if ((*start_pos + size) != pos)
1618 		return -EIO;
1619 
1620 	/* Point to the next packet header, if any */
1621 	*start_pos = pos;
1622 
1623 	return ret;
1624 }
1625 
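/* Parse all packets in one DMA buffer. Iteration stops at a zero size
 * word (end of buffer), on a parse error, or when the buffer bound is
 * reached.
 */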
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

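/* Handle shared receive buffer interrupts: DPD0/DPD1 signal that a DMA
 * buffer holds packets ready to be parsed; once drained, the buffer is
 * handed back to the hardware with an RDB (reset DMA buffer) command.
 * Overflow/underflow conditions (DOF/DUF) are only logged here. The
 * serviced bits are acknowledged by writing them back to the IRQ
 * register.
 */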
static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
}

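/* Handle per-channel KCAN interrupts: the error conditions are logged,
 * then the serviced bits are acknowledged by writing them back to the
 * IRQ register.
 */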
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Failed to change bittiming: not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}

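/* Top-level interrupt handler, shared between all channels on the board.
 * Reads the board IRQ register, dispatches shared receive buffer
 * interrupts and per-channel Tx interrupts, and claims the interrupt
 * only if one of the enabled sources was pending.
 */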
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	int i;

	if (!(board_irq & irq_mask->all))
		return IRQ_NONE;

	if (board_irq & irq_mask->kcan_rx0)
		kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
		}

		/* Check if the interrupt matches channel i's Tx IRQ mask */
		if (board_irq & irq_mask->kcan_tx[i])
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	return IRQ_HANDLED;
}

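/* Undo kvaser_pciefd_setup_can_ctrls() on the probe error path: mask the
 * controller interrupts, stop the PWM output, and free every candev that
 * was allocated so far.
 */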
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

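/* Bring up the board: map BAR 0, initialize the board and DMA, set up
 * the CAN controllers, install the shared interrupt handler, unmask the
 * receive buffer and PCI interrupts, and finally register the network
 * devices. The error labels unwind in reverse order of setup.
 */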
static int kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;
	struct kvaser_pciefd *pcie;
	const struct kvaser_pciefd_irq_mask *irq_mask;
	void __iomem *irq_en_base;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;
	pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
	irq_mask = pcie->driver_data->irq_mask;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err)
		goto err_disable_pci;

	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

	pci_set_master(pdev);

	err = kvaser_pciefd_setup_can_ctrls(pcie);
	if (err)
		goto err_teardown_can_ctrls;

	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);

	/* Enable PCI interrupts */
	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
	iowrite32(irq_mask->all, irq_en_base);
	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	err = kvaser_pciefd_reg_candev(pcie);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* Disable PCI interrupts */
	iowrite32(0, irq_en_base);
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	return err;
}

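/* Counterpart of the teardown helper for fully registered devices: also
 * unregisters each candev and stops its error counter polling timer.
 */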
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
			unregister_candev(can->can.dev);
			del_timer(&can->bec_poll_timer);
			kvaser_pciefd_pwm_stop(can);
			free_candev(can->can.dev);
		}
	}
}

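/* PCI remove callback: tear down the controllers, mask all interrupts,
 * then release the IRQ, MMIO mapping, regions and PCI device, mirroring
 * the probe error path.
 */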
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);

	kvaser_pciefd_remove_all_ctrls(pcie);

	/* Disable interrupts */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

	free_irq(pcie->pci->irq, pcie);

	pci_iounmap(pdev, pcie->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver kvaser_pciefd = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.id_table = kvaser_pciefd_id_table,
	.probe = kvaser_pciefd_probe,
	.remove = kvaser_pciefd_remove,
};

module_pci_driver(kvaser_pciefd);