// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 * - Kvaser linux pciefd driver (version 5.42)
 * - PEAK linux canfd driver
 */

#include <linux/bitfield.h>
#include <linux/can/dev.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

/* Timeout for flush/reset completions and the bus error counter poll period */
#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
/* Max error packets handled before falling back to polling the counters */
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)

#define KVASER_PCIEFD_VENDOR 0x1a07

/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
#define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f
#define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010
#define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011

/* SmartFusion2 based devices */
#define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012
#define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013
#define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016

/* Xilinx based devices */
#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019

/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)

/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)

/* Xilinx SerDes LSB address translation mask */
#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)

/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_VERSION_REG 0x8
#define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10
#define KVASER_PCIEFD_SYSID_BUILD_REG 0x14
/* Shared receive buffer FIFO registers */
#define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_CMD_REG 0x0
#define KVASER_PCIEFD_SRB_IEN_REG 0x04
#define KVASER_PCIEFD_SRB_IRQ_REG 0x0c
#define KVASER_PCIEFD_SRB_STAT_REG 0x10
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14
#define KVASER_PCIEFD_SRB_CTRL_REG 0x18

/* System build information fields */
#define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24)
#define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0)
#define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1)

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)

/* Got DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)

/* SRB current packet level */
#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* KCAN CTRL packet types */
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29)
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4
#define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5

/* Command sequence number */
#define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16)
/* Command bits */
#define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)

/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* FDF bit when controller is in classic CAN mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)

/* Tx FIFO size */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16)
/* Tx FIFO current packet level */
#define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0)

/* Current status packet sequence number */
#define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Controller is bus off */
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \
	(KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \
	 KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM)

/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)

/* BTRN and BTRD fields */
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26)
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17)
#define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13)
#define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0)

/* PWM Control fields */
#define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16)
#define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0)

/* KCAN packet type IDs */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0x0
#define KVASER_PCIEFD_PACK_TYPE_ACK 0x1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9

/* Common KCAN packet definitions, second word */
#define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28)
#define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25)
#define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0)

/* KCAN Transmit/Receive data packet, first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
#define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0)
/* KCAN Transmit data packet, second word */
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
/* KCAN Transmit/Receive data packet, second word */
234 #define KVASER_PCIEFD_RPACKET_FDF BIT(15) 235 #define KVASER_PCIEFD_RPACKET_BRS BIT(14) 236 #define KVASER_PCIEFD_RPACKET_ESI BIT(13) 237 #define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8) 238 239 /* KCAN Transmit acknowledge packet, first word */ 240 #define KVASER_PCIEFD_APACKET_NACK BIT(11) 241 #define KVASER_PCIEFD_APACKET_ABL BIT(10) 242 #define KVASER_PCIEFD_APACKET_CT BIT(9) 243 #define KVASER_PCIEFD_APACKET_FLU BIT(8) 244 245 /* KCAN Status packet, first word */ 246 #define KVASER_PCIEFD_SPACK_RMCD BIT(22) 247 #define KVASER_PCIEFD_SPACK_IRM BIT(21) 248 #define KVASER_PCIEFD_SPACK_IDET BIT(20) 249 #define KVASER_PCIEFD_SPACK_BOFF BIT(16) 250 #define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8) 251 #define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0) 252 /* KCAN Status packet, second word */ 253 #define KVASER_PCIEFD_SPACK_EPLR BIT(24) 254 #define KVASER_PCIEFD_SPACK_EWLR BIT(23) 255 #define KVASER_PCIEFD_SPACK_AUTO BIT(21) 256 257 /* KCAN Error detected packet, second word */ 258 #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) 259 260 /* Macros for calculating addresses of registers */ 261 #define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ 262 ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) 263 #define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ 264 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) 265 #define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ 266 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) 267 #define KVASER_PCIEFD_SERDES_ADDR(pcie) \ 268 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) 269 #define KVASER_PCIEFD_SYSID_ADDR(pcie) \ 270 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) 271 #define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ 272 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) 273 #define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ 274 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) 275 #define KVASER_PCIEFD_SRB_ADDR(pcie) \ 276 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) 277 #define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ 278 
(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) 279 #define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ 280 (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) 281 #define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ 282 (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) 283 #define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ 284 (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) 285 286 struct kvaser_pciefd; 287 static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, 288 dma_addr_t addr, int index); 289 static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, 290 dma_addr_t addr, int index); 291 static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie, 292 dma_addr_t addr, int index); 293 294 struct kvaser_pciefd_address_offset { 295 u32 serdes; 296 u32 pci_ien; 297 u32 pci_irq; 298 u32 sysid; 299 u32 loopback; 300 u32 kcan_srb_fifo; 301 u32 kcan_srb; 302 u32 kcan_ch0; 303 u32 kcan_ch1; 304 }; 305 306 struct kvaser_pciefd_dev_ops { 307 void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, 308 dma_addr_t addr, int index); 309 }; 310 311 struct kvaser_pciefd_irq_mask { 312 u32 kcan_rx0; 313 u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 314 u32 all; 315 }; 316 317 struct kvaser_pciefd_driver_data { 318 const struct kvaser_pciefd_address_offset *address_offset; 319 const struct kvaser_pciefd_irq_mask *irq_mask; 320 const struct kvaser_pciefd_dev_ops *ops; 321 }; 322 323 static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { 324 .serdes = 0x1000, 325 .pci_ien = 0x50, 326 .pci_irq = 0x40, 327 .sysid = 0x1f020, 328 .loopback = 0x1f000, 329 .kcan_srb_fifo = 0x1f200, 330 .kcan_srb = 0x1f400, 331 .kcan_ch0 = 0x10000, 332 .kcan_ch1 = 0x11000, 333 }; 334 335 static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { 336 .serdes = 0x280c8, 337 .pci_ien = 0x102004, 338 .pci_irq = 0x102008, 339 .sysid = 0x100000, 340 
.loopback = 0x103000, 341 .kcan_srb_fifo = 0x120000, 342 .kcan_srb = 0x121000, 343 .kcan_ch0 = 0x140000, 344 .kcan_ch1 = 0x142000, 345 }; 346 347 static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = { 348 .serdes = 0x00208, 349 .pci_ien = 0x102004, 350 .pci_irq = 0x102008, 351 .sysid = 0x100000, 352 .loopback = 0x103000, 353 .kcan_srb_fifo = 0x120000, 354 .kcan_srb = 0x121000, 355 .kcan_ch0 = 0x140000, 356 .kcan_ch1 = 0x142000, 357 }; 358 359 static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { 360 .kcan_rx0 = BIT(4), 361 .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, 362 .all = GENMASK(4, 0), 363 }; 364 365 static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { 366 .kcan_rx0 = BIT(4), 367 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, 368 .all = GENMASK(19, 16) | BIT(4), 369 }; 370 371 static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = { 372 .kcan_rx0 = BIT(4), 373 .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) }, 374 .all = GENMASK(23, 16) | BIT(4), 375 }; 376 377 static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { 378 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, 379 }; 380 381 static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { 382 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, 383 }; 384 385 static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = { 386 .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx, 387 }; 388 389 static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { 390 .address_offset = &kvaser_pciefd_altera_address_offset, 391 .irq_mask = &kvaser_pciefd_altera_irq_mask, 392 .ops = &kvaser_pciefd_altera_dev_ops, 393 }; 394 395 static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { 396 .address_offset = &kvaser_pciefd_sf2_address_offset, 397 .irq_mask 
= &kvaser_pciefd_sf2_irq_mask, 398 .ops = &kvaser_pciefd_sf2_dev_ops, 399 }; 400 401 static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = { 402 .address_offset = &kvaser_pciefd_xilinx_address_offset, 403 .irq_mask = &kvaser_pciefd_xilinx_irq_mask, 404 .ops = &kvaser_pciefd_xilinx_dev_ops, 405 }; 406 407 struct kvaser_pciefd_can { 408 struct can_priv can; 409 struct kvaser_pciefd *kv_pcie; 410 void __iomem *reg_base; 411 struct can_berr_counter bec; 412 u8 cmd_seq; 413 int err_rep_cnt; 414 int echo_idx; 415 spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ 416 spinlock_t echo_lock; /* Locks the message echo buffer */ 417 struct timer_list bec_poll_timer; 418 struct completion start_comp, flush_comp; 419 }; 420 421 struct kvaser_pciefd { 422 struct pci_dev *pci; 423 void __iomem *reg_base; 424 struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; 425 const struct kvaser_pciefd_driver_data *driver_data; 426 void *dma_data[KVASER_PCIEFD_DMA_COUNT]; 427 u8 nr_channels; 428 u32 bus_freq; 429 u32 freq; 430 u32 freq_to_ticks_div; 431 }; 432 433 struct kvaser_pciefd_rx_packet { 434 u32 header[2]; 435 u64 timestamp; 436 }; 437 438 struct kvaser_pciefd_tx_packet { 439 u32 header[2]; 440 u8 data[64]; 441 }; 442 443 static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { 444 .name = KVASER_PCIEFD_DRV_NAME, 445 .tseg1_min = 1, 446 .tseg1_max = 512, 447 .tseg2_min = 1, 448 .tseg2_max = 32, 449 .sjw_max = 16, 450 .brp_min = 1, 451 .brp_max = 8192, 452 .brp_inc = 1, 453 }; 454 455 static struct pci_device_id kvaser_pciefd_id_table[] = { 456 { 457 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), 458 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 459 }, 460 { 461 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), 462 .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, 463 }, 464 { 465 PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), 466 
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
	},
	{
		PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
		.driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
	},
	{
		0,
	},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

/* Write a command to the KCAN CMD register, tagged with the next sequence number */
static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd)
{
	iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) |
		  FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq),
		  can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

/* Ask the controller to emit a status packet */
static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ);
}

/* Issue the combined abort/flush/reset command */
static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can)
{
	kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT);
}

/* Turn on error packet generation (EPEN) if it is not already enabled */
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

/* Turn off error packet generation (EPEN) */
static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

/* Enable the full set of Tx-related and error interrupts for this channel */
static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
}

/* Convert a device timestamp (in controller ticks) to a skb hw timestamp */
static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie,
						   struct sk_buff *skb, u64 timestamp)
{
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div));
}

/* Program the MODE register from the configured ctrlmode flags and leave
 * reset mode. Caller context: called with the channel stopped.
 */
static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
	else
		mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

/* Kick off an abort/flush/reset cycle; completion is signalled via the ABD
 * interrupt (flush_comp is completed by the interrupt path, not visible here).
 */
static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* If controller is already idle, run abort, flush and reset */
		kvaser_pciefd_abort_flush_reset(can);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

/* Bring the channel on-bus: flush, leave reset mode, re-arm interrupts and
 * reset the software error state. Returns 0 or -ETIMEDOUT if the hardware
 * does not acknowledge the flush or reset within KVASER_PCIEFD_WAIT_TIMEOUT.
 */
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);
	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

/* Stop the transceiver power PWM by forcing its duty cycle to zero */
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl);
	/* Set duty cycle to zero */
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

/* Start the transceiver power PWM at 500 kHz / 95% duty cycle */
static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);
	/* Set frequency to 500 KHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger);
	pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top);
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

/* ndo_open: register with the CAN core, then bring the controller on-bus */
static int kvaser_pciefd_open(struct net_device *netdev)
{
	int ret;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	ret = open_candev(netdev);
	if (ret)
		return ret;

	ret = kvaser_pciefd_bus_on(can);
	if (ret) {
		close_candev(netdev);
		return ret;
	}

	return 0;
}

/* ndo_stop: flush the controller, mask interrupts and close the candev */
static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	can->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);

	return ret;
}

/* Build a KCAN transmit packet from an skb.
 * Returns the payload size in 32-bit words (rounded up).
 */
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));
	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id);
	/* Request an acknowledge packet so the Tx can be completed later */
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
					   can_fd_len2dlc(cf->len));
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	} else {
		p->header[1] |=
			FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
				   can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
	}

	/* Sequence number doubles as the echo skb slot index */
	p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

/* ndo_start_xmit: queue the frame to the Tx FIFO; the frame is completed
 * from the acknowledge packet path via the echo skb mechanism.
 */
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nr_words;
	u8 count;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);
	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx, 0);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nr_words) {
		u32 data_last = ((u32 *)packet.data)[nr_words - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nr_words - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
			  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

/* Write nominal (data == false) or data-phase (data == true) bit timing.
 * The controller is briefly forced into reset mode, since the BTR registers
 * can only be written while in reset. Returns 0 or -EBUSY when reset mode
 * cannot be confirmed.
 */
static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) |
	       FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10);
	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq_flags);

	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

/* do_set_mode: only manual restart (CAN_MODE_START) is supported */
static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

/* do_get_berr_counter: report the last counters cached from status packets */
static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;

	return 0;
}

/* Periodic fallback when error reporting was throttled: re-enable error
 * packets and request a fresh status packet from the controller.
 */
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_eth_ioctl = can_eth_ioctl_hwts,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};

/* Allocate and initialize one candev per channel found on the card */
static int
kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	/* Allocate and initialize one candev per detected channel. On any
	 * failure the caller is expected to clean up already-created
	 * channels (see kvaser_pciefd_teardown_can_ctrls).
	 */
	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_nr_packets_max;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
		can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i);
		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		/* Hardware-reported depth of the Tx FIFO for this channel */
		tx_nr_packets_max =
			FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		can->can.clock.freq = pcie->freq;
		/* Keep one FIFO slot in reserve; cap at the driver maximum */
		can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);

		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO |
					      CAN_CTRLMODE_CC_LEN8_DLC;

		/* All supported devices are CAN FD capable; treat anything
		 * else as a broken or unsupported board.
		 */
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;
		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		/* Clear all pending channel interrupts, then enable only the
		 * abort-done interrupt until the channel is opened.
		 */
		iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

/* Register all channel candevs with the networking core, unwinding the
 * already-registered ones if any registration fails.
 */
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int ret = register_candev(pcie->can[i]->can.dev);

		if (ret) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return ret;
		}
	}

	return 0;
}

/* Program one Rx DMA buffer address into the Altera SerDes address
 * translation table (two 32-bit words per buffer; bit 0 of the low word
 * enables 64-bit address translation).
 */
static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 word1, word2;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) {
		word1 = lower_32_bits(addr) | KVASER_PCIEFD_ALTERA_DMA_64BIT;
		word2 = upper_32_bits(addr);
	} else {
		word1 = addr;
		word2 = 0;
	}
	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(word1, serdes_base);
	iowrite32(word2, serdes_base + 0x4);
}

/* Program one Rx DMA buffer address into the SmartFusion2 SerDes
 * translation table (LSB masked to a 4 KiB-aligned page, 0x10 stride).
 */
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
					    dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK;
	u32 msb = 0x0;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		msb = upper_32_bits(addr);

	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index;
	iowrite32(lsb, serdes_base);
	iowrite32(msb, serdes_base + 0x4);
}

/* Program one Rx DMA buffer address into the Xilinx SerDes translation
 * table. Note the word order is MSB first here, unlike the SF2 variant.
 */
static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
					       dma_addr_t addr, int index)
{
	void __iomem *serdes_base;
	u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
	u32 msb = 0x0;

	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		msb = upper_32_bits(addr);

	serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
	iowrite32(msb, serdes_base);
	iowrite32(lsb, serdes_base + 0x4);
}

/* Allocate the two shared Rx DMA buffers, map them through the
 * device-specific SerDes translation, drain any stale packets from the
 * Rx FIFO and enable DMA. Buffers are managed (dmam_*), so no explicit
 * free path is needed. Returns 0, -ENOMEM or -EIO.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	int i;
	u32 srb_status;
	u32 srb_packet_count;
	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];

	/* Disable the DMA */
	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));

	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_addr[i],
							GFP_KERNEL);

		if (!pcie->dma_data[i] || !dma_addr[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}
		pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i);
	}

	/* Reset Rx FIFO, and both DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
	/* Empty Rx FIFO */
	srb_packet_count =
		FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK,
			  ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) +
				   KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG));
	while (srb_packet_count) {
		/* Drop current packet in FIFO */
		ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
		srb_packet_count--;
	}

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Enable the DMA */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

/* Read board identification: channel count, firmware version, CAN and bus
 * clock frequencies. Rejects boards whose firmware lacks DMA support and
 * disables all loopback. Returns 0 or -ENODEV.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 version, srb_status, build;

	version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG);
	pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS,
				FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version));

	build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n",
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version),
		FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build));

	srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* Timestamps tick at the CAN clock; convert to microseconds */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;
	/* Turn off all loopback functionality */
	iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie));

	return 0;
}

/* Translate one received data packet from the DMA buffer into a CAN(-FD)
 * skb and hand it to the network stack.
 */
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	struct sk_buff *skb;
	struct canfd_frame *cf;
	struct can_priv *priv;
	u8 ch_id =
FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);
	u8 dlc;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[ch_id]->can;
	dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]);

	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		/* CAN FD frame */
		skb = alloc_canfd_skb(priv->dev, &cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}

		cf->len = can_fd_dlc2len(dlc);
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			cf->flags |= CANFD_BRS;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			cf->flags |= CANFD_ESI;
	} else {
		/* Classical CAN frame */
		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
		if (!skb) {
			priv->dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
	}

	cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]);
	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		cf->can_id |= CAN_EFF_FLAG;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		/* Remote frames carry no data */
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, data, cf->len);
		priv->dev->stats.rx_bytes += cf->len;
	}
	priv->dev->stats.rx_packets++;
	kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);

	return netif_rx(skb);
}

/* Apply a CAN state transition. On bus-off the Tx queue is stopped and,
 * unless automatic restart is configured, the controller is flushed so it
 * cannot auto-recover behind the stack's back.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
	can_change_state(can->can.dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		struct net_device *ndev = can->can.dev;
		unsigned long irq_flags;

		spin_lock_irqsave(&can->lock, irq_flags);
		netif_stop_queue(can->can.dev);
		spin_unlock_irqrestore(&can->lock, irq_flags);
		/* Prevent CAN controller from auto recover from bus off */
		if (!can->can.restart_ms) {
			kvaser_pciefd_start_controller_flush(can);
			can_bus_off(ndev);
		}
	}
}

/* Derive the new CAN state (and the tx/rx sub-states expected by
 * can_change_state()) from a status packet's flags and error counters.
 * Hardware flags take precedence over the raw counter thresholds.
 */
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 || bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	/* Attribute the state to the direction with the higher counter */
	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

/* Process one error packet: update state and counters, account the error,
 * and deliver an error frame to userspace if an skb can be allocated.
 * State bookkeeping happens even when the skb allocation fails.
 */
static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
	struct sk_buff *skb;
	struct can_frame *cf = NULL;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	skb = alloc_can_err_skb(ndev, &cf);
	if (new_state != old_state) {
		/* cf may be NULL here; can_change_state() handles that */
		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			if (skb)
				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
		ndev->stats.tx_errors++;
	else
		ndev->stats.rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

	if (!skb) {
		ndev->stats.rx_dropped++;
		return -ENOMEM;
	}

	kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
	cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	netif_rx(skb);

	return 0;
}

/* Dispatch an error packet to its channel and rate-limit error reporting:
 * after KVASER_PCIEFD_MAX_ERR_REP errors, error generation is disabled
 * until the bec_poll_timer re-enables it.
 */
static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >=
KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

/* Handle the response to an explicit status request: update CAN state,
 * emit an error frame on state change, cache the error counters and keep
 * polling while either counter is non-zero.
 */
static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

	old_state = can->can.state;

	bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]);
	bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
	if (new_state != old_state) {
		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			ndev->stats.rx_dropped++;
			return -ENOMEM;
		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr)
		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);

	return 0;
}

/* Demultiplex a status packet. Depending on the IRM/RMCD/AUTO/IDET flags
 * and the command sequence number it either continues the reset/flush
 * handshake, completes a pending status request, or signals bus-on.
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 cmdseq;
	u32 status;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		kvaser_pciefd_abort_flush_reset(can);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count;

		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
		if (!count)
			iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK,
					     KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH),
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}

	return 0;
}

static
void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
				      struct kvaser_pciefd_rx_packet *p)
{
	/* A transmission was NACKed (no ACK or arbitration lost): account
	 * the error and, if possible, report an error frame to userspace.
	 */
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_err_skb(can->can.dev, &cf);
	can->can.dev->stats.tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
		netif_rx(skb);
	} else {
		can->can.dev->stats.rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
}

/* Handle a transmit-ACK packet: complete the matching echo skb, update Tx
 * statistics and wake the queue once the hardware FIFO has room again.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
		/* The sequence number in the ACK indexes the echo skb slot */
		int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
		int len;
		u8 count;
		struct sk_buff *skb;

		skb = can->can.echo_skb[echo_idx];
		if (skb)
			kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
		len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
		count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
				  ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));

		if (count < can->can.echo_skb_max &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);

		if (!one_shot_fail) {
			can->can.dev->stats.tx_bytes += len;
			can->can.dev->stats.tx_packets++;
		}
	}

	return 0;
}

/* End-of-flush acknowledgment: wake whoever is waiting in flush_comp */
static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]);

	if (ch_id >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[ch_id];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

/* Parse and dispatch a single packet from a DMA buffer.
 * *start_pos is the current word offset into the buffer; it is advanced to
 * the next packet header on success, or reset to 0 when a zero size word
 * marks the end of the buffer. Returns 0 on success/end-of-buffer, or a
 * negative errno (-EIO on unknown type or corrupt size).
 */
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;
	int size;
	int ret = 0;

	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);

	type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			/* Skip the payload words (32-bit aligned) */
			data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK,
							    p->header[1]));
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if (unlikely((*start_pos + size) != pos))
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;

	return ret;
}

/* Drain all packets from one DMA buffer */
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int pos = 0;
	int res = 0;

	do {
		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);

	return res;
}

/* Service the shared receive buffer interrupt: process any DMA buffer that
 * signalled "packets done", log over/underflow conditions, acknowledge the
 * interrupt and return the raw flags so the caller can release buffers.
 */
static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
		kvaser_pciefd_read_buffer(pcie, 0);

	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
		kvaser_pciefd_read_buffer(pcie, 1);

	if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
		     irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
	return irq;
}

/* Service a per-channel KCAN interrupt: log fatal FIFO/bittiming/FD-mode
 * conditions and acknowledge all pending channel interrupt flags.
 */
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
}

/* Top-level (shared) interrupt handler: fan out to the receive path and to
 * each channel whose bit is set, then release any DMA buffer that was fully
 * processed so the hardware can reuse it.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
	u32 srb_irq = 0;
	u32 srb_release = 0;
	int i;

	if (!(pci_irq & irq_mask->all))
		return IRQ_NONE;

	if (pci_irq & irq_mask->kcan_rx0)
		srb_irq = kvaser_pciefd_receive_irq(pcie);

	for (i = 0; i < pcie->nr_channels; i++) {
		if (pci_irq & irq_mask->kcan_tx[i])
			kvaser_pciefd_transmit_irq(pcie->can[i]);
	}

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;

	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;

	if (srb_release)
		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);

	return IRQ_HANDLED;
}

/* Undo kvaser_pciefd_setup_can_ctrls() for all channels that were created:
 * mask channel interrupts, stop the PWM and free the candev.
 */
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (can) {
			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
kvaser_pciefd_pwm_stop(can); 1728 free_candev(can->can.dev); 1729 } 1730 } 1731 } 1732 1733 static int kvaser_pciefd_probe(struct pci_dev *pdev, 1734 const struct pci_device_id *id) 1735 { 1736 int ret; 1737 struct kvaser_pciefd *pcie; 1738 const struct kvaser_pciefd_irq_mask *irq_mask; 1739 void __iomem *irq_en_base; 1740 1741 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); 1742 if (!pcie) 1743 return -ENOMEM; 1744 1745 pci_set_drvdata(pdev, pcie); 1746 pcie->pci = pdev; 1747 pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; 1748 irq_mask = pcie->driver_data->irq_mask; 1749 1750 ret = pci_enable_device(pdev); 1751 if (ret) 1752 return ret; 1753 1754 ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); 1755 if (ret) 1756 goto err_disable_pci; 1757 1758 pcie->reg_base = pci_iomap(pdev, 0, 0); 1759 if (!pcie->reg_base) { 1760 ret = -ENOMEM; 1761 goto err_release_regions; 1762 } 1763 1764 ret = kvaser_pciefd_setup_board(pcie); 1765 if (ret) 1766 goto err_pci_iounmap; 1767 1768 ret = kvaser_pciefd_setup_dma(pcie); 1769 if (ret) 1770 goto err_pci_iounmap; 1771 1772 pci_set_master(pdev); 1773 1774 ret = kvaser_pciefd_setup_can_ctrls(pcie); 1775 if (ret) 1776 goto err_teardown_can_ctrls; 1777 1778 ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI); 1779 if (ret < 0) { 1780 dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n"); 1781 goto err_teardown_can_ctrls; 1782 } 1783 1784 ret = pci_irq_vector(pcie->pci, 0); 1785 if (ret < 0) 1786 goto err_pci_free_irq_vectors; 1787 1788 pcie->pci->irq = ret; 1789 1790 ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, 1791 IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); 1792 if (ret) { 1793 dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq); 1794 goto err_pci_free_irq_vectors; 1795 } 1796 iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, 1797 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); 1798 1799 
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | 1800 KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | 1801 KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, 1802 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); 1803 1804 /* Enable PCI interrupts */ 1805 irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); 1806 iowrite32(irq_mask->all, irq_en_base); 1807 /* Ready the DMA buffers */ 1808 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, 1809 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1810 iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, 1811 KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); 1812 1813 ret = kvaser_pciefd_reg_candev(pcie); 1814 if (ret) 1815 goto err_free_irq; 1816 1817 return 0; 1818 1819 err_free_irq: 1820 /* Disable PCI interrupts */ 1821 iowrite32(0, irq_en_base); 1822 free_irq(pcie->pci->irq, pcie); 1823 1824 err_pci_free_irq_vectors: 1825 pci_free_irq_vectors(pcie->pci); 1826 1827 err_teardown_can_ctrls: 1828 kvaser_pciefd_teardown_can_ctrls(pcie); 1829 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1830 pci_clear_master(pdev); 1831 1832 err_pci_iounmap: 1833 pci_iounmap(pdev, pcie->reg_base); 1834 1835 err_release_regions: 1836 pci_release_regions(pdev); 1837 1838 err_disable_pci: 1839 pci_disable_device(pdev); 1840 1841 return ret; 1842 } 1843 1844 static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) 1845 { 1846 int i; 1847 1848 for (i = 0; i < pcie->nr_channels; i++) { 1849 struct kvaser_pciefd_can *can = pcie->can[i]; 1850 1851 if (can) { 1852 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1853 unregister_candev(can->can.dev); 1854 del_timer(&can->bec_poll_timer); 1855 kvaser_pciefd_pwm_stop(can); 1856 free_candev(can->can.dev); 1857 } 1858 } 1859 } 1860 1861 static void kvaser_pciefd_remove(struct pci_dev *pdev) 1862 { 1863 struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); 1864 1865 kvaser_pciefd_remove_all_ctrls(pcie); 1866 1867 /* Disable 
interrupts */ 1868 iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); 1869 iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); 1870 1871 free_irq(pcie->pci->irq, pcie); 1872 pci_free_irq_vectors(pcie->pci); 1873 pci_iounmap(pdev, pcie->reg_base); 1874 pci_release_regions(pdev); 1875 pci_disable_device(pdev); 1876 } 1877 1878 static struct pci_driver kvaser_pciefd = { 1879 .name = KVASER_PCIEFD_DRV_NAME, 1880 .id_table = kvaser_pciefd_id_table, 1881 .probe = kvaser_pciefd_probe, 1882 .remove = kvaser_pciefd_remove, 1883 }; 1884 1885 module_pci_driver(kvaser_pciefd) 1886