// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
 *
 * Copyright (C) 2012 - 2022 Xilinx, Inc.
 * Copyright (C) 2009 PetaLogix. All rights reserved.
 * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
 *
 * Description:
 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME	"xilinx_can"

/* CAN registers set */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescaler
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100, /* RX Message Space */
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};

#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0

/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BRPR_TDCO_MASK		GENMASK(12, 8)  /* TDCO */
#define XCAN_2_BRPR_TDCO_MASK		GENMASK(13, 8)  /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TDCV_MASK		GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE		BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8
#define XCANFD_DW_BYTES			4
#define XCAN_TIMEOUT			(1 * HZ)

/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF		0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
#define XCAN_FLAG_CANFD_2	0x0020

enum xcan_ip_type {
	XAXI_CAN = 0,
	XZYNQ_CANPS,
	XAXI_CANFD,
	XAXI_CANFD_2_0,
};

struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};

/**
 * struct xcan_priv - This definition defines the CAN driver instance
 * @can:	CAN private data structure.
 * @tx_lock:	Lock for synchronizing TX interrupt handling
 * @tx_head:	Tx CAN packets ready to send on the queue
 * @tx_tail:	Tx CAN packets successfully sent on the queue
 * @tx_max:	Maximum number of packets the driver can send
 * @napi:	NAPI structure
 * @read_reg:	For reading data from CAN registers
 * @write_reg:	For writing data to CAN registers
 * @dev:	Network device data structure
 * @reg_base:	Ioremapped address to registers
 * @irq_flags:	For request_irq()
 * @bus_clk:	Pointer to struct clk
 * @can_clk:	Pointer to struct clk
 * @devtype:	Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};

/* CAN Bittiming constants as per Xilinx CAN specs */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* Transmission Delay Compensation constants for CANFD 1.0 */
static const struct can_tdc_const xcan_tdc_const_canfd = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* Manual mode not supported. */
	.tdco_min = 0,
	.tdco_max = 32,
	.tdcf_min = 0, /* Filter window not supported */
	.tdcf_max = 0,
};

/* Transmission Delay Compensation constants for CANFD 2.0 */
static const struct can_tdc_const xcan_tdc_const_canfd2 = {
	.tdcv_min = 0,
	.tdcv_max = 0, /* Manual mode not supported. */
	.tdco_min = 0,
	.tdco_max = 64,
	.tdcf_min = 0, /* Filter window not supported */
	.tdcf_max = 0,
};

/**
 * xcan_write_reg_le - Write a value to the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_le - Read a value from the device register little endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32(priv->reg_base + reg);
}

/**
 * xcan_write_reg_be - Write a value to the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 * @val:	Value to write at the Register offset
 *
 * Write data to the particular CAN register
 */
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
			      u32 val)
{
	iowrite32be(val, priv->reg_base + reg);
}

/**
 * xcan_read_reg_be - Read a value from the device register big endian
 * @priv:	Driver private data structure
 * @reg:	Register offset
 *
 * Read data from the particular CAN register
 * Return: value read from the CAN register
 */
static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
{
	return ioread32be(priv->reg_base + reg);
}

/**
 * xcan_rx_int_mask - Get the mask for the receive interrupt
 * @priv:	Driver private data structure
 *
 * Return: The receive interrupt mask used by the driver on this HW
 */
static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
{
	/* RXNEMP is better suited for our use case as it cannot be cleared
	 * while the FIFO is non-empty, but CAN FD HW does not have it
	 */
	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
		return XCAN_IXR_RXOK_MASK;
	else
		return XCAN_IXR_RXNEMP_MASK;
}

/**
 * set_reset_mode - Resets the CAN device mode
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver reset mode routine. The driver
 * enters into configuration mode.
 *
 * Return: 0 on success and failure value on error
 */
static int set_reset_mode(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long timeout;

	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);

	timeout = jiffies + XCAN_TIMEOUT;
	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
		if (time_after(jiffies, timeout)) {
			netdev_warn(ndev, "timed out for config mode\n");
			return -ETIMEDOUT;
		}
		usleep_range(500, 10000);
	}

	/* reset clears FIFOs */
	priv->tx_head = 0;
	priv->tx_tail = 0;

	return 0;
}

/**
 * xcan_set_bittiming - CAN set bit timing routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver set bittiming routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_set_bittiming(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct can_bittiming *bt = &priv->can.bittiming;
	struct can_bittiming *dbt = &priv->can.data_bittiming;
	u32 btr0, btr1;
	u32 is_config_mode;

	/* Check whether Xilinx CAN is in configuration mode.
	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
	 */
	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
			 XCAN_SR_CONFIG_MASK;
	if (!is_config_mode) {
		netdev_alert(ndev,
			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
		return -EPERM;
	}

	/* Setting Baud Rate prescaler value in BRPR Register */
	btr0 = (bt->brp - 1);

	/* Setting Time Segment 1 in BTR Register */
	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);

	/* Setting Time Segment 2 in BTR Register */
	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

	/* Setting Synchronous jump width in BTR Register */
	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;

	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);

	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		/* Setting Baud Rate prescaler value in F_BRPR Register */
		btr0 = dbt->brp - 1;
		if (can_tdc_is_enabled(&priv->can)) {
			if (priv->devtype.cantype == XAXI_CANFD)
				btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
			else
				btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
					XCAN_BRPR_TDC_ENABLE;
		}

		/* Setting Time Segment 1 in BTR Register */
		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;

		/* Setting Time Segment 2 in BTR Register */
		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;

		/* Setting Synchronous jump width in BTR Register */
		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;

		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
	}

	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
		   priv->read_reg(priv, XCAN_BTR_OFFSET));

	return 0;
}

/**
 * xcan_chip_start - This is the driver's start routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver's start routine.
 * Based on the state of the CAN device it puts
 * the CAN device into a proper mode.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_chip_start(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 reg_msr;
	int err;
	u32 ier;

	/* Check if it is in reset mode */
	err = set_reset_mode(ndev);
	if (err < 0)
		return err;

	err = xcan_set_bittiming(ndev);
	if (err < 0)
		return err;

	/* Enable interrupts
	 *
	 * We enable the ERROR interrupt even with
	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
	 * dedicated interrupt for a state change to
	 * ERROR_WARNING/ERROR_PASSIVE.
	 */
	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);

	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
		ier |= XCAN_IXR_RXMNF_MASK;

	priv->write_reg(priv, XCAN_IER_OFFSET, ier);

	/* Check whether it is loopback mode or normal mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		reg_msr = XCAN_MSR_LBACK_MASK;
	else
		reg_msr = 0x0;

	/* enable the first extended filter, if any, as cores with extended
	 * filtering default to non-receipt if all filters are disabled
	 */
	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);

	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);

	netdev_dbg(ndev, "status:0x%08x\n",
		   priv->read_reg(priv, XCAN_SR_OFFSET));

	priv->can.state = CAN_STATE_ERROR_ACTIVE;
	return 0;
}

/**
 * xcan_do_set_mode - This sets the mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This checks the driver's state and calls the corresponding mode-setting routine.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
	int ret;

	switch (mode) {
	case CAN_MODE_START:
		ret = xcan_chip_start(ndev);
		if (ret < 0) {
			netdev_err(ndev, "xcan_chip_start failed!\n");
			return ret;
		}
		netif_wake_queue(ndev);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/**
 * xcan_write_frame - Write a frame to HW
 * @ndev:		Pointer to net_device structure
 * @skb:		sk_buff pointer that contains data to be Txed
 * @frame_offset:	Register offset to write the frame to
 */
static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
			     int frame_offset)
{
	u32 id, dlc, data[2] = {0, 0};
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 ramoff, dwindex = 0, i;
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Watch carefully on the bit sequence */
	if (cf->can_id & CAN_EFF_FLAG) {
		/* Extended CAN ID format */
		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
			XCAN_IDR_ID2_MASK;
		id |= (((cf->can_id & CAN_EFF_MASK) >>
			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;

		/* The substitute remote TX request bit should be "1"
		 * for extended frames as in the Xilinx CAN datasheet
		 */
		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Extended frames remote TX request */
			id |= XCAN_IDR_RTR_MASK;
	} else {
		/* Standard CAN ID format */
		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
			XCAN_IDR_ID1_MASK;

		if (cf->can_id & CAN_RTR_FLAG)
			/* Standard frames remote TX request */
			id |= XCAN_IDR_SRR_MASK;
	}

	dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
	if (can_is_canfd_skb(skb)) {
		if (cf->flags & CANFD_BRS)
			dlc |= XCAN_DLCR_BRS_MASK;
		dlc |= XCAN_DLCR_EDL_MASK;
	}

	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
	else
		can_put_echo_skb(skb, ndev, 0, 0);

	priv->tx_head++;

	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
	/* If the CAN frame is RTR frame this write triggers transmission
	 * (not on CAN FD)
	 */
	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
	if (priv->devtype.cantype == XAXI_CANFD ||
	    priv->devtype.cantype == XAXI_CANFD_2_0) {
		for (i = 0; i < cf->len; i += 4) {
			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
					(dwindex * XCANFD_DW_BYTES);
			priv->write_reg(priv, ramoff,
					be32_to_cpup((__be32 *)(cf->data + i)));
			dwindex++;
		}
	} else {
		if (cf->len > 0)
			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
		if (cf->len > 4)
			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

		if (!(cf->can_id & CAN_RTR_FLAG)) {
			priv->write_reg(priv,
					XCAN_FRAME_DW1_OFFSET(frame_offset),
					data[0]);
			/* If the CAN frame is Standard/Extended frame this
			 * write triggers transmission (not on CAN FD)
			 */
			priv->write_reg(priv,
					XCAN_FRAME_DW2_OFFSET(frame_offset),
					data[1]);
		}
	}
}

/**
 * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if FIFO is full.
 */
static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	/* Check if the TX buffer is full */
	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
		     XCAN_SR_TXFLL_MASK))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);

	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
	if (priv->tx_max > 1)
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);

	/* Check if the TX buffer is full */
	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 on success, -ENOSPC if there is no space
 */
static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
		     BIT(XCAN_TX_MAILBOX_IDX)))
		return -ENOSPC;

	spin_lock_irqsave(&priv->tx_lock, flags);

	xcan_write_frame(ndev, skb,
			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));

	/* Mark buffer as ready for transmit */
	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));

	netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return 0;
}

/**
 * xcan_start_xmit - Starts the transmission
 * @skb:	sk_buff pointer that contains data to be Txed
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from upper layers to initiate transmission.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
 */
static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
		ret = xcan_start_xmit_mailbox(skb, ndev);
	else
		ret = xcan_start_xmit_fifo(skb, ndev);

	if (ret < 0) {
		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/**
 * xcan_rx - Is called from CAN isr to complete the received
 *	     frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
				XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format */
	cf->len = can_cc_dlc2len(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format */
		if (cf->len > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->len > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}

/**
 * xcanfd_rx - Is called from CAN isr to complete the received
 *	       frame processing
 * @ndev:	Pointer to net_device structure
 * @frame_base:	Register offset to the frame to be read
 *
 * This function is invoked from the CAN isr(poll) to process the Rx frames. It
 * does minimal processing and invokes "netif_receive_skb" to complete further
 * processing.
 * Return: 1 on success and 0 on failure.
 */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					  XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
					  XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
					XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check whether the received frame is an FD frame or not */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}

	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;
	stats->rx_packets++;

	netif_receive_skb(skb);

	return 1;
}

/**
 * xcan_current_error_state - Get current error state from HW
 * @ndev:	Pointer to net_device structure
 *
 * Checks the current CAN error state from the HW. Note that this
 * only checks for ERROR_PASSIVE and ERROR_WARNING.
 *
 * Return:
 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
 * otherwise.
 */
static enum can_state xcan_current_error_state(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);

	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
		return CAN_STATE_ERROR_PASSIVE;
	else if (status & XCAN_SR_ERRWRN_MASK)
		return CAN_STATE_ERROR_WARNING;
	else
		return CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_set_error_state - Set new CAN error state
 * @ndev:	Pointer to net_device structure
 * @new_state:	The new CAN state to be set
 * @cf:		Error frame to be populated or NULL
 *
 * Set new CAN error state for the device, updating statistics and
 * populating the error frame if given.
 */
static void xcan_set_error_state(struct net_device *ndev,
				 enum can_state new_state,
				 struct can_frame *cf)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
	enum can_state rx_state = txerr <= rxerr ? new_state : 0;

	/* non-ERROR states are handled elsewhere */
	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
		return;

	can_change_state(ndev, cf, tx_state, rx_state);

	if (cf) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}
}

/**
 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
 * @ndev:	Pointer to net_device structure
 *
 * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
 * the performed RX/TX has caused it to drop to a lesser state and set
 * the interface state accordingly.
 */
static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	enum can_state old_state = priv->can.state;
	enum can_state new_state;

	/* changing error state due to successful frame RX/TX can only
	 * occur from these states
	 */
	if (old_state != CAN_STATE_ERROR_WARNING &&
	    old_state != CAN_STATE_ERROR_PASSIVE)
		return;

	new_state = xcan_current_error_state(ndev);

	if (new_state != old_state) {
		struct sk_buff *skb;
		struct can_frame *cf;

		skb = alloc_can_err_skb(ndev, &cf);

		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);

		if (skb)
			netif_rx(skb);
	}
}

/**
 * xcan_err_interrupt - error frame Isr
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt and it will
 * check the type of error and forward the error
 * frame to upper layers.
 */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - It will check the state of the CAN device
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts the device into the appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt if set put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt if set put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv:	Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset =
				XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset =
				XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);

	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part.
 * It will process packets up to the maximum quota value.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done)
		xcan_update_error_state_after_rxtx(ndev);

	if (work_done < quota) {
		if (napi_complete_done(napi, work_done)) {
			ier = priv->read_reg(priv, XCAN_IER_OFFSET);
			ier |= xcan_rx_int_mask(priv);
			priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		}
	}
	return work_done;
}

/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max, NULL);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	xcan_update_error_state_after_rxtx(ndev);
}

/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Check for the type of interrupt and process it */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and process it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Check for the type of error interrupt and process it */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* Check for the type of receive interrupt and process it */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

/**
 * xcan_chip_stop - Driver stop routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver's stop routine. It will disable the
 * interrupts and put the device into configuration mode.
 */
static void xcan_chip_stop(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	/* Disable interrupts and leave the can in configuration mode */
	ret = set_reset_mode(ndev);
	if (ret < 0)
		netdev_dbg(ndev, "set_reset_mode() Failed\n");

	priv->can.state = CAN_STATE_STOPPED;
}

/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}

/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_berr_counter - error counter routine
 * @ndev:	Pointer to net_device structure
 * @bec:	Pointer to can_berr_counter structure
 *
 * This is the driver error counter routine.
 * Return: 0 on success and failure value on error
 */
static int xcan_get_berr_counter(const struct net_device *ndev,
				 struct can_berr_counter *bec)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		pm_runtime_put(priv->dev);
		return ret;
	}

	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);

	pm_runtime_put(priv->dev);

	return 0;
}

/**
 * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
 * @ndev:	Pointer to net_device structure
 * @tdcv:	Pointer to TDCV value
 *
 * Return: 0 on success
 */
static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	*tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));

	return 0;
}

static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

/**
 * xcan_suspend - Suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	return pm_runtime_force_suspend(dev);
}

/**
 * xcan_resume - Resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}

/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}

/**
 * xcan_runtime_resume - Runtime resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(priv->bus_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}
	ret = clk_prepare_enable(priv->can_clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		clk_disable_unprepare(priv->bus_clk);
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);

/**
 * xcan_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xcan_priv *priv;
	const struct of_device_id *of_id;
	const struct xcan_devtype_data *devtype = &xcan_axi_data;
	void __iomem *addr;
	int ret;
	int rx_max, tx_max;
	u32 hw_tx_max = 0, hw_rx_max = 0;
	const char *hw_tx_max_property;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}

	of_id = of_match_device(xcan_of_match, &pdev->dev);
	if (of_id && of_id->data)
		devtype = of_id->data;

	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
			     "tx-mailbox-count" : "tx-fifo-depth";

	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
				   &hw_tx_max);
	if (ret < 0) {
		dev_err(&pdev->dev, "missing %s property\n",
			hw_tx_max_property);
		goto err;
	}

	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				   &hw_rx_max);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
		goto err;
	}

	/* With TX FIFO:
	 *
	 * There is no way to directly figure out how many frames have been
	 * sent when the TXOK interrupt is processed. If TXFEMP
	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
	 * to determine if 1 or 2 frames have been sent.
	 * Theoretically we should be able to use TXFWMEMP to determine up
	 * to 3 frames, but it seems that after putting a second frame in the
	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
	 * sent), which is not a sensible state - possibly TXFWMEMP is not
	 * completely synchronized with the rest of the bits?
	 *
	 * With TX mailboxes:
	 *
	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
	 * we submit frames one at a time.
	 */
	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
	    (devtype->flags & XCAN_FLAG_TXFEMP))
		tx_max = min(hw_tx_max, 2U);
	else
		tx_max = 1;

	rx_max = hw_rx_max;

	/* Create a CAN device instance */
	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = &pdev->dev;
	priv->can.bittiming_const = devtype->bittiming_const;
	priv->can.do_set_mode = xcan_do_set_mode;
	priv->can.do_get_berr_counter = xcan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_BERR_REPORTING;

	if (devtype->cantype == XAXI_CANFD) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd;
		priv->can.tdc_const = &xcan_tdc_const_canfd;
	}

	if (devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.data_bittiming_const =
			&xcan_data_bittiming_const_canfd2;
		priv->can.tdc_const = &xcan_tdc_const_canfd2;
	}

	if (devtype->cantype == XAXI_CANFD ||
	    devtype->cantype == XAXI_CANFD_2_0) {
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
						CAN_CTRLMODE_TDC_AUTO;
		priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
	}

	priv->reg_base = addr;
	priv->tx_max = tx_max;
	priv->devtype = *devtype;
	spin_lock_init(&priv->tx_lock);

	/* Get IRQ for the device */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_free;

	ndev->irq = ret;

	ndev->flags |= IFF_ECHO;	/* We support local echo */

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &xcan_netdev_ops;

	/* Getting the CAN can_clk info */
	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(priv->can_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk),
				    "device clock not found\n");
		goto err_free;
	}

	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
	if (IS_ERR(priv->bus_clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk),
				    "bus clock not found\n");
		goto err_free;
	}

	priv->write_reg = xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

	/* After reset the core is in configuration mode, so a little-endian
	 * read of SR should return exactly the config-mode bit; if it does
	 * not, assume the core is wired big-endian and switch accessors.
	 */
	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	pm_runtime_put(&pdev->dev);

	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_candev(ndev);

	return 0;
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");