// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//	Dong Aisheng <b29396@freescale.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

/* Bosch M_CAN user manual can be obtained from:
 * https://github.com/linux-can/can-doc/tree/master/m_can
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
#include <linux/phy/phy.h>

#include "m_can.h"

/* registers definition */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* Core Release Register (CREL) */
#define CREL_REL_MASK		GENMASK(31, 28)
#define CREL_STEP_MASK		GENMASK(27, 24)
#define CREL_SUBSTEP_MASK	GENMASK(23, 20)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_MASK		GENMASK(20, 16)
#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
#define DBTP_DSJW_MASK		GENMASK(3, 0)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_MASK		GENMASK(14, 8)
#define TDCR_TDCF_MASK		GENMASK(6, 0)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_DAR		BIT(6)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
/* for version 3.0.x */
#define CCCR_CMR_MASK		GENMASK(11, 10)
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		GENMASK(9, 8)
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* for version >=3.2.x */
#define CCCR_NISO		BIT(15)
/* for version >=3.3.x */
#define CCCR_WMM		BIT(11)
#define CCCR_UTSU		BIT(10)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_MASK		GENMASK(31, 25)
#define NBTP_NBRP_MASK		GENMASK(24, 16)
#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
#define NBTP_NTSEG2_MASK	GENMASK(6, 0)

/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK		GENMASK(19, 16)
#define TSCC_TSS_MASK		GENMASK(1, 0)
#define TSCC_TSS_DISABLE	0x0
#define TSCC_TSS_INTERNAL	0x1
#define TSCC_TSS_EXTERNAL	0x2

/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK		GENMASK(15, 0)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_MASK		GENMASK(14, 8)
#define ECR_TEC_MASK		GENMASK(7, 0)

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		GENMASK(2, 0)

/* Interrupt Register (IR) */
#define IR_ALL_INT	0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA		BIT(29)
#define IR_PED		BIT(28)
#define IR_PEA		BIT(27)

/* Bits for version 3.0.x */
#define IR_STE		BIT(31)
#define IR_FOE		BIT(30)
#define IR_ACKE		BIT(29)
#define IR_BE		BIT(28)
#define IR_CRCE		BIT(27)
#define IR_WDI		BIT(26)
#define IR_BO		BIT(25)
#define IR_EW		BIT(24)
#define IR_EP		BIT(23)
#define IR_ELO		BIT(22)
#define IR_BEU		BIT(21)
#define IR_BEC		BIT(20)
#define IR_DRX		BIT(19)
#define IR_TOO		BIT(18)
#define IR_MRAF		BIT(17)
#define IR_TSW		BIT(16)
#define IR_TEFL		BIT(15)
#define IR_TEFF		BIT(14)
#define IR_TEFW		BIT(13)
#define IR_TEFN		BIT(12)
#define IR_TFE		BIT(11)
#define IR_TCF		BIT(10)
#define IR_TC		BIT(9)
#define IR_HPM		BIT(8)
#define IR_RF1L		BIT(7)
#define IR_RF1F		BIT(6)
#define IR_RF1W		BIT(5)
#define IR_RF1N		BIT(4)
#define IR_RF0L		BIT(3)
#define IR_RF0F		BIT(2)
#define IR_RF0W		BIT(1)
#define IR_RF0N		BIT(0)
#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
			 IR_RF0L)
#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)

/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
			 IR_RF0L)
#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0	0x0
#define ILS_ALL_INT1	0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1	BIT(1)
#define ILE_EINT0	BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_MASK	GENMASK(30, 24)
#define RXFC_FS_MASK	GENMASK(22, 16)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL	BIT(25)
#define RXFS_FF		BIT(24)
#define RXFS_FPI_MASK	GENMASK(21, 16)
#define RXFS_FGI_MASK	GENMASK(13, 8)
#define RXFS_FFL_MASK	GENMASK(6, 0)

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define RXESC_RBDS_MASK		GENMASK(10, 8)
#define RXESC_F1DS_MASK		GENMASK(6, 4)
#define RXESC_F0DS_MASK		GENMASK(2, 0)
#define RXESC_64B		0x7

/* Tx Buffer Configuration (TXBC) */
#define TXBC_TFQS_MASK		GENMASK(29, 24)
#define TXBC_NDTB_MASK		GENMASK(21, 16)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_MASK	GENMASK(20, 16)
#define TXFQS_TFGI_MASK		GENMASK(12, 8)
#define TXFQS_TFFL_MASK		GENMASK(5, 0)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_MASK		GENMASK(2, 0)
#define TXESC_TBDS_64B		0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_MASK		GENMASK(21, 16)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_MASK		GENMASK(12, 8)
#define TXEFS_EFFL_MASK		GENMASK(5, 0)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_MASK		GENMASK(4, 0)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA		0x8

/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI		BIT(31)
#define RX_BUF_XTD		BIT(30)
#define RX_BUF_RTR		BIT(29)
/* R1 */
#define RX_BUF_ANMF		BIT(31)
#define RX_BUF_FDF		BIT(21)
#define RX_BUF_BRS		BIT(20)
#define RX_BUF_RXTS_MASK	GENMASK(15, 0)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI		BIT(31)
#define TX_BUF_XTD		BIT(30)
#define TX_BUF_RTR		BIT(29)
/* T1 */
#define TX_BUF_EFC		BIT(23)
#define TX_BUF_FDF		BIT(21)
#define TX_BUF_BRS		BIT(20)
#define TX_BUF_MM_MASK		GENMASK(31, 24)
#define TX_BUF_DLC_MASK		GENMASK(19, 16)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_MASK	GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK	GENMASK(15, 0)

/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
 * and we can save a (potentially slow) bus round trip by combining
 * reads and writes to them.
 */
struct id_and_dlc {
	u32 id;
	u32 dlc;
};

static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}

static int
m_can_fifo_read(struct m_can_classdev *cdev,
		u32 fgi, unsigned int offset, void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}

static int
m_can_fifo_write(struct m_can_classdev *cdev,
		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
		offset;

	if (val_count == 0)
		return 0;

	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}

static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					  u32 fpi, u32 val)
{
	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}

static int
m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
{
	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
		offset;

	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}

static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
	return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}

static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}

static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}

/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
 * width.
 */
static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
{
	u32 tscv;
	u32 tsc;

	tscv = m_can_read(cdev, M_CAN_TSCV);
	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);

	return (tsc << 16);
}

static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		if (cdev->version > 30)
			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
					   m_can_read(cdev, M_CAN_TXFQS));

		can_free_echo_skb(cdev->net, putidx, NULL);
		cdev->tx_skb = NULL;
	}
}

/* For peripherals, pass skb to rx-offload, which will push skb from
 * napi. For non-peripherals, RX is done in napi already, so push
 * directly. timestamp is used to ensure good skb ordering in
 * rx-offload and is ignored for non-peripherals.
 */
static void m_can_receive_skb(struct m_can_classdev *cdev,
			      struct sk_buff *skb,
			      u32 timestamp)
{
	if (cdev->is_peripheral) {
		struct net_device_stats *stats = &cdev->net->stats;
		int err;

		err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
						     timestamp);
		if (err)
			stats->rx_fifo_errors++;
	} else {
		netif_receive_skb(skb);
	}
}

static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct id_and_dlc fifo_header;
	u32 fgi;
	u32 timestamp = 0;
	int err;

	/* calculate the fifo get index for where to read data */
	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
	if (err)
		goto out_fail;

	if (fifo_header.dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	if (fifo_header.dlc & RX_BUF_FDF)
		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
	else
		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);

	if (fifo_header.id & RX_BUF_XTD)
		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;

	if (fifo_header.id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (fifo_header.dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
				      cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_free_skb;

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	/* acknowledge rx fifo 0 */
	m_can_write(cdev, M_CAN_RXF0A, fgi);

	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);

	m_can_receive_skb(cdev, skb, timestamp);

	return 0;

out_free_skb:
	kfree_skb(skb);
out_fail:
	netdev_err(dev, "FIFO read returned %d\n", err);
	return err;
}
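/* Read frames out of RX FIFO 0 until it is empty or the quota is
 * exhausted. Returns the number of frames passed to the stack, or a
 * negative error if a FIFO read fails.
 */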
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;
	int err;

	rxfs = m_can_read(cdev, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		err = m_can_read_fifo(dev, rxfs);
		if (err)
			return err;

		quota--;
		pkts++;
		rxfs = m_can_read(cdev, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;
	u32 timestamp = 0;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	cdev->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(cdev, M_CAN_ECR);
	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);

	return 0;
}

static int m_can_clk_start(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support == 0)
		return 0;

	return pm_runtime_resume_and_get(cdev->dev);
}

static void m_can_clk_stop(struct m_can_classdev *cdev)
{
	if (cdev->pm_clock_support)
		pm_runtime_put_sync(cdev->dev);
}

static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = m_can_clk_start(cdev);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(cdev);

	return 0;
}
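/* Handle a transition to the given state: update the driver state and
 * statistics, and queue an error frame carrying the new state together
 * with the current error counters.
 */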
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;
	u32 timestamp = 0;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cdev->can.can_stats.error_warning++;
		cdev->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cdev->can.can_stats.error_passive++;
		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cdev->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(cdev);
		cdev->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(cdev, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}

static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}

static inline bool is_lec_err(u32 psr)
{
	psr &= LEC_UNUSED;

	return psr && (psr != LEC_UNUSED);
}

static inline bool m_can_is_protocol_err(u32 irqstatus)
{
	return irqstatus & IR_ERR_LEC_31X;
}
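/* Report a protocol error (IR_PED/IR_PEA on >= v3.1.x cores). An error
 * in the arbitration phase is accounted as arbitration lost and the
 * condition is forwarded to the stack as an error frame.
 */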
static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* update tx error stats since there is protocol error */
	stats->tx_errors++;

	/* update arbitration lost status */
	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
		netdev_dbg(dev, "Protocol error in arbitration phase\n");
		cdev->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	if (unlikely(!skb)) {
		netdev_dbg(dev, "allocation of skb failed\n");
		return 0;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}

static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* handle protocol errors in arbitration phase */
	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    m_can_is_protocol_err(irqstatus))
		work_done += m_can_handle_protocol_error(dev, irqstatus);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}
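/* Handle the interrupt causes latched by the interrupt handler: state
 * changes and bus errors first, then frames from RX FIFO 0, bounded by
 * the NAPI quota. Returns the amount of work done, or a negative error
 * from the FIFO read path.
 */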
static int m_can_rx_handler(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int rx_work_or_err;
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(cdev, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	psr = m_can_read(cdev, M_CAN_PSR);

	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N) {
		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
		if (rx_work_or_err < 0)
			return rx_work_or_err;

		work_done += rx_work_or_err;
	}
end:
	return work_done;
}

static int m_can_rx_peripheral(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0)
		m_can_enable_all_interrupts(cdev);

	return work_done;
}

static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_classdev *cdev = netdev_priv(dev);
	int work_done;

	work_done = m_can_rx_handler(dev, quota);

	/* Don't re-enable interrupts if the driver had a fatal error
	 * (e.g., FIFO read failure).
	 */
	if (work_done >= 0 && work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(cdev);
	}

	return work_done;
}

/* Echo tx skb and update net stats. Peripherals use rx-offload for
 * echo. timestamp is used for peripherals to ensure correct ordering
 * by rx-offload, and is ignored for non-peripherals.
 */
static void m_can_tx_update_stats(struct m_can_classdev *cdev,
				  unsigned int msg_mark,
				  u32 timestamp)
{
	struct net_device *dev = cdev->net;
	struct net_device_stats *stats = &dev->stats;

	if (cdev->is_peripheral)
		stats->tx_bytes +=
			can_rx_offload_get_echo_skb(&cdev->offload,
						    msg_mark,
						    timestamp,
						    NULL);
	else
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);

	stats->tx_packets++;
}

static int m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_classdev *cdev = netdev_priv(dev);

	/* read tx event fifo status */
	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		u32 txe, timestamp = 0;
		int err;

		/* retrieve get index */
		fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));

		/* get message marker, timestamp */
		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
		if (err) {
			netdev_err(dev, "TXE FIFO read returned %d\n", err);
			return err;
		}

		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);

		/* ack txe element */
		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
							  fgi));

		/* update stats */
		m_can_tx_update_stats(cdev, msg_mark, timestamp);
	}

	return 0;
}

static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 ir;

	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;
	ir = m_can_read(cdev, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(cdev, M_CAN_IR, ir);

	if (cdev->ops->clear_interrupts)
		cdev->ops->clear_interrupts(cdev);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		cdev->irqstatus = ir;
		m_can_disable_all_interrupts(cdev);
		if (!cdev->is_peripheral)
			napi_schedule(&cdev->napi);
		else if (m_can_rx_peripheral(dev) < 0)
			goto out_fail;
	}

	if (cdev->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt */
			u32 timestamp = 0;

			if (cdev->is_peripheral)
				timestamp = m_can_get_timestamp(cdev);
			m_can_tx_update_stats(cdev, 0, timestamp);

			can_led_event(dev, CAN_LED_EVENT_TX);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX Event FIFO element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;

			can_led_event(dev, CAN_LED_EVENT_TX);
			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(cdev))
				netif_wake_queue(dev);
		}
	}

	if (cdev->is_peripheral)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;

out_fail:
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
}
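/* Bit timing limits depend on the M_CAN IP revision: v3.0.x and
 * v3.1.x+ cores advertise different segment and prescaler ranges, so a
 * table is kept for each and selected in m_can_dev_setup().
 */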
static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	const struct can_bittiming *bt = &cdev->can.bittiming;
	const struct can_bittiming *dbt = &cdev->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
		  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
		  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
		  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
	m_can_write(cdev, M_CAN_NBTP, reg_btp);

	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (cdev->can.clock.freq / 1000) *
				ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(cdev, M_CAN_TDCR,
				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
		}

		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
			   FIELD_PREP(DBTP_DSJW_MASK, sjw) |
			   FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
			   FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);

		m_can_write(cdev, M_CAN_DBTP, reg_btp);
	}

	return 0;
}

/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *   - >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 * - configure timestamp generation
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(cdev, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(cdev, M_CAN_RXESC,
		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(cdev, M_CAN_GFC, 0x0);

	if (cdev->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
			    cdev->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(cdev, M_CAN_TXBC,
			    FIELD_PREP(TXBC_TFQS_MASK,
				       cdev->mcfg[MRAM_TXB].num) |
			    cdev->mcfg[MRAM_TXB].off);
	}

	/* support 64 bytes payload */
	m_can_write(cdev, M_CAN_TXESC,
		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));

	/* TX Event FIFO */
	if (cdev->version == 30) {
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
			    cdev->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK,
				       cdev->mcfg[MRAM_TXE].num) |
			    cdev->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(cdev, M_CAN_RXF0C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
		    cdev->mcfg[MRAM_RXF0].off);

	m_can_write(cdev, M_CAN_RXF1C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
		    cdev->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(cdev, M_CAN_CCCR);
	test = m_can_read(cdev, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (cdev->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO | CCCR_DAR);

		/* Only 3.2.x has NISO Bit implemented */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Disable Auto Retransmission (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;

	/* Write config */
	m_can_write(cdev, M_CAN_CCCR, cccr);
	m_can_write(cdev, M_CAN_TEST, test);

	/* Enable interrupts */
	m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		if (cdev->version == 30)
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	else
		m_can_write(cdev, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* enable internal timestamp generation, with a prescaler of 16. The
	 * prescaler is applied to the nominal bit timing
	 */
	m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));

	m_can_config_endisable(cdev, false);

	if (cdev->ops->init)
		cdev->ops->init(cdev);
}

static void m_can_start(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(cdev);
}

static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_clean(dev);
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(struct m_can_classdev *cdev)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(cdev, M_CAN_CREL);
	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}

/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
	u32 cccr_reg, cccr_poll = 0;
	int niso_timeout = -ETIMEDOUT;
	int i;

	m_can_config_endisable(cdev, true);
	cccr_reg = m_can_read(cdev, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	for (i = 0; i <= 10; i++) {
		cccr_poll = m_can_read(cdev, M_CAN_CCCR);
		if (cccr_poll == cccr_reg) {
			niso_timeout = 0;
			break;
		}

		usleep_range(1, 5);
	}

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(cdev, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(cdev, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}
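/* Probe the core release and set up everything that depends on it:
 * NAPI for non-peripheral devices, the bit timing limits and the
 * supported control modes.
 */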
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;
	int m_can_version, err;

	m_can_version = m_can_check_core_release(cdev);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(cdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	if (!cdev->is_peripheral)
		netif_napi_add(dev, &cdev->napi,
			       m_can_poll, NAPI_POLL_WEIGHT);

	/* Shared properties of all M_CAN versions */
	cdev->version = m_can_version;
	cdev->can.do_set_mode = m_can_set_mode;
	cdev->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD |
		CAN_CTRLMODE_ONE_SHOT;

	/* Set properties depending on M_CAN version */
	switch (cdev->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_30X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = cdev->bit_timing ?
			cdev->bit_timing : &m_can_bittiming_const_31X;

		cdev->can.data_bittiming_const = cdev->data_timing ?
			cdev->data_timing :
			&m_can_data_bittiming_const_31X;

		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
			 CAN_CTRLMODE_FD_NON_ISO : 0);
		break;
	default:
		dev_err(cdev->dev, "Unsupported version number: %2d",
			cdev->version);
		return -EINVAL;
	}

	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}

static void m_can_stop(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(cdev);

	/* Set init mode to disengage from the network */
	m_can_config_endisable(cdev, true);

	/* set the state as STOPPED */
	cdev->can.state = CAN_STATE_STOPPED;
}

static int m_can_close(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	netif_stop_queue(dev);

	if (!cdev->is_peripheral)
		napi_disable(&cdev->napi);

	m_can_stop(dev);
	m_can_clk_stop(cdev);
	free_irq(dev->irq, dev);

	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		destroy_workqueue(cdev->tx_wq);
		cdev->tx_wq = NULL;
	}

	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);

	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	phy_power_off(cdev->transceiver);

	return 0;
}

static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = cdev->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!cdev->can.echo_skb[next_idx];
}
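/* Write the frame held in cdev->tx_skb to the hardware. v3.0.x cores
 * use a single dedicated TX buffer; v3.1.x and later queue into the TX
 * FIFO, using the put index as message marker so the TX event handler
 * can echo the right skb.
 */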
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
	struct net_device *dev = cdev->net;
	struct sk_buff *skb = cdev->tx_skb;
	struct id_and_dlc fifo_header;
	u32 cccr, fdflags;
	int err;
	int putidx;

	cdev->tx_skb = NULL;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		fifo_header.id = cf->can_id & CAN_EFF_MASK;
		fifo_header.id |= TX_BUF_XTD;
	} else {
		fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		fifo_header.id |= TX_BUF_RTR;

	if (cdev->version == 30) {
		netif_stop_queue(dev);

		fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;

		/* Write the frame ID, DLC, and payload to the FIFO element. */
		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(cdev, M_CAN_CCCR);
			cccr &= ~CCCR_CMR_MASK;
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD_BRS);
				else
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD);
			} else {
				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
			}
			m_can_write(cdev, M_CAN_CCCR, cccr);
		}
		m_can_write(cdev, M_CAN_TXBTIE, 0x1);

		can_put_echo_skb(skb, dev, 0, 0);

		m_can_write(cdev, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(cdev)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");

			if (cdev->is_peripheral) {
				kfree_skb(skb);
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			} else {
				return NETDEV_TX_BUSY;
			}
		}

		/* get put index for frame */
		putidx = FIELD_GET(TXFQS_TFQPI_MASK,
				   m_can_read(cdev, M_CAN_TXFQS));

		/* Construct DLC Field, with CAN-FD configuration.
		 * Use the put index of the fifo as the message marker,
		 * used in the TX interrupt for sending the correct echo frame.
		 */

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
			fdflags | TX_BUF_EFC;
		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx, 0);

		/* Enable TX FIFO element to start transfer */
		m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(cdev) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

out_fail:
	netdev_err(dev, "FIFO write returned %d\n", err);
	m_can_disable_all_interrupts(cdev);
	return NETDEV_TX_BUSY;
}

static void m_can_tx_work_queue(struct work_struct *ws)
{
	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
						   tx_work);

	m_can_tx_handler(cdev);
}

static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cdev->is_peripheral) {
		if (cdev->tx_skb) {
			netdev_err(dev, "hard_xmit called while tx busy\n");
			return NETDEV_TX_BUSY;
		}

		if (cdev->can.state == CAN_STATE_BUS_OFF) {
			m_can_clean(dev);
		} else {
			/* Need to stop the queue to avoid numerous requests
			 * from being sent. Suggested improvement is to create
			 * a queueing mechanism that will queue the skbs and
			 * process them in order.
			 */
			cdev->tx_skb = skb;
			netif_stop_queue(cdev->net);
			queue_work(cdev->tx_wq, &cdev->tx_work);
		}
	} else {
		cdev->tx_skb = skb;
		return m_can_tx_handler(cdev);
	}

	return NETDEV_TX_OK;
}

static int m_can_open(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = phy_power_on(cdev->transceiver);
	if (err)
		return err;

	err = m_can_clk_start(cdev);
	if (err)
		goto out_phy_power_off;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	if (cdev->is_peripheral)
		can_rx_offload_enable(&cdev->offload);

	/* register interrupt handler */
	if (cdev->is_peripheral) {
		cdev->tx_skb = NULL;
		cdev->tx_wq = alloc_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}

		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);

		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
					   dev->name, dev);
	} else {
		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
				  dev);
	}

	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);
out_wq_fail:
	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(cdev);
out_phy_power_off:
	phy_power_off(cdev->transceiver);
	return err;
}

static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}
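/* Derive the Message RAM layout from the eight u32 cells of the
 * "bosch,mram-cfg" property: <offset sidf_elems xidf_elems rxf0_elems
 * rxf1_elems rxb_elems txe_elems txb_elems>, e.g. <0x0 0 0 32 0 0 1 1>.
 * Each section is placed directly behind the previous one.
 */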
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
				const u32 *mram_config_vals)
{
	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		FIELD_MAX(TXBC_NDTB_MASK);

	dev_dbg(cdev->dev,
		"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
		cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
		cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
		cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
		cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
		cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
		cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}

int m_can_init_ram(struct m_can_classdev *cdev)
{
	int end, i, start;
	int err = 0;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = cdev->mcfg[MRAM_SIDF].off;
	end = cdev->mcfg[MRAM_TXB].off +
		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;

	for (i = start; i < end; i += 4) {
		err = m_can_fifo_write_no_off(cdev, i, 0x0);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(m_can_init_ram);

int m_can_class_get_clocks(struct m_can_classdev *cdev)
{
	int ret = 0;

	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
	cdev->cclk = devm_clk_get(cdev->dev, "cclk");

	if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
		dev_err(cdev->dev, "no clock found\n");
		ret = -ENODEV;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
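/* Allocate the CAN net_device plus sizeof_priv bytes of glue driver
 * private data and parse the "bosch,mram-cfg" property. The TX FIFO
 * size from that property doubles as the number of echo skbs.
 */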
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
						int sizeof_priv)
{
	struct m_can_classdev *class_dev = NULL;
	u32 mram_config_vals[MRAM_CFG_LEN];
	struct net_device *net_dev;
	u32 tx_fifo_size;
	int ret;

	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
					     "bosch,mram-cfg",
					     mram_config_vals,
					     sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(dev, "Could not get Message RAM configuration.");
		goto out;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
	if (!net_dev) {
		dev_err(dev, "Failed to allocate CAN device");
		goto out;
	}

	class_dev = netdev_priv(net_dev);
	class_dev->net = net_dev;
	class_dev->dev = dev;
	SET_NETDEV_DEV(net_dev, dev);

	m_can_of_parse_mram(class_dev, mram_config_vals);
out:
	return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);

void m_can_class_free_dev(struct net_device *net)
{
	free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);
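/* Final registration step for the glue drivers (e.g. the platform and
 * tcan4x5x front ends): start the clocks, set up rx-offload for
 * peripherals, probe the core and register the CAN netdevice. The
 * clocks are stopped again until the device is opened.
 */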
int m_can_class_register(struct m_can_classdev *cdev)
{
	int ret;

	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;
	}

	if (cdev->is_peripheral) {
		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
						NAPI_POLL_WEIGHT);
		if (ret)
			goto clk_disable;
	}

	ret = m_can_dev_setup(cdev);
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);
		goto rx_offload_del;
	}

	devm_can_led_init(cdev->net);

	of_can_transceiver(cdev->net);

	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, cdev->net->irq, cdev->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is opened
	 */
	m_can_clk_stop(cdev);

	return 0;

rx_offload_del:
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
clk_disable:
	m_can_clk_stop(cdev);

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);

void m_can_class_unregister(struct m_can_classdev *cdev)
{
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
	unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);

int m_can_class_suspend(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(cdev);
	}

	pinctrl_pm_select_sleep_state(dev);

	cdev->can.state = CAN_STATE_SLEEPING;

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);

int m_can_class_resume(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	pinctrl_pm_select_default_state(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;

		m_can_init_ram(cdev);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");