// SPDX-License-Identifier: GPL-2.0-only
/*
 * at91_can.c - CAN network driver for AT91 SoC CAN controller
 *
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009, 2010, 2011, 2023 by Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/rx-offload.h>

/* Bit mask covering mailboxes 0 .. i-1 */
#define AT91_MB_MASK(i)		((1 << (i)) - 1)

/* Common registers */
enum at91_reg {
	AT91_MR = 0x000,
	AT91_IER = 0x004,
	AT91_IDR = 0x008,
	AT91_IMR = 0x00C,
	AT91_SR = 0x010,
	AT91_BR = 0x014,
	AT91_TIM = 0x018,
	AT91_TIMESTP = 0x01C,
	AT91_ECR = 0x020,
	AT91_TCR = 0x024,
	AT91_ACR = 0x028,
};

/* Mailbox registers (0 <= i <= 15) */
#define AT91_MMR(i)		((enum at91_reg)(0x200 + ((i) * 0x20)))
#define AT91_MAM(i)		((enum at91_reg)(0x204 + ((i) * 0x20)))
#define AT91_MID(i)		((enum at91_reg)(0x208 + ((i) * 0x20)))
#define AT91_MFID(i)		((enum at91_reg)(0x20C + ((i) * 0x20)))
#define AT91_MSR(i)		((enum at91_reg)(0x210 + ((i) * 0x20)))
#define AT91_MDL(i)		((enum at91_reg)(0x214 + ((i) * 0x20)))
#define AT91_MDH(i)		((enum at91_reg)(0x218 + ((i) * 0x20)))
#define AT91_MCR(i)		((enum at91_reg)(0x21C + ((i) * 0x20)))

/* Register bits */
#define AT91_MR_CANEN		BIT(0)
#define AT91_MR_LPM		BIT(1)
#define AT91_MR_ABM		BIT(2)
#define AT91_MR_OVL		BIT(3)
#define AT91_MR_TEOF		BIT(4)
#define AT91_MR_TTM		BIT(5)
#define AT91_MR_TIMFRZ		BIT(6)
#define AT91_MR_DRPT		BIT(7)

#define AT91_SR_RBSY		BIT(29)
#define AT91_SR_TBSY		BIT(30)
#define AT91_SR_OVLSY		BIT(31)

#define AT91_BR_PHASE2_MASK	GENMASK(2, 0)
#define AT91_BR_PHASE1_MASK	GENMASK(6, 4)
#define AT91_BR_PROPAG_MASK	GENMASK(10, 8)
#define AT91_BR_SJW_MASK	GENMASK(13, 12)
#define AT91_BR_BRP_MASK	GENMASK(22, 16)
#define AT91_BR_SMP		BIT(24)

#define AT91_TIM_TIMER_MASK	GENMASK(15, 0)

#define AT91_ECR_REC_MASK	GENMASK(8, 0)
#define AT91_ECR_TEC_MASK	GENMASK(23, 16)

#define AT91_TCR_TIMRST		BIT(31)

#define AT91_MMR_MTIMEMARK_MASK	GENMASK(15, 0)
#define AT91_MMR_PRIOR_MASK	GENMASK(19, 16)
#define AT91_MMR_MOT_MASK	GENMASK(26, 24)

#define AT91_MID_MIDVB_MASK	GENMASK(17, 0)
#define AT91_MID_MIDVA_MASK	GENMASK(28, 18)
#define AT91_MID_MIDE		BIT(29)

#define AT91_MSR_MTIMESTAMP_MASK	GENMASK(15, 0)
#define AT91_MSR_MDLC_MASK	GENMASK(19, 16)
#define AT91_MSR_MRTR		BIT(20)
#define AT91_MSR_MABT		BIT(22)
#define AT91_MSR_MRDY		BIT(23)
#define AT91_MSR_MMI		BIT(24)

#define AT91_MCR_MDLC_MASK	GENMASK(19, 16)
#define AT91_MCR_MRTR		BIT(20)
#define AT91_MCR_MACR		BIT(22)
#define AT91_MCR_MTCR		BIT(23)

/* Mailbox Modes */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED = 0,
	AT91_MB_MODE_RX = 1,
	AT91_MB_MODE_RX_OVRWR = 2,
	AT91_MB_MODE_TX = 3,
	AT91_MB_MODE_CONSUMER = 4,
	AT91_MB_MODE_PRODUCER = 5,
};

/* Interrupt mask bits */
#define AT91_IRQ_ERRA		BIT(16)
#define AT91_IRQ_WARN		BIT(17)
#define AT91_IRQ_ERRP		BIT(18)
#define AT91_IRQ_BOFF		BIT(19)
#define AT91_IRQ_SLEEP		BIT(20)
#define AT91_IRQ_WAKEUP		BIT(21)
#define AT91_IRQ_TOVF		BIT(22)
#define AT91_IRQ_TSTP		BIT(23)
#define AT91_IRQ_CERR		BIT(24)
#define AT91_IRQ_SERR		BIT(25)
#define AT91_IRQ_AERR		BIT(26)
#define AT91_IRQ_FERR		BIT(27)
#define AT91_IRQ_BERR		BIT(28)

#define AT91_IRQ_ERR_ALL	(0x1fff0000)
#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
				 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
#define AT91_IRQ_ERR_LINE	(AT91_IRQ_ERRA | AT91_IRQ_WARN | \
				 AT91_IRQ_ERRP | AT91_IRQ_BOFF)

#define AT91_IRQ_ALL		(0x1fffffff)

enum at91_devtype {
	AT91_DEVTYPE_SAM9263,
	AT91_DEVTYPE_SAM9X5,
};

/* Per-SoC mailbox layout: which mailboxes form the RX FIFO and how
 * many TX mailboxes exist (1 << tx_shift).
 */
struct at91_devtype_data {
	unsigned int rx_first;	/* first mailbox used for reception */
	unsigned int rx_last;	/* last RX mailbox (overwrite mailbox) */
	unsigned int tx_shift;	/* log2 of the number of TX mailboxes */
	enum at91_devtype type;
};

struct at91_priv {
	struct can_priv can;	/* must be the first member! */
	struct can_rx_offload offload;
	struct phy *transceiver;

	void __iomem *reg_base;

	unsigned int tx_head;	/* next TX mailbox/prio counter, see at91_start_xmit() */
	unsigned int tx_tail;	/* oldest not-yet-ACKed TX mailbox/prio counter */
	struct at91_devtype_data devtype_data;

	struct clk *clk;
	struct at91_can_data *pdata;

	canid_t mb0_id;		/* CAN-ID programmed into the disabled mailbox 0 (sysfs tunable) */
};

static inline struct at91_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct at91_priv, offload);
}

static const struct at91_devtype_data at91_at91sam9263_data = {
	.rx_first = 1,
	.rx_last = 11,
	.tx_shift = 2,
	.type = AT91_DEVTYPE_SAM9263,
};

static const struct at91_devtype_data at91_at91sam9x5_data = {
	.rx_first = 0,
	.rx_last = 5,
	.tx_shift = 1,
	.type = AT91_DEVTYPE_SAM9X5,
};

static const struct can_bittiming_const at91_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 2,
	.brp_max = 128,
	.brp_inc = 1,
};

/* Generate at91_is_sam9263() / at91_is_sam9X5() devtype predicates */
#define AT91_IS(_model) \
static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \
{ \
	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}

AT91_IS(9263);
AT91_IS(9X5);

static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_first;
}

static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_last;
}

static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
	return priv->devtype_data.tx_shift;
}

static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
{
	return 1 << get_mb_tx_shift(priv);
}

/* TX mailboxes directly follow the RX mailboxes */
static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) + 1;
}

static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
{
	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}

/* tx_head/tx_tail layout: low tx_shift bits = mailbox offset,
 * next 4 bits = mailbox priority (see theory of operation below)
 */
static inline unsigned int get_head_prio_shift(const struct at91_priv *priv)
{
	return get_mb_tx_shift(priv);
}

static inline unsigned int get_head_prio_mask(const struct at91_priv *priv)
{
	return 0xf << get_mb_tx_shift(priv);
}

static inline unsigned int get_head_mb_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_shift(priv));
}

static inline unsigned int get_head_mask(const struct at91_priv *priv)
{
	return get_head_mb_mask(priv) | get_head_prio_mask(priv);
}

/* Interrupt bit mask covering all RX mailboxes */
static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

/* Interrupt bit mask covering all TX mailboxes */
static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_tx_first(priv));
}

static inline unsigned int get_tx_head_mb(const struct at91_priv *priv)
{
	return (priv->tx_head & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}

static inline unsigned int get_tx_head_prio(const struct at91_priv *priv)
{
	return (priv->tx_head >> get_head_prio_shift(priv)) & 0xf;
}

static inline unsigned int get_tx_tail_mb(const struct at91_priv *priv)
{
	return (priv->tx_tail & get_head_mb_mask(priv)) + get_mb_tx_first(priv);
}

static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return readl_relaxed(priv->reg_base + reg);
}

static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
			      u32 value)
{
	writel_relaxed(value, priv->reg_base + reg);
}

static inline void set_mb_mode_prio(const struct at91_priv *priv,
				    unsigned int mb, enum at91_mb_mode mode,
				    u8 prio)
{
	const u32 reg_mmr = FIELD_PREP(AT91_MMR_MOT_MASK, mode) |
		FIELD_PREP(AT91_MMR_PRIOR_MASK, prio);

	at91_write(priv, AT91_MMR(mb), reg_mmr);
}

static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
			       enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}

/* Convert a CAN-ID into the MID register layout (MIDE marks extended IDs) */
static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
{
	u32 reg_mid;

	if (can_id & CAN_EFF_FLAG)
		reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, can_id) |
			AT91_MID_MIDE;
	else
		reg_mid = FIELD_PREP(AT91_MID_MIDVA_MASK, can_id);

	return reg_mid;
}

static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next mailboxes are used as a
	 * reception FIFO. The last of the RX mailboxes is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
	 * overflow.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < get_mb_rx_first(priv); i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
	}

	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The last mailboxes are used for transmitting. */
	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx helper pointers */
	priv->tx_head = priv->tx_tail = 0;
}

/* Program the BR register from the bittiming computed by the CAN core */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br = 0;

	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		reg_br |= AT91_BR_SMP;

	/* hardware register fields are "value - 1" encoded */
	reg_br |= FIELD_PREP(AT91_BR_BRP_MASK, bt->brp - 1) |
		FIELD_PREP(AT91_BR_SJW_MASK, bt->sjw - 1) |
		FIELD_PREP(AT91_BR_PROPAG_MASK, bt->prop_seg - 1) |
		FIELD_PREP(AT91_BR_PHASE1_MASK, bt->phase_seg1 - 1) |
		FIELD_PREP(AT91_BR_PHASE2_MASK, bt->phase_seg2 - 1);

	netdev_dbg(dev, "writing AT91_BR: 0x%08x\n", reg_br);

	at91_write(priv, AT91_BR, reg_br);

	return 0;
}

/* Read RX/TX error counters from the ECR register */
static int at91_get_berr_counter(const struct net_device *dev,
				 struct can_berr_counter *bec)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_ecr = at91_read(priv, AT91_ECR);

	bec->rxerr = FIELD_GET(AT91_ECR_REC_MASK, reg_ecr);
	bec->txerr = FIELD_GET(AT91_ECR_TEC_MASK, reg_ecr);

	return 0;
}

/* Bring the controller into operation: bittiming, mailboxes, MR, IRQs */
static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable chip */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_set_bittiming(dev);
	at91_setup_mailboxes(dev);

	/* enable chip */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
	else
		reg_mr = AT91_MR_CANEN;
	at91_write(priv, AT91_MR, reg_mr);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Dummy read to clear latched line error interrupts on
	 * sam9x5 and newer SoCs.
	 */
	at91_read(priv, AT91_SR);

	/* Enable interrupts */
	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERR_LINE | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IER, reg_ier);
}

static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* Abort any pending TX requests. However this doesn't seem to
	 * work in case of bus-off on sama5d3.
	 */
	at91_write(priv, AT91_ACR, get_irq_mb_tx(priv));

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	priv->can.state = state;
}

/* theory of operation:
 *
 * According to the datasheet priority 0 is the highest priority, 15
 * is the lowest. If two mailboxes have the same priority level the
 * message of the mailbox with the lowest number is sent first.
 *
 * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
 * the next mailbox with prio 0, and so on, until all mailboxes are
 * used. Then we start from the beginning with mailbox
 * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
 * prio 1. When we reach the last mailbox with prio 15, we have to
 * stop sending, waiting for all messages to be delivered, then start
 * again with mailbox AT91_MB_TX_FIRST prio 0.
 *
 * We use the priv->tx_head as counter for the next transmission
 * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
 * encode the mailbox number, the upper 4 bits the mailbox priority:
 *
 * priv->tx_head = (prio << get_next_prio_shift(priv)) |
 *		   (mb - get_mb_tx_first(priv));
 *
 */
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	mb = get_tx_head_mb(priv);
	prio = get_tx_head_prio(priv);

	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);

		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	reg_mid = at91_can_id_to_reg_mid(cf->can_id);

	reg_mcr = FIELD_PREP(AT91_MCR_MDLC_MASK, cf->len) |
		AT91_MCR_MTCR;

	if (cf->can_id & CAN_RTR_FLAG)
		reg_mcr |= AT91_MCR_MRTR;

	/* disable MB while writing ID (see datasheet) */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);

	/* we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_head buffer prio and mailbox equals 0.
	 *
	 * also stop the queue if next buffer is still in use
	 * (== not ready)
	 */
	priv->tx_head++;
	if (!(at91_read(priv, AT91_MSR(get_tx_head_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_head & get_head_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}

static inline u32 at91_get_timestamp(const struct at91_priv *priv)
{
	return at91_read(priv, AT91_TIM);
}

/* alloc_can_err_skb() plus a snapshot of the free-running timer, so
 * error frames get queued with a timestamp like regular RX frames
 */
static inline struct sk_buff *
at91_alloc_can_err_skb(struct net_device *dev,
		       struct can_frame **cf, u32 *timestamp)
{
	const struct at91_priv *priv = netdev_priv(dev);

	*timestamp = at91_get_timestamp(priv);

	return alloc_can_err_skb(dev, cf);
}

/**
 * at91_rx_overflow_err - send error frame due to rx overflow
 * @dev: net device
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct at91_priv *priv = netdev_priv(dev);
	struct can_frame *cf;
	u32 timestamp;
	int err;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;
}

/**
 * at91_mailbox_read - read CAN msg from mailbox
 * @offload: rx-offload
 * @mb: mailbox number to read from
 * @timestamp: pointer to 32 bit timestamp
 * @drop: true indicates mailbox to mark as read and drop frame
 *
 * Reads a CAN message from the given mailbox if not empty.
 */
static struct sk_buff *at91_mailbox_read(struct can_rx_offload *offload,
					 unsigned int mb, u32 *timestamp,
					 bool drop)
{
	const struct at91_priv *priv = rx_offload_to_priv(offload);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 reg_msr, reg_mid;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	if (!(reg_msr & AT91_MSR_MRDY))
		return NULL;

	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK | AT91_MID_MIDVB_MASK, reg_mid) |
			CAN_EFF_FLAG;
	else
		cf->can_id = FIELD_GET(AT91_MID_MIDVA_MASK, reg_mid);

	/* extend timestamp to full 32 bit */
	*timestamp = FIELD_GET(AT91_MSR_MTIMESTAMP_MASK, reg_msr) << 16;

	cf->len = can_cc_dlc2len(FIELD_GET(AT91_MSR_MDLC_MASK, reg_msr));

	if (reg_msr & AT91_MSR_MRTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	/* MMI on the overwrite mailbox signals a lost frame */
	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(offload->dev);

 mark_as_read:
	at91_write(priv, AT91_MCR(mb), AT91_MCR_MTCR);

	return skb;
}

/* theory of operation:
 *
 * priv->tx_tail holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_tail to priv->tx_head and check if the
 * packet has been transmitted, echo it back to the CAN framework. If
 * we discover a not yet transmitted package, stop looking for more.
 *
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	for (/* nix */; (priv->tx_head - priv->tx_tail) > 0; priv->tx_tail++) {
		mb = get_tx_tail_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/* only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care about the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (unlikely(!(reg_msr & AT91_MSR_MRDY &&
			       ~reg_msr & AT91_MSR_MABT)))
			continue;

		/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
		dev->stats.tx_bytes +=
			can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
		dev->stats.tx_packets++;
	}

	/* restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_head & get_head_mask(priv)) != 0 ||
	    (priv->tx_tail & get_head_mask(priv)) == 0)
		netif_wake_queue(dev);
}

/* Handle error-active/warning/passive/bus-off state transitions */
static void at91_irq_err_line(struct net_device *dev, const u32 reg_sr)
{
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state, rx_state, tx_state;
	struct at91_priv *priv = netdev_priv(dev);
	struct can_berr_counter bec;
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 timestamp;
	int err;

	at91_get_berr_counter(dev, &bec);
	can_state_get_by_berr_counter(dev, &bec, &tx_state, &rx_state);

	/* The chip automatically recovers from bus-off after 128
	 * occurrences of 11 consecutive recessive bits.
	 *
	 * After an auto-recovered bus-off, the error counters no
	 * longer reflect this fact. On the sam9263 the state bits in
	 * the SR register show the current state (based on the
	 * current error counters), while on sam9x5 and newer SoCs
	 * these bits are latched.
	 *
	 * Take any latched bus-off information from the SR register
	 * into account when calculating the CAN new state, to start
	 * the standard CAN bus off handling.
	 */
	if (reg_sr & AT91_IRQ_BOFF)
		rx_state = CAN_STATE_BUS_OFF;

	new_state = max(tx_state, rx_state);

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return;

	/* The skb allocation might fail, but can_change_state()
	 * handles cf == NULL.
	 */
	skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
	can_change_state(dev, cf, tx_state, rx_state);

	if (new_state == CAN_STATE_BUS_OFF) {
		at91_chip_stop(dev, CAN_STATE_BUS_OFF);
		can_bus_off(dev);
	}

	if (unlikely(!skb))
		return;

	if (new_state != CAN_STATE_BUS_OFF) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	}

	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;
}

/* Translate bus-error interrupt flags into a CAN error frame */
static void at91_irq_err_frame(struct net_device *dev, const u32 reg_sr)
{
	struct net_device_stats *stats = &dev->stats;
	struct at91_priv *priv = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp;
	int err;

	priv->can.can_stats.bus_error++;

	skb = at91_alloc_can_err_skb(dev, &cf, &timestamp);
	if (cf)
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CRC error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
	}

	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "Stuff error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "NACK error\n");

		stats->tx_errors++;
		if (cf) {
			cf->can_id |= CAN_ERR_ACK;
			cf->data[2] |= CAN_ERR_PROT_TX;
		}
	}

	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "Format error\n");

		stats->rx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "Bit error\n");

		stats->tx_errors++;
		if (cf)
			cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
	}

	if (!cf)
		return;

	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
	if (err)
		stats->rx_fifo_errors++;
}

/* Read SR, accumulate it into *reg_sr_p and return only the RX mailbox bits */
static u32 at91_get_reg_sr_rx(const struct at91_priv *priv, u32 *reg_sr_p)
{
	const u32 reg_sr = at91_read(priv, AT91_SR);

	*reg_sr_p |= reg_sr;

	return reg_sr & get_irq_mb_rx(priv);
}

static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr = 0, reg_sr_rx;
	int ret;

	/* Receive interrupt
	 * Some bits of AT91_SR are cleared on read, keep them in reg_sr.
	 */
	while ((reg_sr_rx = at91_get_reg_sr_rx(priv, &reg_sr))) {
		ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
							   reg_sr_rx);
		handled = IRQ_HANDLED;

		if (!ret)
			break;
	}

	/* Transmission complete interrupt */
	if (reg_sr & get_irq_mb_tx(priv)) {
		at91_irq_tx(dev, reg_sr);
		handled = IRQ_HANDLED;
	}

	/* Line Error interrupt */
	if (reg_sr & AT91_IRQ_ERR_LINE ||
	    priv->can.state > CAN_STATE_ERROR_ACTIVE) {
		at91_irq_err_line(dev, reg_sr);
		handled = IRQ_HANDLED;
	}

	/* Frame Error Interrupt */
	if (reg_sr & AT91_IRQ_ERR_FRAME) {
		at91_irq_err_frame(dev, reg_sr);
		handled = IRQ_HANDLED;
	}

	if (handled)
		can_rx_offload_irq_finish(&priv->offload);

	return handled;
}

static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	err = phy_power_on(priv->transceiver);
	if (err)
		return err;

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out_phy_power_off;

	err = clk_prepare_enable(priv->clk);
	if (err)
		goto out_close_candev;

	/* register interrupt handler */
	err = request_irq(dev->irq, at91_irq, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		goto out_clock_disable_unprepare;

	/* start chip and queuing */
	at91_chip_start(dev);
	can_rx_offload_enable(&priv->offload);
	netif_start_queue(dev);

	return 0;

 out_clock_disable_unprepare:
	clk_disable_unprepare(priv->clk);
 out_close_candev:
	close_candev(dev);
 out_phy_power_off:
	phy_power_off(priv->transceiver);

	return err;
}

/* stop CAN bus activity
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	can_rx_offload_disable(&priv->offload);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable_unprepare(priv->clk);
	phy_power_off(priv->transceiver);

	close_candev(dev);

	return 0;
}

/* Restart callback used by the CAN core (e.g. after bus-off) */
static int at91_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		at91_chip_start(dev);
		netif_wake_queue(dev);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct net_device_ops at91_netdev_ops = {
	.ndo_open = at91_open,
	.ndo_stop = at91_close,
	.ndo_start_xmit = at91_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct ethtool_ops at91_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

/* sysfs: show the CAN-ID programmed into the disabled mailbox 0 */
static ssize_t mb0_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct at91_priv *priv = netdev_priv(to_net_dev(dev));

	if (priv->mb0_id & CAN_EFF_FLAG)
		return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
	else
		return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
}

/* sysfs: set the mailbox 0 CAN-ID; only allowed while the interface is down */
static ssize_t mb0_id_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct net_device *ndev = to_net_dev(dev);
	struct at91_priv *priv = netdev_priv(ndev);
	unsigned long can_id;
	ssize_t ret;
	int err;

	rtnl_lock();

	if (ndev->flags & IFF_UP) {
		ret = -EBUSY;
		goto out;
	}

	err = kstrtoul(buf, 0, &can_id);
	if (err) {
		ret = err;
		goto out;
	}

	if (can_id & CAN_EFF_FLAG)
		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
	else
		can_id &= CAN_SFF_MASK;

	priv->mb0_id = can_id;
	ret = count;

 out:
	rtnl_unlock();
	return ret;
}

static DEVICE_ATTR_RW(mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
	NULL,
};

static const struct attribute_group at91_sysfs_attr_group = {
	.attrs = at91_sysfs_attrs,
};

#if defined(CONFIG_OF)
static const struct of_device_id at91_can_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9x5-can",
		.data = &at91_at91sam9x5_data,
	}, {
		.compatible = "atmel,at91sam9263-can",
		.data = &at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
#endif

/* Pick the devtype data from the DT match or the platform device id */
static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
		if (!match) {
			dev_err(&pdev->dev, "no matching node found in dtb\n");
			return NULL;
		}
		return (const struct at91_devtype_data *)match->data;
	}
	return (const struct at91_devtype_data *)
		platform_get_device_id(pdev)->driver_data;
}

static int at91_can_probe(struct platform_device *pdev)
{
	const struct at91_devtype_data *devtype_data;
	struct phy *transceiver;
	struct net_device *dev;
	struct at91_priv *priv;
	struct resource *res;
	struct clk *clk;
	void __iomem *addr;
	int err, irq;

	devtype_data = at91_can_get_driver_data(pdev);
	if (!devtype_data) {
		dev_err(&pdev->dev, "no driver data\n");
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = -ENODEV;
		goto exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq <= 0) {
		err = -ENODEV;
		goto exit_put;
	}

	if (!request_mem_region(res->start,
				resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto exit_put;
	}

	addr = ioremap(res->start, resource_size(res));
	if (!addr) {
		err = -ENOMEM;
		goto exit_release;
	}

	/* one echo slot per TX mailbox */
	dev = alloc_candev(sizeof(struct at91_priv),
			   1 << devtype_data->tx_shift);
	if (!dev) {
		err = -ENOMEM;
		goto exit_iounmap;
	}

	transceiver = devm_phy_optional_get(&pdev->dev, NULL);
	if (IS_ERR(transceiver)) {
		err = PTR_ERR(transceiver);
		dev_err_probe(&pdev->dev, err, "failed to get phy\n");
		goto exit_iounmap;
	}

	dev->netdev_ops = &at91_netdev_ops;
	dev->ethtool_ops = &at91_ethtool_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO;

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &at91_bittiming_const;
	priv->can.do_set_mode = at91_set_mode;
	priv->can.do_get_berr_counter = at91_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_LISTENONLY;
	priv->reg_base = addr;
	priv->devtype_data = *devtype_data;
	priv->clk = clk;
	priv->pdata = dev_get_platdata(&pdev->dev);
	priv->mb0_id = 0x7ff;
	priv->offload.mailbox_read = at91_mailbox_read;
	priv->offload.mb_first = devtype_data->rx_first;
	priv->offload.mb_last = devtype_data->rx_last;

	can_rx_offload_add_timestamp(dev, &priv->offload);

	if (transceiver)
		priv->can.bitrate_max = transceiver->attrs.max_link_rate;

	/* mb0_id sysfs knob only exists on sam9263 (mailbox 0 errata) */
	if (at91_is_sam9263(priv))
		dev->sysfs_groups[0] = &at91_sysfs_attr_group;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto exit_free;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->reg_base, dev->irq);

	return 0;

 exit_free:
	free_candev(dev);
 exit_iounmap:
	iounmap(addr);
 exit_release:
	release_mem_region(res->start, resource_size(res));
 exit_put:
	clk_put(clk);
 exit:
	return err;
}

static void at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	free_candev(dev);
}

static const struct platform_device_id at91_can_id_table[] = {
	{
		.name = "at91sam9x5_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
	}, {
		.name = "at91_can",
		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, at91_can_id_table);

static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove_new = at91_can_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(at91_can_dt_ids),
	},
	.id_table = at91_can_id_table,
};

module_platform_driver(at91_can_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");