// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2023, 2024 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//

#include <linux/clocksource.h>

#include "rockchip_canfd.h"

static u64 rkcanfd_timestamp_read(const struct cyclecounter *cc)
{
	const struct rkcanfd_priv *priv = container_of(cc, struct rkcanfd_priv, cc);

	return rkcanfd_get_timestamp(priv);
}

void rkcanfd_skb_set_timestamp(const struct rkcanfd_priv *priv,
			       struct sk_buff *skb, const u32 timestamp)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	u64 ns;

	ns = timecounter_cyc2time(&priv->tc, timestamp);

	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void rkcanfd_timestamp_work(struct work_struct *work)
{
	const struct delayed_work *delayed_work = to_delayed_work(work);
	struct rkcanfd_priv *priv;

	priv = container_of(delayed_work, struct rkcanfd_priv, timestamp);
	timecounter_read(&priv->tc);

	schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
}

void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
{
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	const struct can_bittiming *bt = &priv->can.bittiming;
	struct cyclecounter *cc = &priv->cc;
	u32 bitrate, div, reg, rate;
	u64 work_delay_ns;
	u64 max_cycles;

	/* At the standard clock rate of 300 MHz on the rk3568, the 32
	 * bit timer overflows every 14 s (2^32 cycles / 300 MHz). This
	 * means that we have to poll it quite often to avoid missing a
	 * wrap around.
	 *
	 * Divide it down to a reasonable rate, at least twice the bit
	 * rate.
	 */
	bitrate = max(bt->bitrate, dbt->bitrate);
	div = min(DIV_ROUND_UP(priv->can.clock.freq, bitrate * 2),
		  FIELD_MAX(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE) + 1);

	reg = FIELD_PREP(RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_PRESCALE,
			 div - 1) |
		RKCANFD_REG_TIMESTAMP_CTRL_TIME_BASE_COUNTER_ENABLE;
	rkcanfd_write(priv, RKCANFD_REG_TIMESTAMP_CTRL, reg);

	cc->read = rkcanfd_timestamp_read;
	cc->mask = CYCLECOUNTER_MASK(32);

	rate = priv->can.clock.freq / div;
	clocks_calc_mult_shift(&cc->mult, &cc->shift, rate, NSEC_PER_SEC,
			       RKCANFD_TIMESTAMP_WORK_MAX_DELAY_SEC);

	/* Read the timecounter well before the cycle counter wraps or
	 * the cycles-to-ns multiplication overflows: poll at a third
	 * of the largest convertible interval.
	 */
	max_cycles = div_u64(ULLONG_MAX, cc->mult);
	max_cycles = min(max_cycles, cc->mask);
	work_delay_ns = clocksource_cyc2ns(max_cycles, cc->mult, cc->shift) / 3;
	priv->work_delay_jiffies = nsecs_to_jiffies(work_delay_ns);
	INIT_DELAYED_WORK(&priv->timestamp, rkcanfd_timestamp_work);

	netdev_dbg(priv->ndev, "clock=%lu.%02luMHz bitrate=%lu.%02luMBit/s div=%u rate=%lu.%02luMHz mult=%u shift=%u delay=%lus\n",
		   priv->can.clock.freq / MEGA,
		   priv->can.clock.freq % MEGA / KILO / 10,
		   bitrate / MEGA,
		   bitrate % MEGA / KILO / 10,
		   div,
		   rate / MEGA,
		   rate % MEGA / KILO / 10,
		   cc->mult, cc->shift,
		   priv->work_delay_jiffies / HZ);
}

void rkcanfd_timestamp_start(struct rkcanfd_priv *priv)
{
	timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());

	schedule_delayed_work(&priv->timestamp, priv->work_delay_jiffies);
}

void rkcanfd_timestamp_stop(struct rkcanfd_priv *priv)
{
	cancel_delayed_work(&priv->timestamp);
}

void rkcanfd_timestamp_stop_sync(struct rkcanfd_priv *priv)
{
	cancel_delayed_work_sync(&priv->timestamp);
}