// SPDX-License-Identifier: GPL-2.0
/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
 * Copyright (c) 2003-2015, 2017 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/timecounter.h>
#include <linux/pci.h>

#include "cavium_ptp.h"

#define DRV_NAME "cavium_ptp"

#define PCI_DEVICE_ID_CAVIUM_PTP	0xA00C
#define PCI_SUBSYS_DEVID_88XX_PTP	0xA10C
#define PCI_SUBSYS_DEVID_81XX_PTP	0xA20C
#define PCI_SUBSYS_DEVID_83XX_PTP	0xA30C
#define PCI_DEVICE_ID_CAVIUM_RST	0xA00E

#define PCI_PTP_BAR_NO	0
#define PCI_RST_BAR_NO	0

#define PTP_CLOCK_CFG		0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN	BIT(0)
#define PTP_CLOCK_LO		0xF08ULL
#define PTP_CLOCK_HI		0xF10ULL
#define PTP_CLOCK_COMP		0xF18ULL

#define RST_BOOT		0x1600ULL
#define CLOCK_BASE_RATE		50000000ULL

static u64 ptp_cavium_clock_get(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	u64 ret = CLOCK_BASE_RATE * 16;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVICE_ID_CAVIUM_RST, NULL);
	if (!pdev)
		goto error;

	base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
	if (!base)
		goto error_put_pdev;

	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);

	iounmap(base);

error_put_pdev:
	pci_dev_put(pdev);

error:
	return ret;
}

struct cavium_ptp *cavium_ptp_get(void)
{
	struct cavium_ptp *ptp;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVICE_ID_CAVIUM_PTP, NULL);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	ptp = pci_get_drvdata(pdev);
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);
	if (IS_ERR(ptp))
		pci_dev_put(pdev);

	return ptp;
}
EXPORT_SYMBOL(cavium_ptp_get);

void cavium_ptp_put(struct cavium_ptp *ptp)
{
	if (!ptp)
		return;
	pci_dev_put(ptp->pdev);
}
EXPORT_SYMBOL(cavium_ptp_put);

/**
 * cavium_ptp_adjfine() - Adjust ptp frequency
 * @ptp_info: PTP clock info
 * @scaled_ppm: how much to adjust by, in parts per million, but with a
 *              16 bit binary fractional field
 */
static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 comp;
	u64 adj;
	bool neg_adj = false;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock
	 * on every coprocessor clock cycle, so by convention the value is
	 * the number of nanoseconds per cycle. It is kept in 64-bit
	 * fixed-point representation: the upper 32 bits hold whole
	 * nanoseconds and the lower 32 bits hold fractions of a nanosecond.
	 * scaled_ppm is the requested frequency correction in parts per
	 * million, carried with a 16-bit binary fractional field.
	 * The new compensation value is computed with 64-bit fixed-point
	 * arithmetic from
	 *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the nominal compensation value, tbase = 1/Hz,
	 * initially programmed in cavium_ptp_probe(). The result is then
	 * written to the PTP_CLOCK_COMP register.
	 */
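	/* Worked example with illustrative numbers, assuming the 800 MHz
	 * fallback rate (CLOCK_BASE_RATE * 16): tbase = (10^9 << 32) /
	 * 800000000 = 0x140000000, i.e. 1.25 ns per cycle in 32.32 fixed
	 * point. A request of +1 ppm arrives as scaled_ppm = 1 << 16 =
	 * 65536, so adj = ((tbase * 65536) >> 16) / 10^6 = 5368 and the
	 * register is reprogrammed with 0x140000000 + 5368, i.e. roughly
	 * 1.25 * (1 + 10^-6) ns per cycle.
	 */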
	comp = ((u64)1000000000ull << 32) / clock->clock_rate;
	adj = comp * scaled_ppm;
	adj >>= 16;
	adj = div_u64(adj, 1000000ull);
	comp = neg_adj ? comp - adj : comp + adj;

	spin_lock_irqsave(&clock->spin_lock, flags);
	writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	return 0;
}

/**
 * cavium_ptp_adjtime() - Adjust ptp time
 * @ptp_info: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;

	spin_lock_irqsave(&clock->spin_lock, flags);
	timecounter_adjtime(&clock->time_counter, delta);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	/* Sync, for network driver to get latest value */
	smp_mb();

	return 0;
}

/**
 * cavium_ptp_gettime() - Get hardware clock time with adjustment
 * @ptp_info: PTP clock info
 * @ts: timespec
 */
static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
			      struct timespec64 *ts)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 nsec;

	spin_lock_irqsave(&clock->spin_lock, flags);
	nsec = timecounter_read(&clock->time_counter);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	*ts = ns_to_timespec64(nsec);

	return 0;
}

/**
 * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
 * @ptp_info: PTP clock info
 * @ts: timespec
 */
static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
			      const struct timespec64 *ts)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 nsec;

	nsec = timespec64_to_ns(ts);

	spin_lock_irqsave(&clock->spin_lock, flags);
	timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	return 0;
}

/**
 * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
 * @ptp_info: PTP clock info
 * @rq: request
 * @on: is it on
 */
static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
			     struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
{
	struct cavium_ptp *clock =
		container_of(cc, struct cavium_ptp, cycle_counter);

	return readq(clock->reg_base + PTP_CLOCK_HI);
}

static int cavium_ptp_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cavium_ptp *clock;
	struct cyclecounter *cc;
	u64 clock_cfg;
	u64 clock_comp;
	int err;

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		err = -ENOMEM;
		goto error;
	}

	clock->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	clock->reg_base = pcim_iomap_region(pdev, PCI_PTP_BAR_NO, pci_name(pdev));
	err = PTR_ERR_OR_ZERO(clock->reg_base);
	if (err)
		goto error_free;

	spin_lock_init(&clock->spin_lock);

	cc = &clock->cycle_counter;
	cc->read = cavium_ptp_cc_read;
	cc->mask = CYCLECOUNTER_MASK(64);
	cc->mult = 1;
	cc->shift = 0;

	timecounter_init(&clock->time_counter, &clock->cycle_counter,
			 ktime_to_ns(ktime_get_real()));

	clock->clock_rate = ptp_cavium_clock_get();

	clock->ptp_info = (struct ptp_clock_info) {
		.owner = THIS_MODULE,
		.name = "ThunderX PTP",
		.max_adj = 1000000000ull,
		.n_ext_ts = 0,
		.n_pins = 0,
		.pps = 0,
		.adjfine = cavium_ptp_adjfine,
		.adjtime = cavium_ptp_adjtime,
		.gettime64 = cavium_ptp_gettime,
		.settime64 = cavium_ptp_settime,
		.enable = cavium_ptp_enable,
	};

	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);

	clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
	writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);

	clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp_clock)) {
		err = PTR_ERR(clock->ptp_clock);
		goto error_stop;
	}

	pci_set_drvdata(pdev, clock);
	return 0;

error_stop:
	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
	pcim_iounmap_region(pdev, PCI_PTP_BAR_NO);

error_free:
	devm_kfree(dev, clock);

error:
	/* For `cavium_ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device and the case when
	 * the probe failed. In the latter case we pretend that the
	 * initialization was successful and keep the error in
	 * `dev->driver_data`.
	 */
	pci_set_drvdata(pdev, ERR_PTR(err));
	return 0;
}
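
/* Consumer-side sketch (illustrative only, not part of this driver): a
 * driver that depends on this clock is expected to handle all three
 * outcomes of cavium_ptp_get(), i.e. -EPROBE_DEFER (not probed yet), the
 * probe error recorded above, or a valid handle:
 *
 *	struct cavium_ptp *ptp = cavium_ptp_get();
 *
 *	if (IS_ERR(ptp))
 *		return PTR_ERR(ptp);
 *	...
 *	cavium_ptp_put(ptp);
 */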

static void cavium_ptp_remove(struct pci_dev *pdev)
{
	struct cavium_ptp *clock = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(clock))
		return;

	ptp_clock_unregister(clock->ptp_clock);

	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
}

static const struct pci_device_id cavium_ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
	{ 0, }
};

static struct pci_driver cavium_ptp_driver = {
	.name = DRV_NAME,
	.id_table = cavium_ptp_id_table,
	.probe = cavium_ptp_probe,
	.remove = cavium_ptp_remove,
};

module_pci_driver(cavium_ptp_driver);

MODULE_DESCRIPTION(DRV_NAME);
MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);