Lines Matching +full:int +full:- +full:clock +full:- +full:stable +full:- +full:broken
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Virtual PTP 1588 clock for use with LM-safe VMclock device.
21 #include <uapi/linux/vmclock-abi.h>
45 int index;
55 (le32_to_cpu((_c)->size) >= (offsetof(struct vmclock_abi, _f) + \
56 sizeof((_c)->_f)))
59 * Multiply a 64-bit count by a 64-bit tick 'period' in units of seconds >> 64
62 * The result is a 128-bit value, the top 64 bits of which are seconds, and
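As a rough illustration of that fixed-point scheme, a standalone userspace sketch (not the kernel helper itself; the function and variable names are invented, and the in-tree helper additionally applies counter_period_shift and adds a base fraction, as its use further down shows):

#include <stdint.h>

/*
 * Multiply a cycle count by a tick period expressed in units of
 * 2^-64 seconds. The top 64 bits of the 128-bit product are whole
 * seconds; the low 64 bits are a binary fraction of a second, which
 * can be scaled to nanoseconds the same way.
 */
static void cycles_to_time(uint64_t cycles, uint64_t period_frac_sec,
			   uint64_t *sec, uint32_t *nsec)
{
	unsigned __int128 prod = (unsigned __int128)cycles * period_frac_sec;
	uint64_t frac_sec = (uint64_t)prod;		/* seconds >> 64 */

	*sec  = (uint64_t)(prod >> 64);			/* whole seconds */
	*nsec = ((unsigned __int128)frac_sec * 1000000000ULL) >> 64;
}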
79 if (likely(clk->time_type == VMCLOCK_TIME_UTC)) in tai_adjust()
82 if (clk->time_type == VMCLOCK_TIME_TAI && in tai_adjust()
83 (le64_to_cpu(clk->flags) & VMCLOCK_FLAG_TAI_OFFSET_VALID)) { in tai_adjust()
85 *sec += (int16_t)le16_to_cpu(clk->tai_offset_sec); in tai_adjust()
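A worked example of that adjustment, under the convention the code above implies (the returned time is UTC, so tai_offset_sec holds UTC minus TAI):

	/*
	 * With today's 37-second TAI-UTC difference, a device publishing
	 * VMCLOCK_TIME_TAI would be expected to set tai_offset_sec = -37:
	 *
	 *   utc_sec = tai_sec + (int16_t)le16_to_cpu(clk->tai_offset_sec)
	 *           = tai_sec - 37
	 */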
91 static int vmclock_get_crosststamp(struct vmclock_state *st, in vmclock_get_crosststamp()
102 * We'd expect the hypervisor to know this and to report the clock in vmclock_get_crosststamp()
106 return -EINVAL; in vmclock_get_crosststamp()
110 seq = le32_to_cpu(st->clk->seq_count) & ~1ULL; in vmclock_get_crosststamp()
118 if (st->clk->clock_status == VMCLOCK_STATUS_UNRELIABLE) in vmclock_get_crosststamp()
119 return -EINVAL; in vmclock_get_crosststamp()
124 * same counter as st->cs_id, in which case all three times in vmclock_get_crosststamp()
137 if (systime_snapshot.cs_id == st->cs_id) { in vmclock_get_crosststamp()
147 delta = cycle - le64_to_cpu(st->clk->counter_value); in vmclock_get_crosststamp()
149 frac_sec = mul_u64_u64_shr_add_u64(&tspec->tv_sec, delta, in vmclock_get_crosststamp()
150 le64_to_cpu(st->clk->counter_period_frac_sec), in vmclock_get_crosststamp()
151 st->clk->counter_period_shift, in vmclock_get_crosststamp()
152 le64_to_cpu(st->clk->time_frac_sec)); in vmclock_get_crosststamp()
153 tspec->tv_nsec = mul_u64_u64_shr(frac_sec, NSEC_PER_SEC, 64); in vmclock_get_crosststamp()
154 tspec->tv_sec += le64_to_cpu(st->clk->time_sec); in vmclock_get_crosststamp()
156 if (!tai_adjust(st->clk, &tspec->tv_sec)) in vmclock_get_crosststamp()
157 return -EINVAL; in vmclock_get_crosststamp()
164 if (seq == le32_to_cpu(st->clk->seq_count)) in vmclock_get_crosststamp()
168 return -ETIMEDOUT; in vmclock_get_crosststamp()
172 system_counter->cycles = cycle; in vmclock_get_crosststamp()
173 system_counter->cs_id = st->cs_id; in vmclock_get_crosststamp()
177 sts->pre_ts = ktime_to_timespec64(systime_snapshot.real); in vmclock_get_crosststamp()
178 if (systime_snapshot.cs_id == st->cs_id) in vmclock_get_crosststamp()
179 sts->post_ts = sts->pre_ts; in vmclock_get_crosststamp()
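The surrounding loop structure is not visible in this match listing; a sketch of the implied seqcount-style retry (the retry bound, its initialisation, and the barrier placement here are assumptions):

	while (tries--) {
		/* Low bit of seq_count marks an update in progress. */
		seq = le32_to_cpu(st->clk->seq_count) & ~1U;
		virt_rmb();

		/* ... read counter_value, time_sec, time_frac_sec ... */

		virt_rmb();
		if (seq == le32_to_cpu(st->clk->seq_count))
			return 0;	/* consistent snapshot */
	}
	return -ETIMEDOUT;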
187 * In the case where the system is using the KVM clock for timekeeping, convert
188 * the TSC value into a KVM clock time in order to return a paired reading that
191 static int vmclock_get_crosststamp_kvmclock(struct vmclock_state *st, in vmclock_get_crosststamp_kvmclock()
197 unsigned int pvti_ver; in vmclock_get_crosststamp_kvmclock()
198 int ret; in vmclock_get_crosststamp_kvmclock()
209 system_counter->cycles = __pvclock_read_cycles(pvti, in vmclock_get_crosststamp_kvmclock()
210 system_counter->cycles); in vmclock_get_crosststamp_kvmclock()
211 system_counter->cs_id = CSID_X86_KVM_CLK; in vmclock_get_crosststamp_kvmclock()
215 * stable and reliable enough across vCPUs that it is sane in vmclock_get_crosststamp_kvmclock()
217 * it as the reference counter, then the KVM clock should be in vmclock_get_crosststamp_kvmclock()
218 * in 'master clock mode' and basically never changed. But in vmclock_get_crosststamp_kvmclock()
219 * the KVM clock is a fickle and often broken thing, so do in vmclock_get_crosststamp_kvmclock()
230 static int ptp_vmclock_get_time_fn(ktime_t *device_time, in ptp_vmclock_get_time_fn()
236 int ret; in ptp_vmclock_get_time_fn()
239 if (READ_ONCE(st->sys_cs_id) == CSID_X86_KVM_CLK) in ptp_vmclock_get_time_fn()
252 static int ptp_vmclock_getcrosststamp(struct ptp_clock_info *ptp, in ptp_vmclock_getcrosststamp()
257 int ret = get_device_system_crosststamp(ptp_vmclock_get_time_fn, st, in ptp_vmclock_getcrosststamp()
261 * On x86, the KVM clock may be used for the system time. We can in ptp_vmclock_getcrosststamp()
265 if (ret == -ENODEV) { in ptp_vmclock_getcrosststamp()
272 WRITE_ONCE(st->sys_cs_id, systime_snapshot.cs_id); in ptp_vmclock_getcrosststamp()
282 * PTP clock operations
285 static int ptp_vmclock_adjfine(struct ptp_clock_info *ptp, long delta) in ptp_vmclock_adjfine()
287 return -EOPNOTSUPP; in ptp_vmclock_adjfine()
290 static int ptp_vmclock_adjtime(struct ptp_clock_info *ptp, s64 delta) in ptp_vmclock_adjtime()
292 return -EOPNOTSUPP; in ptp_vmclock_adjtime()
295 static int ptp_vmclock_settime(struct ptp_clock_info *ptp, in ptp_vmclock_settime()
298 return -EOPNOTSUPP; in ptp_vmclock_settime()
301 static int ptp_vmclock_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, in ptp_vmclock_gettimex()
310 static int ptp_vmclock_enable(struct ptp_clock_info *ptp, in ptp_vmclock_enable()
311 struct ptp_clock_request *rq, int on) in ptp_vmclock_enable()
313 return -EOPNOTSUPP; in ptp_vmclock_enable()
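The static ptp_clock_info that ties these operations together is not among the matched lines; a sketch of how it is presumably populated (field values beyond the operations shown above are assumptions):

static const struct ptp_clock_info ptp_vmclock_info = {
	.owner		= THIS_MODULE,
	.max_adj	= 0,	/* clock is not adjustable from the guest */
	.adjfine	= ptp_vmclock_adjfine,
	.adjtime	= ptp_vmclock_adjtime,
	.gettimex64	= ptp_vmclock_gettimex,
	.settime64	= ptp_vmclock_settime,
	.enable		= ptp_vmclock_enable,
	.getcrosststamp	= ptp_vmclock_getcrosststamp,
};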
336 st->clk->counter_id == VMCLOCK_COUNTER_ARM_VCNT) { in vmclock_ptp_register()
340 st->clk->counter_id == VMCLOCK_COUNTER_X86_TSC) { in vmclock_ptp_register()
347 if (!tai_adjust(st->clk, NULL)) { in vmclock_ptp_register()
352 st->sys_cs_id = cs_id; in vmclock_ptp_register()
353 st->cs_id = cs_id; in vmclock_ptp_register()
354 st->ptp_clock_info = ptp_vmclock_info; in vmclock_ptp_register()
355 strscpy(st->ptp_clock_info.name, st->name); in vmclock_ptp_register()
357 return ptp_clock_register(&st->ptp_clock_info, dev); in vmclock_ptp_register()
360 static int vmclock_miscdev_mmap(struct file *fp, struct vm_area_struct *vma) in vmclock_miscdev_mmap()
362 struct vmclock_state *st = container_of(fp->private_data, in vmclock_miscdev_mmap()
365 if ((vma->vm_flags & (VM_READ|VM_WRITE)) != VM_READ) in vmclock_miscdev_mmap()
366 return -EROFS; in vmclock_miscdev_mmap()
368 if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff) in vmclock_miscdev_mmap()
369 return -EINVAL; in vmclock_miscdev_mmap()
371 if (io_remap_pfn_range(vma, vma->vm_start, in vmclock_miscdev_mmap()
372 st->res.start >> PAGE_SHIFT, PAGE_SIZE, in vmclock_miscdev_mmap()
373 vma->vm_page_prot)) in vmclock_miscdev_mmap()
374 return -EAGAIN; in vmclock_miscdev_mmap()
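A userspace consumer of that read-only mapping might look like this (the device node name, page size, and the <linux/vmclock-abi.h> install path are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vmclock-abi.h>

int main(void)
{
	int fd = open("/dev/vmclock0", O_RDONLY);
	const struct vmclock_abi *clk;

	if (fd < 0)
		return 1;

	/* Exactly one read-only page at offset 0, per the checks above. */
	clk = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (clk == MAP_FAILED)
		return 1;

	/* Fields should be read under the seq_count protocol sketched earlier. */
	printf("clock status: %u\n", (unsigned int)clk->clock_status);

	munmap((void *)clk, 4096);
	close(fd);
	return 0;
}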
382 struct vmclock_state *st = container_of(fp->private_data, in vmclock_miscdev_read()
391 max_count = PAGE_SIZE - *ppos; in vmclock_miscdev_read()
396 seq = le32_to_cpu(st->clk->seq_count) & ~1U; in vmclock_miscdev_read()
400 if (copy_to_user(buf, ((char *)st->clk) + *ppos, count)) in vmclock_miscdev_read()
401 return -EFAULT; in vmclock_miscdev_read()
405 if (seq == le32_to_cpu(st->clk->seq_count)) in vmclock_miscdev_read()
409 return -ETIMEDOUT; in vmclock_miscdev_read()
428 if (st->ptp_clock) in vmclock_remove()
429 ptp_clock_unregister(st->ptp_clock); in vmclock_remove()
431 if (st->miscdev.minor != MISC_DYNAMIC_MINOR) in vmclock_remove()
432 misc_deregister(&st->miscdev); in vmclock_remove()
441 if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) in vmclock_acpi_resources()
445 if (resource_type(&st->res) == IORESOURCE_MEM) in vmclock_acpi_resources()
452 resource_size(res) < sizeof(*st->clk)) in vmclock_acpi_resources()
455 st->res = *res; in vmclock_acpi_resources()
462 static int vmclock_probe_acpi(struct device *dev, struct vmclock_state *st) in vmclock_probe_acpi()
473 return -ENODEV; in vmclock_probe_acpi()
475 status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS, in vmclock_probe_acpi()
477 if (ACPI_FAILURE(status) || resource_type(&st->res) != IORESOURCE_MEM) { in vmclock_probe_acpi()
479 return -ENODEV; in vmclock_probe_acpi()
489 ida_free(&vmclock_ida, st->index); in vmclock_put_idx()
492 static int vmclock_probe(struct platform_device *pdev) in vmclock_probe()
494 struct device *dev = &pdev->dev; in vmclock_probe()
496 int ret; in vmclock_probe()
500 return -ENOMEM; in vmclock_probe()
505 ret = -EINVAL; /* Only ACPI for now */ in vmclock_probe()
512 if (resource_size(&st->res) < VMCLOCK_MIN_SIZE) { in vmclock_probe()
514 resource_size(&st->res)); in vmclock_probe()
515 return -EINVAL; in vmclock_probe()
517 st->clk = devm_memremap(dev, st->res.start, resource_size(&st->res), in vmclock_probe()
519 if (IS_ERR(st->clk)) { in vmclock_probe()
520 ret = PTR_ERR(st->clk); in vmclock_probe()
522 st->clk = NULL; in vmclock_probe()
526 if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC || in vmclock_probe()
527 le32_to_cpu(st->clk->size) > resource_size(&st->res) || in vmclock_probe()
528 le16_to_cpu(st->clk->version) != 1) { in vmclock_probe()
530 return -EINVAL; in vmclock_probe()
537 st->index = ret; in vmclock_probe()
538 ret = devm_add_action_or_reset(&pdev->dev, vmclock_put_idx, st); in vmclock_probe()
542 st->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "vmclock%d", st->index); in vmclock_probe()
543 if (!st->name) in vmclock_probe()
544 return -ENOMEM; in vmclock_probe()
546 st->miscdev.minor = MISC_DYNAMIC_MINOR; in vmclock_probe()
548 ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, st); in vmclock_probe()
558 if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) { in vmclock_probe()
559 st->miscdev.fops = &vmclock_miscdev_fops; in vmclock_probe()
560 st->miscdev.name = st->name; in vmclock_probe()
562 ret = misc_register(&st->miscdev); in vmclock_probe()
567 /* If there is valid clock information, register a PTP clock */ in vmclock_probe()
568 if (VMCLOCK_FIELD_PRESENT(st->clk, time_frac_sec)) { in vmclock_probe()
570 st->ptp_clock = vmclock_ptp_register(dev, st); in vmclock_probe()
571 if (IS_ERR(st->ptp_clock)) { in vmclock_probe()
572 ret = PTR_ERR(st->ptp_clock); in vmclock_probe()
573 st->ptp_clock = NULL; in vmclock_probe()
578 if (!st->miscdev.minor && !st->ptp_clock) { in vmclock_probe()
581 return -ENODEV; in vmclock_probe()
584 dev_info(dev, "%s: registered %s%s%s\n", st->name, in vmclock_probe()
585 st->miscdev.minor ? "miscdev" : "", in vmclock_probe()
586 (st->miscdev.minor && st->ptp_clock) ? ", " : "", in vmclock_probe()
587 st->ptp_clock ? "PTP" : ""); in vmclock_probe()
609 MODULE_DESCRIPTION("PTP clock using VMCLOCK");
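Once the PTP clock is registered it can be read from userspace like any other PHC; a minimal check using the usual fd-to-clockid trick (the /dev/ptp1 index is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp1", O_RDONLY);
	struct timespec ts;

	if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
		return 1;

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}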