// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Google LLC
 * Author: Vincent Donnefort <vdonnefort@google.com>
 */

#include <linux/cpumask.h>
#include <linux/trace_remote.h>
#include <linux/tracefs.h>
#include <linux/simple_ring_buffer.h>
#include <linux/vmalloc.h>

#include <asm/arch_timer.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyptrace.h>
#include <asm/kvm_mmu.h>

#include "hyp_trace.h"

/* Same 10min used by clocksource when width is more than 32-bits */
#define CLOCK_MAX_CONVERSION_S	600
/*
 * Time to give for the clock init. Long enough to get a good mult/shift
 * estimation. Short enough to not delay the tracing start too much.
 */
#define CLOCK_INIT_MS		100
/*
 * Time between clock checks. Must be small enough to catch clock deviation
 * while it is still tiny.
 */
#define CLOCK_UPDATE_MS		500

static struct hyp_trace_clock {
	u64			cycles;
	u64			cyc_overflow64;
	u64			boot;
	u32			mult;
	u32			shift;
	struct delayed_work	work;
	struct completion	ready;
	struct mutex		lock;
	bool			running;
} hyp_clock;
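
/*
 * Keep the hypervisor clock in sync with the kernel boot clock: on every run,
 * snapshot (boot time, counter cycles), estimate the counter rate and derive
 * a (mult, shift) pair such that ns = (cycles * mult) >> shift.
 *
 * Worked example (illustrative values, not taken from this file): for a 24MHz
 * arch counter, rate = 24000000 and clocks_calc_mult_shift() may come back
 * with mult = 699050667, shift = 24, since
 *
 *	(cycles * 699050667) >> 24 ~= cycles * 41.67ns
 *
 * The epoch (boot, cycles) is fast-forwarded before delta_cycles * mult can
 * overflow 64-bit; with the values above that is every few minutes.
 */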
static void __hyp_clock_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct hyp_trace_clock *hyp_clock;
	struct system_time_snapshot snap;
	u64 rate, delta_cycles;
	u64 boot, delta_boot;

	hyp_clock = container_of(dwork, struct hyp_trace_clock, work);

	ktime_get_snapshot(&snap);
	boot = ktime_to_ns(snap.boot);

	delta_boot = boot - hyp_clock->boot;
	delta_cycles = snap.cycles - hyp_clock->cycles;

	/* Compare hyp clock with the kernel boot clock */
	if (hyp_clock->mult) {
		u64 err, cur = delta_cycles;

		if (WARN_ON_ONCE(cur >= hyp_clock->cyc_overflow64)) {
			__uint128_t tmp = (__uint128_t)cur * hyp_clock->mult;

			cur = tmp >> hyp_clock->shift;
		} else {
			cur *= hyp_clock->mult;
			cur >>= hyp_clock->shift;
		}
		cur += hyp_clock->boot;

		err = abs_diff(cur, boot);
		/* No deviation, only update epoch if necessary */
		if (!err) {
			if (delta_cycles >= (hyp_clock->cyc_overflow64 >> 1))
				goto fast_forward;

			goto resched;
		}

		/* Warn if the error is above tracing precision (1us) */
		if (err > NSEC_PER_USEC)
			pr_warn_ratelimited("hyp trace clock off by %lluus\n",
					    err / NSEC_PER_USEC);
	}

	rate = div64_u64(delta_cycles * NSEC_PER_SEC, delta_boot);

	clocks_calc_mult_shift(&hyp_clock->mult, &hyp_clock->shift,
			       rate, NSEC_PER_SEC, CLOCK_MAX_CONVERSION_S);

	/* Add a comfortable 50% margin */
	hyp_clock->cyc_overflow64 = (U64_MAX / hyp_clock->mult) >> 1;

fast_forward:
	hyp_clock->cycles = snap.cycles;
	hyp_clock->boot = boot;
	kvm_call_hyp_nvhe(__tracing_update_clock, hyp_clock->mult,
			  hyp_clock->shift, hyp_clock->boot, hyp_clock->cycles);
	complete(&hyp_clock->ready);

resched:
	schedule_delayed_work(&hyp_clock->work,
			      msecs_to_jiffies(CLOCK_UPDATE_MS));
}

static void hyp_trace_clock_enable(struct hyp_trace_clock *hyp_clock, bool enable)
{
	struct system_time_snapshot snap;

	if (hyp_clock->running == enable)
		return;

	if (!enable) {
		cancel_delayed_work_sync(&hyp_clock->work);
		hyp_clock->running = false;
		return;
	}

	ktime_get_snapshot(&snap);

	hyp_clock->boot = ktime_to_ns(snap.boot);
	hyp_clock->cycles = snap.cycles;
	hyp_clock->mult = 0;

	init_completion(&hyp_clock->ready);
	INIT_DELAYED_WORK(&hyp_clock->work, __hyp_clock_work);
	schedule_delayed_work(&hyp_clock->work, msecs_to_jiffies(CLOCK_INIT_MS));
	wait_for_completion(&hyp_clock->ready);
	hyp_clock->running = true;
}

/* Access to this struct within the trace_remote_callbacks is protected by the trace_remote lock */
static struct hyp_trace_buffer {
	struct hyp_trace_desc	*desc;
	size_t			desc_size;
} trace_buffer;
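
/*
 * How the buffer becomes visible at EL2: in non-protected mode, __map_hyp()
 * creates the EL2 mapping directly; with pKVM it returns early, as the
 * hypervisor is expected to map the pages itself when they are handed over.
 * Individual ring-buffer pages instead go through kvm_share_hyp() and
 * kvm_unshare_hyp(), which cover both modes.
 */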
static int __map_hyp(void *start, size_t size)
{
	if (is_protected_kvm_enabled())
		return 0;

	return create_hyp_mappings(start, start + size, PAGE_HYP);
}

static int __share_page(unsigned long va)
{
	return kvm_share_hyp((void *)va, (void *)va + 1);
}

static void __unshare_page(unsigned long va)
{
	kvm_unshare_hyp((void *)va, (void *)va + 1);
}

static int hyp_trace_buffer_alloc_bpages_backing(struct hyp_trace_buffer *trace_buffer, size_t size)
{
	int nr_bpages = (PAGE_ALIGN(size) / PAGE_SIZE) + 1;
	size_t backing_size;
	void *start;

	backing_size = PAGE_ALIGN(sizeof(struct simple_buffer_page) * nr_bpages *
				  num_possible_cpus());

	start = alloc_pages_exact(backing_size, GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	trace_buffer->desc->bpages_backing_start = (unsigned long)start;
	trace_buffer->desc->bpages_backing_size = backing_size;

	return __map_hyp(start, backing_size);
}

static void hyp_trace_buffer_free_bpages_backing(struct hyp_trace_buffer *trace_buffer)
{
	free_pages_exact((void *)trace_buffer->desc->bpages_backing_start,
			 trace_buffer->desc->bpages_backing_size);
}

static void hyp_trace_buffer_unshare_hyp(struct hyp_trace_buffer *trace_buffer, int last_cpu)
{
	struct ring_buffer_desc *rb_desc;
	int cpu, p;

	for_each_ring_buffer_desc(rb_desc, cpu, &trace_buffer->desc->trace_buffer_desc) {
		if (cpu > last_cpu)
			break;

		__unshare_page(rb_desc->meta_va);
		for (p = 0; p < rb_desc->nr_page_va; p++)
			__unshare_page(rb_desc->page_va[p]);
	}
}
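
/*
 * Share every meta and data page of the per-CPU ring buffers with the
 * hypervisor. On failure, the inner loop rolls back the pages already shared
 * for the failing CPU, then hyp_trace_buffer_unshare_hyp() unwinds the CPUs
 * that were fully shared before it.
 */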
static int hyp_trace_buffer_share_hyp(struct hyp_trace_buffer *trace_buffer)
{
	struct ring_buffer_desc *rb_desc;
	int cpu, p, ret = 0;

	for_each_ring_buffer_desc(rb_desc, cpu, &trace_buffer->desc->trace_buffer_desc) {
		ret = __share_page(rb_desc->meta_va);
		if (ret)
			break;

		for (p = 0; p < rb_desc->nr_page_va; p++) {
			ret = __share_page(rb_desc->page_va[p]);
			if (ret)
				break;
		}

		if (ret) {
			for (p--; p >= 0; p--)
				__unshare_page(rb_desc->page_va[p]);
			break;
		}
	}

	if (ret)
		hyp_trace_buffer_unshare_hyp(trace_buffer, cpu--);

	return ret;
}
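
/*
 * trace_remote "load" callback: allocate the hypervisor trace descriptor and
 * the simple_buffer_page backing, allocate the per-CPU ring buffers, share
 * everything with EL2 and finally hand the descriptor over via __tracing_load.
 * The error paths unwind in the exact reverse order.
 */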
static struct trace_buffer_desc *hyp_trace_load(unsigned long size, void *priv)
{
	struct hyp_trace_buffer *trace_buffer = priv;
	struct hyp_trace_desc *desc;
	size_t desc_size;
	int ret;

	if (WARN_ON(trace_buffer->desc))
		return ERR_PTR(-EINVAL);

	desc_size = trace_buffer_desc_size(size, num_possible_cpus());
	if (desc_size == SIZE_MAX)
		return ERR_PTR(-E2BIG);

	desc_size = PAGE_ALIGN(desc_size);
	desc = (struct hyp_trace_desc *)alloc_pages_exact(desc_size, GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	ret = __map_hyp(desc, desc_size);
	if (ret)
		goto err_free_desc;

	trace_buffer->desc = desc;
	trace_buffer->desc_size = desc_size;

	ret = hyp_trace_buffer_alloc_bpages_backing(trace_buffer, size);
	if (ret)
		goto err_free_desc;

	ret = trace_remote_alloc_buffer(&desc->trace_buffer_desc, desc_size, size,
					cpu_possible_mask);
	if (ret)
		goto err_free_backing;

	ret = hyp_trace_buffer_share_hyp(trace_buffer);
	if (ret)
		goto err_free_buffer;

	ret = kvm_call_hyp_nvhe(__tracing_load, (unsigned long)desc, desc_size);
	if (ret)
		goto err_unload_pages;

	return &desc->trace_buffer_desc;

err_unload_pages:
	hyp_trace_buffer_unshare_hyp(trace_buffer, INT_MAX);

err_free_buffer:
	trace_remote_free_buffer(&desc->trace_buffer_desc);

err_free_backing:
	hyp_trace_buffer_free_bpages_backing(trace_buffer);

err_free_desc:
	free_pages_exact(desc, desc_size);
	trace_buffer->desc = NULL;

	return ERR_PTR(ret);
}

static void hyp_trace_unload(struct trace_buffer_desc *desc, void *priv)
{
	struct hyp_trace_buffer *trace_buffer = priv;

	if (WARN_ON(desc != &trace_buffer->desc->trace_buffer_desc))
		return;

	kvm_call_hyp_nvhe(__tracing_unload);
	hyp_trace_buffer_unshare_hyp(trace_buffer, INT_MAX);
	trace_remote_free_buffer(desc);
	hyp_trace_buffer_free_bpages_backing(trace_buffer);
	free_pages_exact(trace_buffer->desc, trace_buffer->desc_size);
	trace_buffer->desc = NULL;
}

static int hyp_trace_enable_tracing(bool enable, void *priv)
{
	hyp_trace_clock_enable(&hyp_clock, enable);

	return kvm_call_hyp_nvhe(__tracing_enable, enable);
}

static int hyp_trace_swap_reader_page(unsigned int cpu, void *priv)
{
	return kvm_call_hyp_nvhe(__tracing_swap_reader, cpu);
}

static int hyp_trace_reset(unsigned int cpu, void *priv)
{
	return kvm_call_hyp_nvhe(__tracing_reset, cpu);
}
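
/*
 * Hyp event IDs live in the kernel image (hypervisor .rodata), whose linear
 * map alias may be mapped read-only. Without pKVM, the enabled atomic is
 * therefore flipped through a temporary writable vmap alias of its page.
 */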
static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
{
	struct hyp_event_id *event_id = lm_alias(&__hyp_event_ids_start[id]);
	struct page *page;
	atomic_t *enabled;
	void *map;

	if (is_protected_kvm_enabled())
		return kvm_call_hyp_nvhe(__tracing_enable_event, id, enable);

	enabled = &event_id->enabled;
	page = virt_to_page(enabled);
	map = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
	if (!map)
		return -ENOMEM;

	enabled = map + offset_in_page(enabled);
	atomic_set(enabled, enable);

	vunmap(map);

	return 0;
}

static int hyp_trace_clock_show(struct seq_file *m, void *v)
{
	seq_puts(m, "[boot]\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hyp_trace_clock);

static ssize_t hyp_trace_write_event_write(struct file *f, const char __user *ubuf,
					   size_t cnt, loff_t *pos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	kvm_call_hyp_nvhe(__tracing_write_event, val);

	return cnt;
}

static const struct file_operations hyp_trace_write_event_fops = {
	.write = hyp_trace_write_event_write,
};

static int hyp_trace_init_tracefs(struct dentry *d, void *priv)
{
	if (!tracefs_create_file("write_event", 0200, d, NULL, &hyp_trace_write_event_fops))
		return -ENOMEM;

	return tracefs_create_file("trace_clock", 0440, d, NULL, &hyp_trace_clock_fops) ?
	       0 : -ENOMEM;
}
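
/*
 * Illustrative userspace view (the exact layout is defined by the
 * trace_remote core; the paths below are an assumption, not a contract),
 * with the remote registered as "hypervisor":
 *
 *	echo 1 > /sys/kernel/tracing/hypervisor/tracing_on
 *	cat /sys/kernel/tracing/hypervisor/trace_clock	# prints "[boot]"
 *	echo 62 > /sys/kernel/tracing/hypervisor/write_event
 */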
static struct trace_remote_callbacks trace_remote_callbacks = {
	.init			= hyp_trace_init_tracefs,
	.load_trace_buffer	= hyp_trace_load,
	.unload_trace_buffer	= hyp_trace_unload,
	.enable_tracing		= hyp_trace_enable_tracing,
	.swap_reader_page	= hyp_trace_swap_reader_page,
	.reset			= hyp_trace_reset,
	.enable_event		= hyp_trace_enable_event,
};

static const char *__hyp_enter_exit_reason_str(u8 reason);

#include <asm/kvm_define_hypevents.h>

static const char *__hyp_enter_exit_reason_str(u8 reason)
{
	static const char strs[][12] = {
		"smc",
		"hvc",
		"psci",
		"host_abort",
		"guest_exit",
		"eret_host",
		"eret_guest",
		"unknown",
	};

	return strs[min(reason, HYP_REASON_UNKNOWN)];
}

static void __init hyp_trace_init_events(void)
{
	struct hyp_event_id *hyp_event_id = __hyp_event_ids_start;
	struct remote_event *event = __hyp_events_start;
	int id = 0;

	/* Events on both the kernel and hypervisor sides are sorted the same way */
	for (; event < __hyp_events_end; event++, hyp_event_id++, id++)
		event->id = hyp_event_id->id = id;
}
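
/*
 * The hypervisor timestamps events by reading the counter directly, so it
 * cannot apply the per-CPU unstable-counter (CNTVCT) erratum workarounds the
 * kernel may be using; registration is refused on affected systems.
 */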
int __init kvm_hyp_trace_init(void)
{
	int cpu;

	if (is_kernel_in_hyp_mode())
		return 0;

	for_each_possible_cpu(cpu) {
		const struct arch_timer_erratum_workaround *wa =
			per_cpu(timer_unstable_counter_workaround, cpu);

		if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND) &&
		    wa && wa->read_cntvct_el0) {
			pr_warn("hyp trace can't handle CNTVCT workaround '%s'\n", wa->desc);
			return -EOPNOTSUPP;
		}
	}

	hyp_trace_init_events();

	return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer,
				     __hyp_events_start, __hyp_events_end - __hyp_events_start);
}