xref: /linux/arch/loongarch/kernel/time.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: GPL-2.0
/*
 * Common time service routines for LoongArch machines.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>
#include <linux/spinlock.h>

#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/paravirt.h>
#include <asm/time.h>

u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
u64 const_clock_freq;
EXPORT_SYMBOL(const_clock_freq);

static DEFINE_RAW_SPINLOCK(state_lock);
static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);

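/*
 * Placeholder event handler: the clockevents core installs the real
 * tick/hrtimer handler once the device is taken into use.
 */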
static void constant_event_handler(struct clock_event_device *dev)
{
}

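/*
 * Per-CPU timer interrupt: acknowledge the pending timer interrupt via
 * CSR.TINTCLR, then invoke this CPU's clockevent handler.
 */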
static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
	int cpu = smp_processor_id();
	struct clock_event_device *cd;

	/* Clear Timer Interrupt */
	write_csr_tintclear(CSR_TINTCLR_TI);
	cd = &per_cpu(constant_clockevent_device, cpu);
	cd->event_handler(cd);

	return IRQ_HANDLED;
}

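/*
 * One-shot mode: keep the timer enabled but clear the periodic bit in
 * CSR.TCFG, so the countdown fires once and does not auto-reload.
 */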
static int constant_set_state_oneshot(struct clock_event_device *evt)
{
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
	timer_config |= CSR_TCFG_EN;
	timer_config &= ~CSR_TCFG_PERIOD;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

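/*
 * Periodic mode: program the countdown with const_clock_freq / HZ ticks
 * and set the periodic and enable bits so the timer auto-reloads every
 * jiffy.
 */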
static int constant_set_state_periodic(struct clock_event_device *evt)
{
	unsigned long period;
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	period = const_clock_freq / HZ;
	timer_config = period & CSR_TCFG_VAL;
	timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

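/* Shutdown: clear the enable bit in CSR.TCFG to stop the timer. */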
static int constant_set_state_shutdown(struct clock_event_device *evt)
{
	unsigned long timer_config;

	raw_spin_lock(&state_lock);

	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
	timer_config &= ~CSR_TCFG_EN;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	raw_spin_unlock(&state_lock);

	return 0;
}

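/*
 * Reprogram the timer for a one-shot expiry 'delta' counter ticks from
 * now; the value is masked to the width of the CSR.TCFG initial-value
 * field.
 */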
static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned long timer_config;

	delta &= CSR_TCFG_VAL;
	timer_config = delta | CSR_TCFG_EN;
	csr_write64(timer_config, LOONGARCH_CSR_TCFG);

	return 0;
}

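/*
 * Derive loops_per_jiffy from the constant counter frequency so the
 * boot-time delay-loop calibration can be skipped (lpj_fine).
 */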
static unsigned long __init get_loops_per_jiffy(void)
{
	unsigned long lpj = (unsigned long)const_clock_freq;

	do_div(lpj, HZ);

	return lpj;
}

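/*
 * init_offset is written to CSR.CNTC by sync_counter() to bias the value
 * returned by rdtime. save_counter() latches the current counter so the
 * offset can be reapplied later; the variable is __nosavedata so it is
 * not overwritten by a hibernation image.
 */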
static long init_offset __nosavedata;

void save_counter(void)
{
	init_offset = drdtime();
}

void sync_counter(void)
{
	/* Ensure the counter begins at 0 */
	csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}

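/*
 * Register the per-CPU constant timer as a clock event device. The timer
 * interrupt (INT_TI) is mapped and requested only once, by the first CPU
 * to run this; every CPU then registers its own per-CPU device.
 */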
int constant_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long min_delta = 0x600;
	unsigned long max_delta = (1UL << 48) - 1;
	struct clock_event_device *cd;
	static int irq = 0, timer_irq_installed = 0;

	if (!timer_irq_installed) {
		irq = get_percpu_irq(INT_TI);
		if (irq < 0)
			pr_err("Failed to map irq %d (timer)\n", irq);
	}

	cd = &per_cpu(constant_clockevent_device, cpu);

	cd->name = "Constant";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;

	cd->irq = irq;
	cd->rating = 320;
	cd->cpumask = cpumask_of(cpu);
	cd->set_state_oneshot = constant_set_state_oneshot;
	cd->set_state_oneshot_stopped = constant_set_state_shutdown;
	cd->set_state_periodic = constant_set_state_periodic;
	cd->set_state_shutdown = constant_set_state_shutdown;
	cd->set_next_event = constant_timer_next_event;
	cd->event_handler = constant_event_handler;

	clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);

	if (timer_irq_installed)
		return 0;

	timer_irq_installed = 1;

	sync_counter();

	if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
		pr_err("Failed to request irq %d (timer)\n", irq);

	lpj_fine = get_loops_per_jiffy();
	pr_info("Constant clock event device register\n");

	return 0;
}

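/*
 * Both the clocksource and sched_clock read the same free-running
 * constant counter via rdtime.
 */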
static u64 read_const_counter(struct clocksource *clk)
{
	return drdtime();
}

static noinstr u64 sched_clock_read(void)
{
	return drdtime();
}

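/*
 * 64-bit continuous clocksource backed by the constant counter, exposed
 * to the vDSO via VDSO_CLOCKMODE_CPU.
 */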
static struct clocksource clocksource_const = {
	.name = "Constant",
	.rating = 400,
	.read = read_const_counter,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
};

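/*
 * Register the constant counter both as a clocksource and as the
 * sched_clock source, at the frequency determined in time_init().
 */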
int __init constant_clocksource_init(void)
{
	int res;
	unsigned long freq = const_clock_freq;

	res = clocksource_register_hz(&clocksource_const, freq);

	sched_clock_register(sched_clock_read, 64, freq);

	pr_info("Constant clock source device register\n");

	return res;
}

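/*
 * Arch time init: determine the constant counter frequency (from CPUCFG
 * when available, otherwise fall back to cpu_clock_freq), zero-base the
 * counter, then bring up the clock event device, the clocksource and
 * paravirt time support.
 */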
void __init time_init(void)
{
	if (!cpu_has_cpucfg)
		const_clock_freq = cpu_clock_freq;
	else
		const_clock_freq = calc_const_freq();

	init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));

	constant_clockevent_init();
	constant_clocksource_init();
	pv_time_init();
}