// SPDX-License-Identifier: GPL-2.0
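/*
 * IO clocks: these clocks tick in sectors of IO rather than in wall-clock
 * time; __bch2_increment_clock() advances a clock as IO completes, and
 * io_timers attached to it fire once a given amount of IO has been done.
 */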
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

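/*
 * min_heap callbacks: pending timers are ordered by expiry, so the
 * soonest-expiring timer always sits at the root of the heap.
 */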
static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct io_timer **_l = (struct io_timer **)l;
	struct io_timer **_r = (struct io_timer **)r;

	return (*_l)->expire < (*_r)->expire;
}

static inline void io_timer_swp(void *l, void *r, void __always_unused *args)
{
	struct io_timer **_l = (struct io_timer **)l;
	struct io_timer **_r = (struct io_timer **)r;

	swap(*_l, *_r);
}

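/*
 * Queue @timer on the clock's heap. If the clock is already at or past
 * timer->expire, the callback runs immediately instead (with timer_lock
 * dropped); adding a timer that is already queued is a no-op.
 */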
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	const struct min_heap_callbacks callbacks = {
		.less = io_timer_cmp,
		.swp = io_timer_swp,
	};

	spin_lock(&clock->timer_lock);

	if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
		spin_unlock(&clock->timer_lock);
		timer->fn(timer);
		return;
	}

	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
out:
	spin_unlock(&clock->timer_lock);
}

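/*
 * Remove @timer from the heap, if present; safe to call whether or not
 * the timer is still queued.
 */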
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	const struct min_heap_callbacks callbacks = {
		.less = io_timer_cmp,
		.swp = io_timer_swp,
	};

	spin_lock(&clock->timer_lock);

	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer) {
			min_heap_del(&clock->timers, i, &callbacks, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

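/*
 * A task waiting on an IO clock, with an optional wall-clock (CPU) timer
 * as a fallback timeout.
 */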
struct io_clock_wait {
	struct io_timer		io_timer;
	struct timer_list	cpu_timer;
	struct task_struct	*task;
	int			expired;
};

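/*
 * Wakeup paths: both the IO-clock timer and the fallback CPU timer mark
 * the wait as expired and wake the waiting task.
 */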
static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

static void io_clock_cpu_timeout(struct timer_list *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, cpu_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

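/*
 * Sleep until the IO clock reaches @until: the IO-clock analogue of
 * schedule_timeout(). As with schedule(), the caller must set the task
 * state beforehand or this returns without waiting.
 */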
void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
{
	struct io_clock_wait wait = {
		.io_timer.expire	= until,
		.io_timer.fn		= io_clock_wait_fn,
		.io_timer.fn2		= (void *) _RET_IP_,
		.task			= current,
	};

	bch2_io_timer_add(clock, &wait.io_timer);
	schedule();
	bch2_io_timer_del(clock, &wait.io_timer);
}

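/*
 * Wait until the IO clock reaches @io_until or @cpu_timeout jiffies have
 * elapsed, whichever comes first; kthread callers also wake early if
 * kthread_should_stop(). Pass MAX_SCHEDULE_TIMEOUT for no wall-clock
 * bound.
 */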
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				u64 io_until, unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait = {
		.io_timer.expire	= io_until,
		.io_timer.fn		= io_clock_wait_fn,
		.io_timer.fn2		= (void *) _RET_IP_,
		.task			= current,
	};

	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	} while (0);

	__set_current_state(TASK_RUNNING);
	del_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}

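/*
 * Pop the soonest-expiring timer off the heap if it has expired as of
 * @now; returns NULL otherwise. Caller must hold timer_lock.
 */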
static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
{
	struct io_timer *ret = NULL;
	const struct min_heap_callbacks callbacks = {
		.less = io_timer_cmp,
		.swp = io_timer_swp,
	};

	if (clock->timers.nr &&
	    time_after_eq64(now, clock->timers.data[0]->expire)) {
		ret = *min_heap_peek(&clock->timers);
		min_heap_pop(&clock->timers, &callbacks, NULL);
	}

	return ret;
}

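/*
 * Advance the clock by @sectors of completed IO and run the callback of
 * every timer that has now expired. Note that callbacks are invoked with
 * timer_lock held.
 */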
void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
{
	struct io_timer *timer;
	u64 now = atomic64_add_return(sectors, &clock->now);

	spin_lock(&clock->timer_lock);
	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
	spin_unlock(&clock->timer_lock);
}

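/* Debug output: print the current clock time and all pending timers. */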
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
	out->atomic++;
	spin_lock(&clock->timer_lock);
	u64 now = atomic64_read(&clock->now);

	printbuf_tabstop_push(out, 40);
	prt_printf(out, "current time:\t%llu\n", now);

	for (unsigned i = 0; i < clock->timers.nr; i++)
		prt_printf(out, "%ps %ps:\t%llu\n",
		       clock->timers.data[i]->fn,
		       clock->timers.data[i]->fn2,
		       clock->timers.data[i]->expire);
	spin_unlock(&clock->timer_lock);
	--out->atomic;
}

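/* Free the timer heap and the percpu IO-count buffer. */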
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

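/*
 * Initialize the clock at time zero and preallocate the timer heap.
 * max_slop (IO_CLOCK_PCPU_SECTORS per possible CPU) appears to bound how
 * far buffered percpu IO counts can lag behind clock->now.
 */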
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -BCH_ERR_ENOMEM_io_clock_init;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -BCH_ERR_ENOMEM_io_clock_init;

	return 0;
}