xref: /linux/fs/bcachefs/clock.c (revision 3e7819886281e077e82006fe4804b0d6b0f5643b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "clock.h"
4 
5 #include <linux/freezer.h>
6 #include <linux/kthread.h>
7 #include <linux/preempt.h>
8 
9 static inline long io_timer_cmp(io_timer_heap *h,
10 				struct io_timer *l,
11 				struct io_timer *r)
12 {
13 	return l->expire - r->expire;
14 }
15 
16 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
17 {
18 	size_t i;
19 
20 	spin_lock(&clock->timer_lock);
21 
22 	if (time_after_eq((unsigned long) atomic64_read(&clock->now),
23 			  timer->expire)) {
24 		spin_unlock(&clock->timer_lock);
25 		timer->fn(timer);
26 		return;
27 	}
28 
29 	for (i = 0; i < clock->timers.used; i++)
30 		if (clock->timers.data[i] == timer)
31 			goto out;
32 
33 	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
34 out:
35 	spin_unlock(&clock->timer_lock);
36 }
37 
38 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
39 {
40 	size_t i;
41 
42 	spin_lock(&clock->timer_lock);
43 
44 	for (i = 0; i < clock->timers.used; i++)
45 		if (clock->timers.data[i] == timer) {
46 			heap_del(&clock->timers, i, io_timer_cmp, NULL);
47 			break;
48 		}
49 
50 	spin_unlock(&clock->timer_lock);
51 }
52 
/*
 * Per-waiter state for sleeping on the IO clock, lives on the waiter's
 * stack; either timer callback flags expiry and wakes the task.
 */
struct io_clock_wait {
	struct io_timer		io_timer;	/* fires when IO clock reaches expire */
	struct timer_list	cpu_timer;	/* optional wall-clock timeout */
	struct task_struct	*task;		/* task to wake on expiry */
	int			expired;	/* set to 1 by either callback */
};
59 
60 static void io_clock_wait_fn(struct io_timer *timer)
61 {
62 	struct io_clock_wait *wait = container_of(timer,
63 				struct io_clock_wait, io_timer);
64 
65 	wait->expired = 1;
66 	wake_up_process(wait->task);
67 }
68 
69 static void io_clock_cpu_timeout(struct timer_list *timer)
70 {
71 	struct io_clock_wait *wait = container_of(timer,
72 				struct io_clock_wait, cpu_timer);
73 
74 	wait->expired = 1;
75 	wake_up_process(wait->task);
76 }
77 
/*
 * Block until the filesystem's IO clock reaches @until.
 *
 * NOTE(review): schedule() is invoked without a preceding
 * set_current_state(), so this only actually sleeps if the caller has
 * already moved the task out of TASK_RUNNING; otherwise it is just a
 * yield, and the wakeup from io_clock_wait_fn() may race with it.
 * Presumably callers set the task state first - verify against call sites.
 */
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	/* Always cancel: the timer references our stack frame. */
	bch2_io_timer_del(clock, &wait.io_timer);
}
93 
/*
 * Sleep until either the IO clock reaches @io_until or @cpu_timeout
 * jiffies of wall-clock time have elapsed, whichever comes first.
 *
 * For kthreads the wait also terminates on kthread_should_stop(), so a
 * stopping thread isn't stuck waiting for IO that may never happen.
 * Both timers reference this stack frame, so both are torn down before
 * returning (del_timer_sync() + bch2_io_timer_del()).
 */
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	/* On-stack timer: must be paired with destroy_timer_on_stack(). */
	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	do {
		/*
		 * Set state before checking the wake conditions so a wakeup
		 * between the check and schedule() isn't lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	} while (0);

	__set_current_state(TASK_RUNNING);
	/* Synchronously stop the cpu timer before its storage goes away. */
	del_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}
129 
130 static struct io_timer *get_expired_timer(struct io_clock *clock,
131 					  unsigned long now)
132 {
133 	struct io_timer *ret = NULL;
134 
135 	if (clock->timers.used &&
136 	    time_after_eq(now, clock->timers.data[0]->expire))
137 		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
138 	return ret;
139 }
140 
141 void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
142 {
143 	struct io_timer *timer;
144 	unsigned long now = atomic64_add_return(sectors, &clock->now);
145 
146 	spin_lock(&clock->timer_lock);
147 	while ((timer = get_expired_timer(clock, now)))
148 		timer->fn(timer);
149 	spin_unlock(&clock->timer_lock);
150 }
151 
152 void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
153 {
154 	unsigned long now;
155 	unsigned i;
156 
157 	out->atomic++;
158 	spin_lock(&clock->timer_lock);
159 	now = atomic64_read(&clock->now);
160 
161 	for (i = 0; i < clock->timers.used; i++)
162 		prt_printf(out, "%ps:\t%li\n",
163 		       clock->timers.data[i]->fn,
164 		       clock->timers.data[i]->expire - now);
165 	spin_unlock(&clock->timer_lock);
166 	--out->atomic;
167 }
168 
/*
 * Release everything allocated by bch2_io_clock_init(); safe to call on a
 * partially-initialized clock (the frees tolerate NULL/empty members).
 */
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}
174 
175 int bch2_io_clock_init(struct io_clock *clock)
176 {
177 	atomic64_set(&clock->now, 0);
178 	spin_lock_init(&clock->timer_lock);
179 
180 	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();
181 
182 	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
183 	if (!clock->pcpu_buf)
184 		return -BCH_ERR_ENOMEM_io_clock_init;
185 
186 	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
187 		return -BCH_ERR_ENOMEM_io_clock_init;
188 
189 	return 0;
190 }
191