Lines matching "read" and "delay" in drivers/md/dm-delay.c (the device-mapper "delay" target)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 */

#include <linux/delay.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"
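/*
 * Context (not part of the matched lines): dm-delay is the device-mapper
 * target that delays reads, writes and flushes by a configurable number of
 * milliseconds, remapping each bio to an underlying device. The fragments
 * below are the lines of drivers/md/dm-delay.c that matched the search;
 * elided lines are marked with ellipsis comments.
 */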
/* struct delay_class */
	unsigned int delay;

/* struct delay_c */
	struct delay_class read;
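/*
 * Inferred from the surrounding file: struct delay_class holds one class's
 * device, start sector, delay in milliseconds and in-flight op count;
 * struct delay_c embeds three such classes, one each for read, write and
 * flush, selected per bio in delay_map().
 */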
/* handle_delayed_timer() */
	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
/* queue_timeout() */
	timer_reduce(&dc->delay_timer, expires);
/* delay_is_fast() */
	return !!dc->worker;
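/*
 * The target has two flushing modes: for short delays it runs a dedicated
 * kthread (dc->worker) that polls the delayed list, otherwise it arms
 * delay_timer, whose handler defers the actual flushing to the "kdelayd"
 * workqueue. delay_is_fast() simply tests whether the worker exists.
 */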
/* flush_bios() */
		n = bio->bi_next;
		bio->bi_next = NULL;
/* flush_delayed_bios() */
	mutex_lock(&dc->process_bios_lock);
	spin_lock(&dc->delayed_bios_lock);
	list_replace_init(&dc->delayed_bios, &local_list);
	spin_unlock(&dc->delayed_bios_lock);
	/* ... */
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			/* ... */
			list_del(&delayed->list);
			/* ... */
			delayed->class->ops--;
		}
		/* ... */
			next_expires = delayed->expires;
		/* ... */
			next_expires = min(next_expires, delayed->expires);
	/* ... */
	spin_lock(&dc->delayed_bios_lock);
	list_splice(&local_list, &dc->delayed_bios);
	spin_unlock(&dc->delayed_bios_lock);
	mutex_unlock(&dc->process_bios_lock);
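/*
 * In the loop elided above, expired entries (or all of them when flush_all
 * is set during suspend) are unlinked and their bios resubmitted; for
 * entries still pending, the earliest expiry is tracked so the timer can
 * be re-armed. Splicing onto a local list keeps the spinlock hold time
 * short, while process_bios_lock serialises concurrent flushers.
 */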
/* flush_worker_fn() */
		spin_lock(&dc->delayed_bios_lock);
		if (unlikely(list_empty(&dc->delayed_bios))) {
			/* ... */
			spin_unlock(&dc->delayed_bios_lock);
			/* ... */
		} else {
			spin_unlock(&dc->delayed_bios_lock);
			fsleep(dc->worker_sleep_us);
		}
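/*
 * Worker loop: with the list empty, the kthread sleeps until delay_bio()
 * wakes it; otherwise it naps for worker_sleep_us microseconds between
 * scans (fsleep() picks a sleep primitive appropriate for the duration).
 */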
/* delay_dtr() */
	struct delay_c *dc = ti->private;

	if (dc->kdelayd_wq) {
		timer_shutdown_sync(&dc->delay_timer);
		destroy_workqueue(dc->kdelayd_wq);
	}
	/* ... */
	if (dc->read.dev)
		dm_put_device(ti, dc->read.dev);
	if (dc->write.dev)
		dm_put_device(ti, dc->write.dev);
	if (dc->flush.dev)
		dm_put_device(ti, dc->flush.dev);
	if (dc->worker)
		kthread_stop(dc->worker);
	/* ... */
	mutex_destroy(&dc->process_bios_lock);
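/*
 * Teardown order: the timer and workqueue are shut down first so no flush
 * work can run afterwards, then the three device references are dropped
 * and the worker kthread is stopped, and only then is the mutex destroyed.
 */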
/* delay_class_ctr() */
	/* ... sector parse from argv[1] ... */
		ti->error = "Invalid device sector";
		return -EINVAL;
	}
	c->start = tmpll;

	if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		return -EINVAL;
	}
	/* ... */
	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
	if (ret) {
		ti->error = "Device lookup failed";
/*
 * Mapping parameters:
 *   <device> <offset> <delay>
 *     [<write_device> <write_offset> <write_delay>
 *      [<flush_device> <flush_offset> <flush_delay>]]
 */
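/*
 * Illustrative usage, not from the matched lines (the device name and the
 * 262144-sector length are placeholders):
 *
 *   echo "0 262144 delay /dev/sdXX 0 500" | dmsetup create delayed
 *
 * creates a target that delays every bio to /dev/sdXX by 500 ms. With 6
 * arguments the second triple covers writes and flushes; with 9, the
 * third triple covers flushes alone.
 */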
/* delay_ctr() */
	if (argc != 3 && argc != 6 && argc != 9) {
		ti->error = "Requires exactly 3, 6 or 9 arguments";
		return -EINVAL;
	}
	/* ... */
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	/* ... */
	ti->private = dc;
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->process_bios_lock);
	spin_lock_init(&dc->delayed_bios_lock);
	dc->may_delay = true;
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);
	/* ... */
	min_delay = max_delay = dc->read.delay;
	/* argc == 3: reuse the read triple for writes and flushes */
	ret = delay_class_ctr(ti, &dc->write, argv);
	/* ... */
	ret = delay_class_ctr(ti, &dc->flush, argv);
	/* ... */

	/* argc >= 6: separate write triple */
	ret = delay_class_ctr(ti, &dc->write, argv + 3);
	/* ... */
	max_delay = max(max_delay, dc->write.delay);
	min_delay = min_not_zero(min_delay, dc->write.delay);

	/* argc == 6: flushes share the write triple */
	ret = delay_class_ctr(ti, &dc->flush, argv + 3);
	/* ... */

	/* argc == 9: separate flush triple */
	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
	/* ... */
	max_delay = max(max_delay, dc->flush.delay);
	min_delay = min_not_zero(min_delay, dc->flush.delay);
	/* ... */
	dc->worker_sleep_us = 1000;
	/* ... */
	dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
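/*
 * Polling-interval arithmetic: the worker sleeps for roughly the smallest
 * configured delay scaled down by SLEEP_SHIFT. Assuming SLEEP_SHIFT is 3,
 * a minimum delay of 8 ms gives (8 * 1000) >> 3 = 1000 us between scans;
 * 1000 us is also the fallback when no class has a nonzero delay.
 */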
	/* ... */
	dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
	if (IS_ERR(dc->worker)) {
		ret = PTR_ERR(dc->worker);
		dc->worker = NULL;
		/* ... */
	}
	/* ... */
	timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
	if (!dc->kdelayd_wq) {
		ret = -EINVAL;
		/* ... */
	}
	/* ... */
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->accounts_remapped_io = true;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
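/*
 * per_io_data_size reserves a struct dm_delay_info in every bio's per-bio
 * data, so the delay bookkeeping needs no allocation on the I/O path;
 * delay_map() and delay_bio() reach it via dm_per_bio_data().
 */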
/* delay_bio() */
	if (!c->delay)
		/* ... */

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	spin_lock(&dc->delayed_bios_lock);
	if (unlikely(!dc->may_delay)) {
		spin_unlock(&dc->delayed_bios_lock);
		/* ... */
	}
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	spin_unlock(&dc->delayed_bios_lock);
	/* ... */
		wake_up_process(dc->worker);
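/*
 * Queueing path: each delayed bio records its owning class and its
 * absolute expiry (jiffies + delay). In fast mode the worker is woken
 * right away; in timer mode the elided branch re-arms the timer through
 * queue_timeout(). Bios arriving after presuspend has cleared may_delay
 * are passed through undelayed.
 */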
/* delay_presuspend() */
	struct delay_c *dc = ti->private;

	spin_lock(&dc->delayed_bios_lock);
	dc->may_delay = false;
	spin_unlock(&dc->delayed_bios_lock);
	/* ... */
	timer_delete(&dc->delay_timer);
/* delay_resume() */
	struct delay_c *dc = ti->private;

	dc->may_delay = true;
/* delay_map() */
	struct delay_c *dc = ti->private;
	/* ... */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		c = &dc->flush;
	else if (bio_data_dir(bio) == WRITE)
		c = &dc->write;
	else
		c = &dc->read;

	delayed->class = c;
	bio_set_dev(bio, c->dev->bdev);
	bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
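/*
 * Class selection: preflush bios use the flush class, other writes the
 * write class, reads the read class; the bio is then retargeted to that
 * class's device and start offset before delay_bio() queues it.
 */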
/* delay_report_zones() */
	struct delay_c *dc = ti->private;
	struct delay_class *c = &dc->read;

	return dm_report_zones(c->dev->bdev, c->start,
			       c->start + dm_target_offset(ti, args->next_sector),
			       args, nr_zones);
399 DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
/* delay_status() */
	struct delay_c *dc = ti->private;
	/* ... */
		DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
	/* ... */
		DMEMIT_DELAY_CLASS(&dc->read);
		if (dc->argc >= 6) {
			/* ... */
			DMEMIT_DELAY_CLASS(&dc->write);
		}
		if (dc->argc >= 9) {
			/* ... */
			DMEMIT_DELAY_CLASS(&dc->flush);
		}
/* delay_iterate_devices() */
	struct delay_c *dc = ti->private;
	/* ... */
	ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
	/* ... */
	ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
	/* ... */
	ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
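/*
 * iterate_devices reports all three class devices so the device-mapper
 * core can validate the table layout and stack queue limits across every
 * underlying device.
 */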
/* struct target_type delay_target */
	.name = "delay",
	/* ... */

module_dm(delay);

MODULE_DESCRIPTION(DM_NAME " delay target");