--- dm-thin.c (6f47c7ae8c7afaf9ad291d39f0d3974f191a7946)
+++ dm-thin.c (0a94a469a4f02bdcc223517fd578810ffc21c548)
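Unified view of the change; '-' lines come from the old revision, '+' lines from the new. Three mechanical substitutions run through the file: the target-local __merge_bio_list() helper gives way to the block layer's bio_list_merge_init(), the blk_limits_io_min()/blk_limits_io_opt() wrappers give way to direct queue_limits field assignments, and the discard limits move from max_discard_sectors to max_hw_discard_sectors.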
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011-2012 Red Hat UK.
  *
  * This file is released under the GPL.
  */
 
 #include "dm-thin-metadata.h"

--- 578 unchanged lines hidden ---

         struct thin_c *tc;
         struct dm_deferred_entry *shared_read_entry;
         struct dm_deferred_entry *all_io_entry;
         struct dm_thin_new_mapping *overwrite_mapping;
         struct rb_node rb_node;
         struct dm_bio_prison_cell *cell;
 };
 
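The first two hunks below replace the target-local __merge_bio_list() helper with bio_list_merge_init() from the block layer. A minimal sketch of that helper, paraphrased from include/linux/bio.h in kernels that provide it, to show the conversion is one-for-one:

/*
 * Paraphrased sketch of the block-layer helper the diff switches to:
 * splice everything on @src onto the tail of @dst and leave @src
 * reinitialised to empty -- precisely what the removed
 * __merge_bio_list() open-coded.
 */
static inline void bio_list_merge_init(struct bio_list *dst,
                struct bio_list *src)
{
        bio_list_merge(dst, src);
        bio_list_init(src);
}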
-static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
-{
-        bio_list_merge(bios, master);
-        bio_list_init(master);
-}
-
 static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
         struct bio *bio;
 
         while ((bio = bio_list_pop(bios))) {
                 bio->bi_status = error;
                 bio_endio(bio);
         }
 }
 
 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
                 blk_status_t error)
 {
         struct bio_list bios;
 
         bio_list_init(&bios);
 
         spin_lock_irq(&tc->lock);
-        __merge_bio_list(&bios, master);
+        bio_list_merge_init(&bios, master);
         spin_unlock_irq(&tc->lock);
 
         error_bio_list(&bios, error);
 }
 
 static void requeue_deferred_cells(struct thin_c *tc)
 {
         struct pool *pool = tc->pool;

--- 12 unchanged lines hidden ---
 
 static void requeue_io(struct thin_c *tc)
 {
         struct bio_list bios;
 
         bio_list_init(&bios);
 
         spin_lock_irq(&tc->lock);
-        __merge_bio_list(&bios, &tc->deferred_bio_list);
-        __merge_bio_list(&bios, &tc->retry_on_resume_list);
+        bio_list_merge_init(&bios, &tc->deferred_bio_list);
+        bio_list_merge_init(&bios, &tc->retry_on_resume_list);
         spin_unlock_irq(&tc->lock);
 
         error_bio_list(&bios, BLK_STS_DM_REQUEUE);
         requeue_deferred_cells(tc);
 }
 
 static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {

--- 3422 unchanged lines hidden ---
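In pool_io_hints(), io_min and io_opt are now assigned directly rather than through blk_limits_io_min()/blk_limits_io_opt(). As an assumption for context: the removed io_min wrapper in older block/blk-settings.c also clamped the value up to the logical and physical block sizes, roughly as sketched below; direct assignment suffices once limit validation happens when the queue_limits are committed rather than when each field is set.

/*
 * Hypothetical sketch (assumption, modelled on the old wrapper): io_min
 * was never allowed to drop below the device block sizes.  The suffix
 * marks this as an illustration, not a current kernel API.
 */
static void blk_limits_io_min_sketch(struct queue_limits *limits,
                unsigned int min)
{
        limits->io_min = min;
        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;
        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}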

         /*
          * If the system-determined stacked limits are compatible with the
          * pool's blocksize (io_opt is a factor) do not override them.
          */
         if (io_opt_sectors < pool->sectors_per_block ||
             !is_factor(io_opt_sectors, pool->sectors_per_block)) {
                 if (is_factor(pool->sectors_per_block, limits->max_sectors))
-                        blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
+                        limits->io_min = limits->max_sectors << SECTOR_SHIFT;
                 else
-                        blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
-                blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+                        limits->io_min = pool->sectors_per_block << SECTOR_SHIFT;
+                limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT;
         }
 
         /*
          * pt->adjusted_pf is a staging area for the actual features to use.
          * They get transferred to the live pool in bind_control_target()
          * called from pool_preresume().
          */
 
         if (pt->adjusted_pf.discard_enabled) {
                 disable_discard_passdown_if_not_supported(pt);
                 if (!pt->adjusted_pf.discard_passdown)
-                        limits->max_discard_sectors = 0;
+                        limits->max_hw_discard_sectors = 0;
                 /*
                  * The pool uses the same discard limits as the underlying data
                  * device. DM core has already set this up.
                  */
         } else {
                 /*
                  * Must explicitly disallow stacking discard limits otherwise the
                  * block layer will stack them if pool's data device has support.

--- 380 unchanged lines hidden ---
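This hunk and the thin_io_hints() hunk below both move from max_discard_sectors to max_hw_discard_sectors. The assumption behind the rename: recent kernels split the discard limit into a hardware ceiling owned by the driver and a user limit settable through sysfs, with the effective max_discard_sectors derived at validation time, roughly:

/*
 * Sketch of the assumed derivation: the driver publishes the hardware
 * ceiling, user space may only lower it, and the effective limit is the
 * minimum of the two.  Writing max_hw_discard_sectors = 0 above therefore
 * still disables discards outright.
 */
static unsigned int effective_max_discard_sectors(const struct queue_limits *lim)
{
        return min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
}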

 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
         struct thin_c *tc = ti->private;
         struct pool *pool = tc->pool;
 
         if (pool->pf.discard_enabled) {
                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-                limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+                limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
         }
 }
 
 static struct target_type thin_target = {
         .name = "thin",
         .version = {1, 23, 0},
         .module = THIS_MODULE,
         .ctr = thin_ctr,

--- 60 unchanged lines hidden ---