// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Red Hat
 */

#include "packer.h"

#include <linux/atomic.h>
#include <linux/blkdev.h>

#include "logger.h"
#include "memory-alloc.h"
#include "permassert.h"
#include "string-utils.h"

#include "admin-state.h"
#include "completion.h"
#include "constants.h"
#include "data-vio.h"
#include "dedupe.h"
#include "encodings.h"
#include "io-submitter.h"
#include "physical-zone.h"
#include "status-codes.h"
#include "vdo.h"
#include "vio.h"

static const struct version_number COMPRESSED_BLOCK_1_0 = {
	.major_version = 1,
	.minor_version = 0,
};

#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
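/* The header layout: two 4-byte version fields, then one little-endian u16 fragment size per slot. */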

/**
 * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
 *                                       block.
 * @mapping_state: The mapping state for the lookup.
 * @block: The compressed block that was read from disk.
 * @fragment_offset: The offset of the fragment within the compressed block.
 * @fragment_size: The size of the fragment.
 *
 * Return: VDO_SUCCESS if a valid compressed fragment is found; VDO_INVALID_FRAGMENT otherwise.
 */
int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
				      struct compressed_block *block,
				      u16 *fragment_offset, u16 *fragment_size)
{
	u16 compressed_size;
	u16 offset = 0;
	unsigned int i;
	u8 slot;
	struct version_number version;

	if (!vdo_is_state_compressed(mapping_state))
		return VDO_INVALID_FRAGMENT;

	version = vdo_unpack_version_number(block->header.version);
	if (!vdo_are_same_version(version, COMPRESSED_BLOCK_1_0))
		return VDO_INVALID_FRAGMENT;

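	/* Compressed mapping states encode the fragment's slot number relative to the base state. */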
	slot = mapping_state - VDO_MAPPING_STATE_COMPRESSED_BASE;
	if (slot >= VDO_MAX_COMPRESSION_SLOTS)
		return VDO_INVALID_FRAGMENT;

	compressed_size = __le16_to_cpu(block->header.sizes[slot]);
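	/* The fragment's offset is the sum of the sizes of all fragments in lower-numbered slots. */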
	for (i = 0; i < slot; i++) {
		offset += __le16_to_cpu(block->header.sizes[i]);
		if (offset >= VDO_COMPRESSED_BLOCK_DATA_SIZE)
			return VDO_INVALID_FRAGMENT;
	}

	if ((offset + compressed_size) > VDO_COMPRESSED_BLOCK_DATA_SIZE)
		return VDO_INVALID_FRAGMENT;

	*fragment_offset = offset;
	*fragment_size = compressed_size;
	return VDO_SUCCESS;
}
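
/*
 * A sketch of a hypothetical caller (illustrative only; "state" and "buffer" are stand-ins, not
 * names from this file): after a compressed block is read from disk, the fragment for a given
 * mapping state can be located and copied out like so:
 *
 *	u16 fragment_offset, fragment_size;
 *
 *	if (vdo_get_compressed_block_fragment(state, block, &fragment_offset,
 *					      &fragment_size) == VDO_SUCCESS)
 *		memcpy(buffer, &block->data[fragment_offset], fragment_size);
 */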

/**
 * assert_on_packer_thread() - Check that we are on the packer thread.
 * @packer: The packer.
 * @caller: The function which is asserting.
 */
static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
{
	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
			    "%s() called from packer thread", caller);
}

/**
 * insert_in_sorted_list() - Insert a bin into the list.
 * @packer: The packer.
 * @bin: The bin to move to its sorted position.
 *
 * The list is in ascending order of free space. Since all bins are already in the list, this
 * actually moves the bin to the correct position in the list.
 */
static void insert_in_sorted_list(struct packer *packer, struct packer_bin *bin)
{
	struct packer_bin *active_bin;

	list_for_each_entry(active_bin, &packer->bins, list)
		if (active_bin->free_space > bin->free_space) {
			list_move_tail(&bin->list, &active_bin->list);
			return;
		}

	list_move_tail(&bin->list, &packer->bins);
}

/**
 * make_bin() - Allocate a bin and put it into the packer's list.
 * @packer: The packer.
 */
static int __must_check make_bin(struct packer *packer)
{
	struct packer_bin *bin;
	int result;

	result = vdo_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS,
				       struct vio *, __func__, &bin);
	if (result != VDO_SUCCESS)
		return result;

	bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
	INIT_LIST_HEAD(&bin->list);
	list_add_tail(&bin->list, &packer->bins);
	return VDO_SUCCESS;
}

/**
 * vdo_make_packer() - Make a new block packer.
 *
 * @vdo: The vdo to which this packer belongs.
 * @bin_count: The number of partial bins to keep in memory.
 * @packer_ptr: A pointer to hold the new packer.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **packer_ptr)
{
	struct packer *packer;
	block_count_t i;
	int result;

	result = vdo_allocate(1, struct packer, __func__, &packer);
	if (result != VDO_SUCCESS)
		return result;

	packer->thread_id = vdo->thread_config.packer_thread;
	packer->size = bin_count;
	INIT_LIST_HEAD(&packer->bins);
	vdo_set_admin_state_code(&packer->state, VDO_ADMIN_STATE_NORMAL_OPERATION);

	for (i = 0; i < bin_count; i++) {
		result = make_bin(packer);
		if (result != VDO_SUCCESS) {
			vdo_free_packer(packer);
			return result;
		}
	}

	/*
	 * The canceled bin can hold up to half the number of user vios. Every canceled vio in the
	 * bin must have a canceler for which it is waiting, and any canceler will only have
	 * canceled one lock holder at a time.
	 */
	result = vdo_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2,
				       struct vio *, __func__, &packer->canceled_bin);
	if (result != VDO_SUCCESS) {
		vdo_free_packer(packer);
		return result;
	}

	result = vdo_make_default_thread(vdo, packer->thread_id);
	if (result != VDO_SUCCESS) {
		vdo_free_packer(packer);
		return result;
	}

	*packer_ptr = packer;
	return VDO_SUCCESS;
}

/**
 * vdo_free_packer() - Free a block packer.
 * @packer: The packer to free.
 */
void vdo_free_packer(struct packer *packer)
{
	struct packer_bin *bin, *tmp;

	if (packer == NULL)
		return;

	list_for_each_entry_safe(bin, tmp, &packer->bins, list) {
		list_del_init(&bin->list);
		vdo_free(bin);
	}

	vdo_free(vdo_forget(packer->canceled_bin));
	vdo_free(packer);
}

/**
 * get_packer_from_data_vio() - Get the packer from a data_vio.
 * @data_vio: The data_vio.
 *
 * Return: The packer from the VDO to which the data_vio belongs.
 */
static inline struct packer *get_packer_from_data_vio(struct data_vio *data_vio)
{
	return vdo_from_data_vio(data_vio)->packer;
}

/**
 * vdo_get_packer_statistics() - Get the current statistics from the packer.
 * @packer: The packer to query.
 *
 * Return: a copy of the current statistics for the packer.
 */
struct packer_statistics vdo_get_packer_statistics(const struct packer *packer)
{
	const struct packer_statistics *stats = &packer->statistics;

	return (struct packer_statistics) {
		.compressed_fragments_written = READ_ONCE(stats->compressed_fragments_written),
		.compressed_blocks_written = READ_ONCE(stats->compressed_blocks_written),
		.compressed_fragments_in_packer = READ_ONCE(stats->compressed_fragments_in_packer),
	};
}

/**
 * abort_packing() - Abort packing a data_vio.
 * @data_vio: The data_vio to abort.
 */
static void abort_packing(struct data_vio *data_vio)
{
	struct packer *packer = get_packer_from_data_vio(data_vio);

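	/*
	 * Stats are only mutated on the packer thread; the WRITE_ONCE here pairs with the
	 * READ_ONCE in vdo_get_packer_statistics() so other threads can sample them safely.
	 */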
	WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
		   packer->statistics.compressed_fragments_in_packer - 1);

	write_data_vio(data_vio);
}

/**
 * release_compressed_write_waiter() - Update a data_vio for which a successful compressed write
 *                                     has completed and send it on its way.
 * @data_vio: The data_vio to release.
 * @allocation: The allocation to which the compressed block was written.
 */
static void release_compressed_write_waiter(struct data_vio *data_vio,
					    struct allocation *allocation)
{
	data_vio->new_mapped = (struct zoned_pbn) {
		.pbn = allocation->pbn,
		.zone = allocation->zone,
		.state = data_vio->compression.slot + VDO_MAPPING_STATE_COMPRESSED_BASE,
	};

	vdo_share_compressed_write_lock(data_vio, allocation->lock);
	update_metadata_for_data_vio_write(data_vio, allocation->lock);
}

/**
 * finish_compressed_write() - Finish a compressed block write.
 * @completion: The compressed write completion.
 *
 * This callback is registered in continue_after_allocation().
 */
static void finish_compressed_write(struct vdo_completion *completion)
{
	struct data_vio *agent = as_data_vio(completion);
	struct data_vio *client, *next;

	assert_data_vio_in_allocated_zone(agent);

	/*
	 * Process all the non-agent waiters first to ensure that the pbn lock can not be released
	 * until all of them have had a chance to journal their increfs.
	 */
	for (client = agent->compression.next_in_batch; client != NULL; client = next) {
		next = client->compression.next_in_batch;
		release_compressed_write_waiter(client, &agent->allocation);
	}

	completion->error_handler = handle_data_vio_error;
	release_compressed_write_waiter(agent, &agent->allocation);
}

static void handle_compressed_write_error(struct vdo_completion *completion)
{
	struct data_vio *agent = as_data_vio(completion);
	struct allocation *allocation = &agent->allocation;
	struct data_vio *client, *next;

	if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id))
		return;

	update_vio_error_stats(as_vio(completion),
			       "Completing compressed write vio for physical block %llu with error",
			       (unsigned long long) allocation->pbn);

	for (client = agent->compression.next_in_batch; client != NULL; client = next) {
		next = client->compression.next_in_batch;
		write_data_vio(client);
	}

	/* Now that we've released the batch from the packer, forget the error and continue on. */
	vdo_reset_completion(completion);
	completion->error_handler = handle_data_vio_error;
	write_data_vio(agent);
}

/**
 * add_to_bin() - Put a data_vio in a specific packer_bin in which it will definitely fit.
 * @bin: The bin in which to put the data_vio.
 * @data_vio: The data_vio to add.
 */
static void add_to_bin(struct packer_bin *bin, struct data_vio *data_vio)
{
	data_vio->compression.bin = bin;
	data_vio->compression.slot = bin->slots_used;
	bin->incoming[bin->slots_used++] = data_vio;
}

/**
 * remove_from_bin() - Get the next data_vio whose compression has not been canceled from a bin.
 * @packer: The packer.
 * @bin: The bin from which to get a data_vio.
 *
 * Any canceled data_vios will be moved to the canceled bin.
 *
 * Return: An uncanceled data_vio from the bin or NULL if there are none.
 */
static struct data_vio *remove_from_bin(struct packer *packer, struct packer_bin *bin)
{
	while (bin->slots_used > 0) {
		struct data_vio *data_vio = bin->incoming[--bin->slots_used];

		if (!advance_data_vio_compression_stage(data_vio).may_not_compress) {
			data_vio->compression.bin = NULL;
			return data_vio;
		}

		add_to_bin(packer->canceled_bin, data_vio);
	}

	/* The bin is now empty. */
	bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
	return NULL;
}

/**
 * initialize_compressed_block() - Initialize a compressed block.
 * @block: The compressed block to initialize.
 * @size: The size of the agent's fragment.
 *
 * This method initializes the compressed block in the compressed write agent. Because the
 * compressor already put the agent's compressed fragment at the start of the compressed block's
 * data field, it needn't be copied. So all we need do is initialize the header and set the size of
 * the agent's fragment.
 */
static void initialize_compressed_block(struct compressed_block *block, u16 size)
{
	/*
	 * Make sure the block layout isn't accidentally changed by changing the length of the
	 * block header.
	 */
	BUILD_BUG_ON(sizeof(struct compressed_block_header) != COMPRESSED_BLOCK_1_0_SIZE);

	block->header.version = vdo_pack_version_number(COMPRESSED_BLOCK_1_0);
	block->header.sizes[0] = __cpu_to_le16(size);
}

/**
 * pack_fragment() - Pack a data_vio's fragment into the compressed block in which it is already
 *                   known to fit.
 * @compression: The agent's compression_state to pack into.
 * @data_vio: The data_vio to pack.
 * @offset: The offset into the compressed block at which to pack the fragment.
 * @slot: The compression slot in which to pack the fragment.
 * @block: The compressed block which will be written out when the batch is fully packed.
 *
 * Return: The new amount of space used.
 */
static block_size_t __must_check pack_fragment(struct compression_state *compression,
					       struct data_vio *data_vio,
					       block_size_t offset, slot_number_t slot,
					       struct compressed_block *block)
{
	struct compression_state *to_pack = &data_vio->compression;
	char *fragment = to_pack->block->data;

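	/* Thread this client onto the agent's batch list before copying its fragment into place. */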
	to_pack->next_in_batch = compression->next_in_batch;
	compression->next_in_batch = data_vio;
	to_pack->slot = slot;
	block->header.sizes[slot] = __cpu_to_le16(to_pack->size);
	memcpy(&block->data[offset], fragment, to_pack->size);
	return (offset + to_pack->size);
}

/**
 * compressed_write_end_io() - The bio_end_io for a compressed block write.
 * @bio: The bio for the compressed write.
 */
static void compressed_write_end_io(struct bio *bio)
{
	struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);

	vdo_count_completed_bios(bio);
	set_data_vio_allocated_zone_callback(data_vio, finish_compressed_write);
	continue_data_vio_with_error(data_vio, blk_status_to_errno(bio->bi_status));
}

/**
 * write_bin() - Write out a bin.
 * @packer: The packer.
 * @bin: The bin to write.
 */
static void write_bin(struct packer *packer, struct packer_bin *bin)
{
	int result;
	block_size_t offset;
	slot_number_t slot = 1;
	struct compression_state *compression;
	struct compressed_block *block;
	struct data_vio *agent = remove_from_bin(packer, bin);
	struct data_vio *client;
	struct packer_statistics *stats;

	if (agent == NULL)
		return;

	compression = &agent->compression;
	compression->slot = 0;
	block = compression->block;
	initialize_compressed_block(block, compression->size);
	offset = compression->size;

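	/*
	 * The agent's fragment is already in place in slot 0, so pack each remaining client's
	 * fragment behind it, starting at slot 1.
	 */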
	while ((client = remove_from_bin(packer, bin)) != NULL)
		offset = pack_fragment(compression, client, offset, slot++, block);

	/*
	 * If the batch contains only a single vio, then we save nothing by saving the compressed
	 * form. Continue processing the single vio in the batch.
	 */
	if (slot == 1) {
		abort_packing(agent);
		return;
	}

	if (slot < VDO_MAX_COMPRESSION_SLOTS) {
		/* Clear out the sizes of the unused slots */
		memset(&block->header.sizes[slot], 0,
		       (VDO_MAX_COMPRESSION_SLOTS - slot) * sizeof(__le16));
	}

	agent->vio.completion.error_handler = handle_compressed_write_error;
	if (vdo_is_read_only(vdo_from_data_vio(agent))) {
		continue_data_vio_with_error(agent, VDO_READ_ONLY);
		return;
	}

	result = vio_reset_bio(&agent->vio, (char *) block, compressed_write_end_io,
			       REQ_OP_WRITE, agent->allocation.pbn);
	if (result != VDO_SUCCESS) {
		continue_data_vio_with_error(agent, result);
		return;
	}

	/*
	 * Once the compressed write is submitted, the fragments are no longer in the packer, so
	 * update stats now.
	 */
	stats = &packer->statistics;
	WRITE_ONCE(stats->compressed_fragments_in_packer,
		   (stats->compressed_fragments_in_packer - slot));
	WRITE_ONCE(stats->compressed_fragments_written,
		   (stats->compressed_fragments_written + slot));
	WRITE_ONCE(stats->compressed_blocks_written,
		   stats->compressed_blocks_written + 1);

	vdo_submit_data_vio(agent);
}

/**
 * add_data_vio_to_packer_bin() - Add a data_vio to a bin's incoming queue.
 * @packer: The packer.
 * @bin: The bin to which to add the data_vio.
 * @data_vio: The data_vio to add to the bin's queue.
 *
 * Adds the data_vio to the bin's incoming queue, updates the bin's free space, writes out the bin
 * when it has no room or becomes exactly full, and restores the packer's sort order.
 */
static void add_data_vio_to_packer_bin(struct packer *packer, struct packer_bin *bin,
				       struct data_vio *data_vio)
{
	/* If the selected bin doesn't have room, start a new batch to make room. */
	if (bin->free_space < data_vio->compression.size)
		write_bin(packer, bin);

	add_to_bin(bin, data_vio);
	bin->free_space -= data_vio->compression.size;

	/* If we happen to exactly fill the bin, start a new batch. */
	if ((bin->slots_used == VDO_MAX_COMPRESSION_SLOTS) ||
	    (bin->free_space == 0))
		write_bin(packer, bin);

	/* Now that we've finished changing the free space, restore the sort order. */
	insert_in_sorted_list(packer, bin);
}

/**
 * select_bin() - Select the bin that should be used to pack the compressed data in a data_vio with
 *                other data_vios.
 * @packer: The packer.
 * @data_vio: The data_vio.
 *
 * Return: The bin to pack into, or NULL if the data_vio should give up on being compressed.
 */
static struct packer_bin * __must_check select_bin(struct packer *packer,
						   struct data_vio *data_vio)
{
	/*
	 * First best fit: select the bin with the least free space that has enough room for the
	 * compressed data in the data_vio.
	 */
	struct packer_bin *bin, *fullest_bin;

	list_for_each_entry(bin, &packer->bins, list) {
		if (bin->free_space >= data_vio->compression.size)
			return bin;
	}

	/*
	 * None of the bins have enough space for the data_vio. We're not allowed to create new
	 * bins, so we have to overflow one of the existing bins. It's pretty intuitive to select
	 * the fullest bin, since that "wastes" the least amount of free space in the compressed
	 * block. But if the space currently used in the fullest bin is smaller than the compressed
	 * size of the incoming block, it seems wrong to force that bin to write when giving up on
	 * compressing the incoming data_vio would likewise "waste" the least amount of free space.
	 */
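	/* The bins are sorted by ascending free space, so the first bin is the fullest. */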
	fullest_bin = list_first_entry(&packer->bins, struct packer_bin, list);
	if (data_vio->compression.size >=
	    (VDO_COMPRESSED_BLOCK_DATA_SIZE - fullest_bin->free_space))
		return NULL;

	/*
	 * The fullest bin doesn't have room, but writing it out and starting a new batch with the
	 * incoming data_vio will increase the packer's free space.
	 */
	return fullest_bin;
}

/**
 * vdo_attempt_packing() - Attempt to rewrite the data in this data_vio as part of a compressed
 *                         block.
 * @data_vio: The data_vio to pack.
 */
void vdo_attempt_packing(struct data_vio *data_vio)
{
	int result;
	struct packer_bin *bin;
	struct data_vio_compression_status status = get_data_vio_compression_status(data_vio);
	struct packer *packer = get_packer_from_data_vio(data_vio);

	assert_on_packer_thread(packer, __func__);

	result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
			    "attempt to pack data_vio not ready for packing, stage: %u",
			    status.stage);
	if (result != VDO_SUCCESS)
		return;

	/*
	 * Increment the fragment count whether or not this data_vio will be packed, since
	 * abort_packing() always decrements the counter.
	 */
	WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
		   packer->statistics.compressed_fragments_in_packer + 1);

	/*
	 * If packing of this data_vio is disallowed for administrative reasons, give up before
	 * making any state changes.
	 */
	if (!vdo_is_state_normal(&packer->state) ||
	    (data_vio->flush_generation < packer->flush_generation)) {
		abort_packing(data_vio);
		return;
	}

	/*
	 * The advance_data_vio_compression_stage() check here verifies that the data_vio is
	 * allowed to be compressed (if it has already been canceled, we'll fall out here). Once
	 * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin
	 * before any more requests can be processed by the packer thread. Otherwise, a canceling
	 * data_vio could attempt to remove the canceled data_vio from the packer and fail to
	 * rendezvous with it. Thus, we must call select_bin() first to ensure that we will
	 * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage.
	 */
	bin = select_bin(packer, data_vio);
	if ((bin == NULL) ||
	    (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_PACKING)) {
		abort_packing(data_vio);
		return;
	}

	add_data_vio_to_packer_bin(packer, bin, data_vio);
}

/**
 * check_for_drain_complete() - Check whether the packer has drained.
 * @packer: The packer.
 */
static void check_for_drain_complete(struct packer *packer)
{
	if (vdo_is_state_draining(&packer->state) && (packer->canceled_bin->slots_used == 0))
		vdo_finish_draining(&packer->state);
}

/**
 * write_all_non_empty_bins() - Write out all non-empty bins on behalf of a flush or suspend.
 * @packer: The packer being flushed.
 */
static void write_all_non_empty_bins(struct packer *packer)
{
	struct packer_bin *bin;

	list_for_each_entry(bin, &packer->bins, list)
		write_bin(packer, bin);
		/*
		 * We don't need to re-sort the bin here since this loop will make every bin have
		 * the same amount of free space, so every ordering is sorted.
		 */

	check_for_drain_complete(packer);
}

/**
 * vdo_flush_packer() - Request that the packer flush asynchronously.
 * @packer: The packer to flush.
 *
 * All bins with at least two compressed data blocks will be written out, and any solitary pending
 * VIOs will be released from the packer. While flushing is in progress, any VIOs submitted to
 * vdo_attempt_packing() will be continued immediately without attempting to pack them.
 */
void vdo_flush_packer(struct packer *packer)
{
	assert_on_packer_thread(packer, __func__);
	if (vdo_is_state_normal(&packer->state))
		write_all_non_empty_bins(packer);
}

/**
 * vdo_remove_lock_holder_from_packer() - Remove a lock holder from the packer.
 * @completion: The data_vio which needs a lock held by a data_vio in the packer. The data_vio's
 *              compression.lock_holder field will point to the data_vio to remove.
 */
void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
{
	struct data_vio *data_vio = as_data_vio(completion);
	struct packer *packer = get_packer_from_data_vio(data_vio);
	struct data_vio *lock_holder;
	struct packer_bin *bin;
	slot_number_t slot;

	assert_data_vio_in_packer_zone(data_vio);

	lock_holder = vdo_forget(data_vio->compression.lock_holder);
	bin = lock_holder->compression.bin;
	VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");

	slot = lock_holder->compression.slot;
	bin->slots_used--;
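	/* If the removed data_vio wasn't the last in the bin, move the last one into its slot. */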
	if (slot < bin->slots_used) {
		bin->incoming[slot] = bin->incoming[bin->slots_used];
		bin->incoming[slot]->compression.slot = slot;
	}

	lock_holder->compression.bin = NULL;
	lock_holder->compression.slot = 0;

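	/* The canceled bin does not track free space, so only a real bin needs to be re-sorted. */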
	if (bin != packer->canceled_bin) {
		bin->free_space += lock_holder->compression.size;
		insert_in_sorted_list(packer, bin);
	}

	abort_packing(lock_holder);
	check_for_drain_complete(packer);
}

/**
 * vdo_increment_packer_flush_generation() - Increment the flush generation in the packer.
 * @packer: The packer.
 *
 * This will also cause the packer to flush so that any VIOs from previous generations will exit
 * the packer.
 */
void vdo_increment_packer_flush_generation(struct packer *packer)
{
	assert_on_packer_thread(packer, __func__);
	packer->flush_generation++;
	vdo_flush_packer(packer);
}

/**
 * initiate_drain() - Initiate a drain.
 *
 * Implements vdo_admin_initiator_fn.
 */
static void initiate_drain(struct admin_state *state)
{
	struct packer *packer = container_of(state, struct packer, state);

	write_all_non_empty_bins(packer);
}

/**
 * vdo_drain_packer() - Drain the packer by preventing any more VIOs from entering the packer and
 *                      then flushing.
 * @packer: The packer to drain.
 * @completion: The completion to finish when the packer has drained.
 */
void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion)
{
	assert_on_packer_thread(packer, __func__);
	vdo_start_draining(&packer->state, VDO_ADMIN_STATE_SUSPENDING, completion,
			   initiate_drain);
}

/**
 * vdo_resume_packer() - Resume a packer which has been suspended.
 * @packer: The packer to resume.
 * @parent: The completion to finish when the packer has resumed.
 */
void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent)
{
	assert_on_packer_thread(packer, __func__);
	vdo_continue_completion(parent, vdo_resume_if_quiescent(&packer->state));
}

static void dump_packer_bin(const struct packer_bin *bin, bool canceled)
{
	if (bin->slots_used == 0)
		/* Don't dump empty bins. */
		return;

	vdo_log_info("	  %sBin slots_used=%u free_space=%zu",
		     (canceled ? "Canceled" : ""), bin->slots_used, bin->free_space);

	/*
	 * FIXME: dump vios in bin->incoming? The vios should have been dumped from the vio pool.
	 * Maybe just dump their addresses so it's clear they're here?
	 */
}

/**
 * vdo_dump_packer() - Dump the packer.
 * @packer: The packer.
 *
 * Context: dumps in a thread-unsafe fashion.
 */
void vdo_dump_packer(const struct packer *packer)
{
	struct packer_bin *bin;

	vdo_log_info("packer");
	vdo_log_info("	flushGeneration=%llu state %s  packer_bin_count=%llu",
		     (unsigned long long) packer->flush_generation,
		     vdo_get_admin_state_code(&packer->state)->name,
		     (unsigned long long) packer->size);

	list_for_each_entry(bin, &packer->bins, list)
		dump_packer_bin(bin, false);

	dump_packer_bin(packer->canceled_bin, true);
}