/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef UDS_VOLUME_H
#define UDS_VOLUME_H

#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/dm-bufio.h>
#include <linux/limits.h>

#include "permassert.h"
#include "thread-utils.h"

#include "chapter-index.h"
#include "config.h"
#include "geometry.h"
#include "indexer.h"
#include "index-layout.h"
#include "index-page-map.h"
#include "radix-sort.h"
#include "sparse-cache.h"

/*
 * The volume manages deduplication records on permanent storage. The term "volume" can also refer
 * to the region of permanent storage where the records (and the chapters containing them) are
 * stored. The volume handles all I/O to this region by reading, caching, and writing chapter pages
 * as necessary.
 */

enum index_lookup_mode {
	/* Always do lookups in all chapters normally */
	LOOKUP_NORMAL,
	/* Only do a subset of lookups needed when rebuilding an index */
	LOOKUP_FOR_REBUILD,
};

struct queued_read {
	bool invalid;
	bool reserved;
	u32 physical_page;
	struct uds_request *first_request;
	struct uds_request *last_request;
};

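/*
 * Each zone has its own search_pending_counter; padding each counter out to a cache line keeps
 * the zones from false-sharing a line when they update their counters concurrently.
 */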
struct __aligned(L1_CACHE_BYTES) search_pending_counter {
	u64 atomic_value;
};

struct cached_page {
	/* Whether this page is currently being read asynchronously */
	bool read_pending;
	/* The physical page stored in this cache entry */
	u32 physical_page;
	/* The value of the volume clock when this page was last used */
	s64 last_used;
	/* The cached page buffer */
	struct dm_buffer *buffer;
	/* The chapter index page, meaningless for record pages */
	struct delta_index_page index_page;
};

struct page_cache {
	/* The number of zones */
	unsigned int zone_count;
	/* The number of volume pages that can be cached */
	u32 indexable_pages;
	/* The maximum number of simultaneously cached pages */
	u16 cache_slots;
	/* An index for each physical page noting where it is in the cache */
	u16 *index;
	/* The array of cached pages */
	struct cached_page *cache;
	/* A counter for each zone tracking if a search is occurring there */
	struct search_pending_counter *search_pending_counters;
	/* The read queue entries as a circular array */
	struct queued_read *read_queue;

	/* All entries above this point are constant after initialization. */

	/*
	 * These values are all indexes into the array of read queue entries. New entries in the
	 * read queue are enqueued at read_queue_last. To dequeue entries, a reader thread gets the
	 * lock and then claims the entry pointed to by read_queue_next_read and increments that
	 * value. After the read is completed, the reader thread calls release_read_queue_entry(),
	 * which increments read_queue_first until it points to a pending read, or is equal to
	 * read_queue_next_read. This means that if multiple reads are outstanding,
	 * read_queue_first might not advance until the last of the reads finishes.
	 */
	u16 read_queue_first;
	u16 read_queue_next_read;
	u16 read_queue_last;

	atomic64_t clock;
};
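
/*
 * A minimal sketch (not part of this interface) of how a reader thread might claim the next
 * queued read while holding read_threads_mutex, following the description of the read queue
 * indexes above. The helper name and the queue_size parameter are hypothetical, and wrap-around
 * of the u16 indexes is simplified to a modulo.
 *
 *	static struct queued_read *claim_next_read(struct page_cache *cache, u16 queue_size)
 *	{
 *		struct queued_read *entry;
 *
 *		if (cache->read_queue_next_read == cache->read_queue_last)
 *			return NULL;
 *
 *		entry = &cache->read_queue[cache->read_queue_next_read % queue_size];
 *		cache->read_queue_next_read++;
 *		return entry;
 *	}
 */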

struct volume {
	struct index_geometry *geometry;
	struct dm_bufio_client *client;
	u64 nonce;
	size_t cache_size;

	/* A single page's worth of records, for sorting */
	const struct uds_volume_record **record_pointers;
	/* Sorter for sorting records within each page */
	struct radix_sorter *radix_sorter;

	struct sparse_cache *sparse_cache;
	struct page_cache page_cache;
	struct index_page_map *index_page_map;

	struct mutex read_threads_mutex;
	struct cond_var read_threads_cond;
	struct cond_var read_threads_read_done_cond;
	struct thread **reader_threads;
	unsigned int read_thread_count;
	bool read_threads_exiting;

	enum index_lookup_mode lookup_mode;
	unsigned int reserved_buffers;
};

int __must_check uds_make_volume(const struct uds_configuration *config,
                                 struct index_layout *layout,
                                 struct volume **new_volume);

void uds_free_volume(struct volume *volume);

int __must_check uds_replace_volume_storage(struct volume *volume,
                                            struct index_layout *layout,
                                            struct block_device *bdev);

int __must_check uds_find_volume_chapter_boundaries(struct volume *volume,
                                                    u64 *lowest_vcn, u64 *highest_vcn,
                                                    bool *is_empty);

int __must_check uds_search_volume_page_cache(struct volume *volume,
                                              struct uds_request *request,
                                              bool *found);

int __must_check uds_search_volume_page_cache_for_rebuild(struct volume *volume,
                                                          const struct uds_record_name *name,
                                                          u64 virtual_chapter,
                                                          bool *found);

int __must_check uds_search_cached_record_page(struct volume *volume,
                                               struct uds_request *request, u32 chapter,
                                               u16 record_page_number, bool *found);

void uds_forget_chapter(struct volume *volume, u64 chapter);

int __must_check uds_write_chapter(struct volume *volume,
                                   struct open_chapter_index *chapter_index,
                                   const struct uds_volume_record records[]);

void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter);

int __must_check uds_read_chapter_index_from_volume(const struct volume *volume,
                                                    u64 virtual_chapter,
                                                    struct dm_buffer *volume_buffers[],
                                                    struct delta_index_page index_pages[]);

int __must_check uds_get_volume_record_page(struct volume *volume, u32 chapter,
                                            u32 page_number, u8 **data_ptr);

int __must_check uds_get_volume_index_page(struct volume *volume, u32 chapter,
                                           u32 page_number,
                                           struct delta_index_page **page_ptr);
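
/*
 * A minimal usage sketch (hypothetical caller, assuming a valid configuration, layout, and
 * request are already set up): create a volume, search its page cache for the request, and free
 * it again. Error handling is reduced to passing the status code back.
 *
 *	int example_lookup(const struct uds_configuration *config, struct index_layout *layout,
 *			   struct uds_request *request, bool *found)
 *	{
 *		struct volume *volume;
 *		int result;
 *
 *		result = uds_make_volume(config, layout, &volume);
 *		if (result != UDS_SUCCESS)
 *			return result;
 *
 *		result = uds_search_volume_page_cache(volume, request, found);
 *		uds_free_volume(volume);
 *		return result;
 *	}
 */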

#endif /* UDS_VOLUME_H */