xref: /linux/fs/udf/partition.c (revision 7210de3a328c4df5cb8b25b2ef5703c72d8842e9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * partition.c
 *
 * PURPOSE
 *      Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 *
 */

#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mutex.h>

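/*
 * udf_get_pblock() - Map a partition-relative block to a physical block
 * @sb:		super block of the mounted UDF filesystem
 * @block:	block number relative to the partition start
 * @partition:	partition reference number
 * @offset:	additional block offset to apply
 *
 * Dispatch to the partition map's translation function (virtual, sparable
 * or metadata partitions) when one is set, otherwise return the block's
 * location relative to the physical partition root. Returns 0xFFFFFFFF for
 * an invalid partition reference.
 */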
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;

	if (partition >= sbi->s_partitions) {
		udf_debug("block=%u, partition=%u, offset=%u: invalid partition\n",
			  block, partition, offset);
		return 0xFFFFFFFF;
	}
	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}

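/*
 * udf_get_pblock_virt15() - Translate a virtual block through the VAT
 * @sb:		super block of the mounted UDF filesystem
 * @block:	virtual block number (index into the VAT)
 * @partition:	virtual partition reference number
 * @offset:	additional block offset to apply
 *
 * Look the block up in the Virtual Allocation Table kept in the VAT inode
 * and translate the result through the underlying partition. Returns
 * 0xFFFFFFFF if the block lies beyond the end of the VAT, the VAT block
 * cannot be read, or the lookup would recurse into the same virtual
 * partition.
 */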
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock;
	uint32_t index;
	uint32_t loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
	int err;

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;

	if (block > vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT (%u max %u)\n",
			  block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loc = le32_to_cpu(((__le32 *)(iinfo->i_data +
			vdata->s_start_offset))[block]);
		goto translate;
	}
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	bh = udf_bread(sbi->s_vat_inode, newblock, 0, &err);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%u,%u)\n",
			  sb, block, partition);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

	brelse(bh);

translate:
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}

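/*
 * udf_get_pblock_virt20() - Translate a virtual block on UDF 2.xx media
 *
 * UDF 2.xx virtual partitions use the same VAT lookup as UDF 1.50 once the
 * VAT inode has been set up, so simply reuse the 1.50 translation.
 */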
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}

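/*
 * udf_get_pblock_spar15() - Map a block in a sparable partition
 * @sb:		super block of the mounted UDF filesystem
 * @block:	block number relative to the partition start
 * @partition:	sparable partition reference number
 * @offset:	additional block offset to apply
 *
 * If the packet containing the block has been remapped by the sparing
 * table, return the corresponding block in the spare area; otherwise
 * return the block's location in the physical partition.
 */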
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

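	/* Use the first loaded copy of the sparing table */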
	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
					sdata->s_spar_map[i]->b_data;
			break;
		}
	}

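	/*
	 * Sparing table entries are kept sorted by original location, so the
	 * scan can stop as soon as it passes the packet being looked up.
	 */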
	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);

			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
						(sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}

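/*
 * udf_relocate_blocks() - Relocate a block into the spare area
 * @sb:		super block of the mounted UDF filesystem
 * @old_block:	physical block to relocate
 * @new_block:	output; physical block in the spare area to use instead
 *
 * Find the sparable partition containing @old_block and remap the packet
 * containing it through the sparing table: reuse an existing mapping if the
 * packet was already relocated, otherwise claim a free entry and update all
 * loaded copies of the sparing table. Returns 0 on success and 1 if the
 * block cannot be relocated.
 */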
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;
	int ret = 0;

	mutex_lock(&sbi->s_alloc_mutex);
	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
						~(sdata->s_packet_len - 1);

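			/* Use the first loaded copy of the sparing table */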
			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st) {
				ret = 1;
				goto out;
			}

			reallocationTableLen =
					le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
								bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						     ((old_block -
							map->s_partition_root) &
						     (sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc == packet) {
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						     ((old_block -
							map->s_partition_root) &
						     (sdata->s_packet_len - 1));
					ret = 0;
					goto out;
				} else if (origLoc > packet)
					break;
			}

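			/*
			 * No entry covers this packet yet: claim the first
			 * free entry, point it at the packet and move it to
			 * slot k so the table stays sorted by original
			 * location.
			 */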
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}
				*new_block =
					le32_to_cpu(
					      st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				ret = 0;
				goto out;
			}

			ret = 1;
			goto out;
		} /* if old_block */
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		ret = 1;
	}

out:
	mutex_unlock(&sbi->s_alloc_mutex);
	return ret;
}

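/*
 * udf_try_read_meta() - Map a metadata-partition block via a metadata inode
 * @inode:	metadata file (or mirror file) inode to map through
 * @block:	block number relative to the metadata partition
 * @partition:	metadata partition reference number
 * @offset:	additional block offset to apply
 *
 * Look up the extent covering @block in @inode and translate it through the
 * underlying sparable/physical partition. Returns 0xFFFFFFFF if the block
 * is not backed by a recorded extent.
 */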
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
					uint16_t partition, uint32_t offset)
{
	struct super_block *sb = inode->i_sb;
	struct udf_part_map *map;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t ext_offset;
	struct extent_position epos = {};
	uint32_t phyblock;

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
						(EXT_RECORDED_ALLOCATED >> 30))
		phyblock = 0xFFFFFFFF;
	else {
		map = &UDF_SB(sb)->s_partmaps[partition];
		/* map to sparable/physical partition desc */
		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
			map->s_type_specific.s_metadata.s_phys_partition_ref,
			ext_offset + offset);
	}

	brelse(epos.bh);
	return phyblock;
}

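/*
 * udf_get_pblock_meta25() - Map a block in a UDF 2.50 metadata partition
 * @sb:		super block of the mounted UDF filesystem
 * @block:	block number relative to the metadata partition
 * @partition:	metadata partition reference number
 * @offset:	additional block offset to apply
 *
 * Translate the block through the metadata file. If that fails, load the
 * metadata mirror file on first use and retry the lookup through it.
 * Returns 0xFFFFFFFF if neither file can resolve the block.
 */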
uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
				uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	uint32_t retblk;
	struct inode *inode;

	udf_debug("READING from METADATA\n");

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;

	if (!inode)
		return 0xFFFFFFFF;

	retblk = udf_try_read_meta(inode, block, partition, offset);
	if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
		udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
		if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
			mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
				mdata->s_mirror_file_loc,
				mdata->s_phys_partition_ref);
			if (IS_ERR(mdata->s_mirror_fe))
				mdata->s_mirror_fe = NULL;
			mdata->s_flags |= MF_MIRROR_FE_LOADED;
		}

		inode = mdata->s_mirror_fe;
		if (!inode)
			return 0xFFFFFFFF;
		retblk = udf_try_read_meta(inode, block, partition, offset);
	}

	return retblk;
}