/*
 *  linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * hfs_ext_build_key
 *
 * Build an extents B-tree search key from a file's CNID, fork type and
 * starting allocation block number.
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
	key->key_len = 7;
	key->ext.FkType = type;
	key->ext.FNum = cpu_to_be32(cnid);
	key->ext.FABN = cpu_to_be16(block);
}

/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree.  In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned shorts).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
	__be32 fnum1, fnum2;
	__be16 block1, block2;

	fnum1 = key1->ext.FNum;
	fnum2 = key2->ext.FNum;
	if (fnum1 != fnum2)
		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
	if (key1->ext.FkType != key2->ext.FkType)
		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

	block1 = key1->ext.FABN;
	block2 = key2->ext.FABN;
	if (block1 == block2)
		return 0;
	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

/*
 * hfs_ext_find_block
 *
 * Find a block within an extent record
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
	int i;
	u16 count;

	for (i = 0; i < 3; ext++, i++) {
		count = be16_to_cpu(ext->count);
		if (off < count)
			return be16_to_cpu(ext->block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

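/*
 * hfs_ext_block_count
 *
 * Return the total number of allocation blocks described by the three
 * extent descriptors in @ext.
 */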
static int hfs_ext_block_count(struct hfs_extent *ext)
{
	int i;
	u16 count = 0;

	for (i = 0; i < 3; ext++, i++)
		count += be16_to_cpu(ext->count);
	return count;
}

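/*
 * hfs_ext_lastblock
 *
 * Return the allocation block number immediately following the last
 * block described by the extent record @ext.
 */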
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
	int i;

	ext += 2;
	for (i = 0; i < 2; ext--, i++)
		if (ext->count)
			break;
	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}

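/*
 * __hfs_ext_write_extent
 *
 * Write the inode's cached extent record to the extents B-tree using
 * the caller's initialized @fd: insert a new record if the cache was
 * just created (HFS_FLG_EXT_NEW), otherwise update the existing record
 * in place.
 */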
static void __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
}

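/*
 * hfs_ext_write_extent
 *
 * Flush the inode's cached extent record to the extents B-tree if it
 * is marked dirty.
 */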
void hfs_ext_write_extent(struct inode *inode)
{
	struct hfs_find_data fd;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
		__hfs_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

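/*
 * __hfs_ext_read_extent
 *
 * Look up the extent record covering allocation block @block of the
 * given fork in the extents B-tree and copy it into @extent.
 */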
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}

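/*
 * __hfs_ext_cache_extent
 *
 * Load the extent record covering allocation block @block into the
 * inode's extent cache, writing back the previously cached record
 * first if it is dirty.
 */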
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY)
		__hfs_ext_write_extent(inode, fd);

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}

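/*
 * hfs_ext_read_extent
 *
 * Make sure the extent record covering allocation block @block is in
 * the inode's extent cache, reading it from the extents B-tree if
 * necessary.
 */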
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
	struct hfs_find_data fd;
	int res;

	if (block >= HFS_I(inode)->cached_start &&
	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
		return 0;

	hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfs_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

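/*
 * hfs_dump_extent
 *
 * Print the block:count pairs of an extent record for debugging.
 */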
static void hfs_dump_extent(struct hfs_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, "   ");
	for (i = 0; i < 3; i++)
		dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block),
				 be16_to_cpu(extent[i].count));
	dprint(DBG_EXTENT, "\n");
}

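/*
 * hfs_add_extent
 *
 * Append a run of @block_count newly allocated blocks starting at
 * @alloc_block to an extent record; @offset is the number of blocks
 * the record already describes.  The run either extends the last used
 * descriptor (if contiguous) or claims the next free one.  Returns
 * -ENOSPC when all three descriptors are in use.
 */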
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

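/*
 * hfs_free_extents
 *
 * Free @block_nr allocation blocks from the tail of an extent record,
 * clearing the corresponding bits in the volume bitmap; @offset is the
 * number of blocks the record describes up to the point where the fork
 * currently ends.
 */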
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}

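/*
 * hfs_free_fork
 *
 * Free all allocation blocks of one fork of a catalog file: first the
 * blocks described by the extent record embedded in the catalog entry,
 * then any overflow extent records in the extents B-tree.
 */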
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	/* count the blocks described by the extent record in the catalog entry */
	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

/*
 * hfs_get_block
 *
 * Map file block @block of @inode to a disk block for the buffer
 * layer, allocating a new allocation block when @create is set and the
 * file needs to grow.
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;
	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		if (block > HFS_I(inode)->fs_blocks || !create)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}

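/*
 * hfs_extend_file
 *
 * Allocate up to clump_blocks more allocation blocks for the file,
 * preferably contiguous with its current last block, and record them
 * either in the extent record embedded in the inode or in a new record
 * in the extents B-tree.
 */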
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		sb->s_dirt = 1;
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfs_ext_write_extent(inode);

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}

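/*
 * hfs_file_truncate
 *
 * Bring the on-disk allocation in line with i_size: extend the file by
 * writing a zero-length chunk past the old end when it grew, or free
 * the allocation blocks beyond the new size and drop now-empty extent
 * records when it shrank.
 */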
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino,
	       (long long)HFS_I(inode)->phys_size, inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;
		int res;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
					page, fsdata);
		}
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;
	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}