xref: /linux/fs/nilfs2/mdt.c (revision e31e3f6c0ce473f7ce1e70d54ac8e3ed190509f8)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "alloc.h"		/* nilfs_palloc_destroy_cache() */

#include <trace/events/nilfs2.h>

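/*
 * Maximum number of extra blocks submitted for readahead after the block
 * requested by nilfs_mdt_read_block().
 */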
#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)


static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct folio *folio = bh->b_folio;
	void *from;
	int ret;

	/* The caller excludes read accesses using the page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	/* Initialize block (block size > PAGE_SIZE not yet supported) */
	from = kmap_local_folio(folio, offset_in_folio(folio, bh->b_data));
	memset(from, 0, bh->b_size);
	if (init_block)
		init_block(inode, bh, from);
	kunmap_local(from);

	flush_dcache_folio(folio);

	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);

	trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);

	return 0;
}

static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,
		       struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (opf & REQ_RAHEAD) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* opf == REQ_OP_READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	ret = 0;

	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff,
				      opf & REQ_OP_MASK);
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);
 failed:
	return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff,
						REQ_OP_READ | REQ_RAHEAD, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				break;
				/* abort readahead if bmap lookup failed */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh)) {
		nilfs_err(inode->i_sb,
			  "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
			  inode->i_ino, block);
		goto failed_bh;
	}
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  If (and only if) this function
 * succeeds, it stores a pointer to the retrieved buffer head in the location
 * pointed to by @out_bh.
 *
 * The retrieved buffer may be either an existing one or a newly allocated one.
 * For a newly created buffer, if the callback function argument @init_block
 * is non-NULL, the callback will be called with the buffer locked to format
 * the block.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO	- I/O error (including metadata corruption).
 * * %-ENOENT	- The specified block does not exist (hole block).
 * * %-ENOMEM	- Insufficient memory available.
 * * %-EROFS	- Read-only filesystem (for create mode).
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten by merging in nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */  /* limit read-create loop retries */
		goto retry;
	}
	return ret;
}
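
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that fetches one block of a metadata file with nilfs_mdt_get_block().  The
 * helper name and the NULL initializer are assumptions made for illustration;
 * real users pass a file-specific init_block callback when creating blocks.
 */
#if 0
static int example_read_mdt_block(struct inode *mdt_inode, unsigned long blkoff)
{
	struct buffer_head *bh;
	int ret;

	/* Look up the block; pass create == 1 to allocate it when missing. */
	ret = nilfs_mdt_get_block(mdt_inode, blkoff, 0, NULL, &bh);
	if (ret)
		return ret;	/* e.g. -ENOENT for a hole block */

	/*
	 * ... examine or modify bh->b_data here; if modified, call
	 * mark_buffer_dirty(bh) and nilfs_mdt_mark_dirty(mdt_inode) ...
	 */
	brelse(bh);
	return 0;
}
#endif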

/**
 * nilfs_mdt_find_block - find and get a buffer on the meta data file.
 * @inode: inode of the meta data file
 * @start: start block offset (inclusive)
 * @end: end block offset (inclusive)
 * @blkoff: block offset
 * @out_bh: place to store a pointer to buffer_head struct
 *
 * nilfs_mdt_find_block() looks up an existing block in the range
 * [@start, @end], stores a pointer to the buffer head of the block in
 * @out_bh, and stores its block offset in @blkoff.  @out_bh and @blkoff
 * are assigned only when zero is returned.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO	- I/O error (including metadata corruption).
 * * %-ENOENT	- No block was found in the range.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
			 unsigned long end, unsigned long *blkoff,
			 struct buffer_head **out_bh)
{
	__u64 next;
	int ret;

	if (unlikely(start > end))
		return -ENOENT;

	ret = nilfs_mdt_read_block(inode, start, true, out_bh);
	if (!ret) {
		*blkoff = start;
		goto out;
	}
	if (unlikely(ret != -ENOENT || start == ULONG_MAX))
		goto out;

	ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
	if (!ret) {
		if (next <= end) {
			ret = nilfs_mdt_read_block(inode, next, true, out_bh);
			if (!ret)
				*blkoff = next;
		} else {
			ret = -ENOENT;
		}
	}
out:
	return ret;
}
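
/*
 * Illustrative sketch (not part of the original file): a hypothetical loop
 * that visits every allocated block in [first, last] using
 * nilfs_mdt_find_block(), skipping holes.  The helper name is an assumption
 * made for illustration only.
 */
#if 0
static int example_scan_mdt_blocks(struct inode *mdt_inode,
				   unsigned long first, unsigned long last)
{
	struct buffer_head *bh;
	unsigned long blkoff;
	int ret = 0;

	while (first <= last) {
		ret = nilfs_mdt_find_block(mdt_inode, first, last, &blkoff, &bh);
		if (ret) {
			if (ret == -ENOENT)
				ret = 0;	/* no more blocks in range */
			break;
		}
		/* ... inspect bh->b_data for block @blkoff here ... */
		brelse(bh);
		if (blkoff == ULONG_MAX)
			break;			/* avoid wrap-around */
		first = blkoff + 1;
	}
	return ret;
}
#endif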

/**
 * nilfs_mdt_delete_block - make a hole in the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO	- I/O error (including metadata corruption).
 * * %-ENOENT	- Non-existent block.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}
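
/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * that punches holes over a range of block offsets, treating already-absent
 * blocks (-ENOENT) as success.  The helper name is an assumption made for
 * illustration only.
 */
#if 0
static int example_punch_mdt_blocks(struct inode *mdt_inode,
				    unsigned long first, unsigned long last)
{
	unsigned long blkoff;
	int ret;

	for (blkoff = first; blkoff <= last; blkoff++) {
		ret = nilfs_mdt_delete_block(mdt_inode, blkoff);
		if (ret && ret != -ENOENT)
			return ret;
		if (blkoff == ULONG_MAX)
			break;	/* avoid wrap-around */
	}
	return 0;
}
#endif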

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears the dirty flag of the specified buffer and
 * tries to release the page containing the buffer from the page cache.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EBUSY	- Page has an active buffer.
 * * %-ENOENT	- Page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = block >> (PAGE_SHIFT - inode->i_blkbits);
	struct folio *folio;
	struct buffer_head *bh;
	int ret = 0;
	int still_dirty;

	folio = filemap_lock_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return -ENOENT;

	folio_wait_writeback(folio);

	bh = folio_buffers(folio);
	if (bh) {
		unsigned long first_block = index <<
				(PAGE_SHIFT - inode->i_blkbits);
		bh = get_nth_bh(bh, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = folio_test_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int nilfs_mdt_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb;
	int err = 0;

	if (inode && sb_rdonly(inode->i_sb)) {
		/*
		 * This means that the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * folios still try to get flushed in the background.
		 * So here we simply discard this dirty folio.
		 */
		nilfs_clear_folio_dirty(folio);
		folio_unlock(folio);
		return -EROFS;
	}

	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);

	if (!inode)
		return 0;

	sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	return err;
}

static int nilfs_mdt_writeback(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = nilfs_mdt_write_folio(folio, wbc);

	return error;
}

static const struct address_space_operations def_mdt_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.writepages		= nilfs_mdt_writeback,
	.migrate_folio		= buffer_migrate_folio_norefs,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
	struct nilfs_mdt_info *mi;

	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
	if (!mi)
		return -ENOMEM;

	init_rwsem(&mi->mi_sem);
	inode->i_private = mi;

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;

	return 0;
}

/**
 * nilfs_mdt_clear - do cleanup for the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mdi->mi_shadow;

	if (mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (shadow) {
		struct inode *s_inode = shadow->inode;

		shadow->inode = NULL;
		iput(s_inode);
		mdi->mi_shadow = NULL;
	}
}

/**
 * nilfs_mdt_destroy - release resources used by the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_destroy(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
	kfree(mdi);
}

void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
			      unsigned int header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
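
/*
 * Illustrative sketch (not part of the original file): the typical order in
 * which a metadata file inode is prepared — nilfs_mdt_init() attaches the
 * private nilfs_mdt_info, then nilfs_mdt_set_entry_size() derives the
 * per-block entry layout.  The helper name, gfp mask, and the entry/header
 * sizes below are placeholders, not values taken from any real metadata file.
 */
#if 0
static int example_prepare_mdt_inode(struct inode *mdt_inode)
{
	int ret;

	ret = nilfs_mdt_init(mdt_inode, GFP_NOFS, 0 /* no larger private object */);
	if (ret)
		return ret;

	/* 64-byte entries behind a 16-byte block header (placeholder sizes) */
	nilfs_mdt_set_entry_size(mdt_inode, 64, 16);
	return 0;
}
#endif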

/**
 * nilfs_mdt_setup_shadow_map - set up a shadow map and bind it to the metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
			       struct nilfs_shadow_map *shadow)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct inode *s_inode;

	INIT_LIST_HEAD(&shadow->frozen_buffers);

	s_inode = nilfs_iget_for_shadow(inode);
	if (IS_ERR(s_inode))
		return PTR_ERR(s_inode);

	shadow->inode = s_inode;
	mi->mi_shadow = shadow;
	return 0;
}
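
/*
 * Illustrative sketch (not part of the original file): one plausible
 * shadow-map cycle as suggested by the functions in this file — bind a shadow
 * map, snapshot the current state, and later either restore from it or drop
 * the snapshot.  The helper name, the @roll_back parameter, and the exact
 * ordering are assumptions made for illustration only.
 */
#if 0
static int example_mdt_snapshot_cycle(struct inode *mdt_inode,
				      struct nilfs_shadow_map *shadow,
				      bool roll_back)
{
	int ret;

	/* Bind the shadow map (normally done once when the file is set up). */
	ret = nilfs_mdt_setup_shadow_map(mdt_inode, shadow);
	if (ret)
		return ret;

	/* Snapshot the bmap state and dirty pages before a risky update. */
	ret = nilfs_mdt_save_to_shadow_map(mdt_inode);
	if (ret)
		return ret;

	/* ... modify the metadata file here ... */

	if (roll_back)
		/* Throw away the failed update and return to the snapshot. */
		nilfs_mdt_restore_from_shadow_map(mdt_inode);

	/* Drop snapshot pages and frozen buffers once they are not needed. */
	nilfs_mdt_clear_shadow_map(mdt_inode);
	return 0;
}
#endif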

/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *s_inode = shadow->inode;
	int ret;

	ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
	if (ret)
		goto out;

	ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
				     ii->i_assoc_inode->i_mapping);
	if (ret)
		goto out;

	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
	return ret;
}

int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen;
	struct folio *folio;
	int blkbits = inode->i_blkbits;

	folio = filemap_grab_folio(shadow->inode->i_mapping,
			bh->b_folio->index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	bh_frozen = folio_buffers(folio);
	if (!bh_frozen)
		bh_frozen = create_empty_buffers(folio, 1 << blkbits, 0);

	bh_frozen = get_nth_bh(bh_frozen,
			       offset_in_folio(folio, bh->b_data) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	} else {
		brelse(bh_frozen); /* already frozen */
	}

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}

struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen = NULL;
	struct folio *folio;
	int n;

	folio = filemap_lock_folio(shadow->inode->i_mapping,
			bh->b_folio->index);
	if (!IS_ERR(folio)) {
		bh_frozen = folio_buffers(folio);
		if (bh_frozen) {
			n = offset_in_folio(folio, bh->b_data) >>
				inode->i_blkbits;
			bh_frozen = get_nth_bh(bh_frozen, n);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
	return bh_frozen;
}

static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
	struct list_head *head = &shadow->frozen_buffers;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh); /* drop ref-count to make it releasable */
	}
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);

	if (mi->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);

	nilfs_clear_dirty_pages(inode->i_mapping);
	nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);

	nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping);
	nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
			      NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);

	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

	up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;

	down_write(&mi->mi_sem);
	nilfs_release_frozen_buffers(shadow);
	truncate_inode_pages(shadow->inode->i_mapping, 0);
	truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
	up_write(&mi->mi_sem);
}
683