xref: /linux/fs/nilfs2/gcinode.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * gcinode.c - dummy inodes to buffer blocks for garbage collection
3  *
4  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  *
20  * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>,
21  *            and Ryusuke Konishi <ryusuke@osrg.net>.
22  * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
23  *
24  */
25 /*
26  * This file adds the cache of on-disk blocks to be moved in garbage
27  * collection.  The disk blocks are held with dummy inodes (called
28  * gcinodes), and this file provides lookup function of the dummy
29  * inodes and their buffer read function.
30  *
31  * Since NILFS2 keeps multiple checkpoints/snapshots across GC, it
32  * has to handle blocks that belong to the same file but have different
33  * checkpoint numbers.  To avoid interference among generations, dummy
34  * inodes are managed separately from actual inodes, and their lookup
35  * function (nilfs_gc_iget) is designed to be specified with a
36  * checkpoint number argument as well as an inode number.
37  *
38  * Buffers and pages held by the dummy inodes will be released each
39  * time after they are copied to a new log.  Dirty blocks made on the
40  * current generation and the blocks to be moved by GC never overlap
41  * because the dirty blocks form a new generation; rather, they must be
42  * written out individually.
43  */
44 
45 #include <linux/buffer_head.h>
46 #include <linux/mpage.h>
47 #include <linux/hash.h>
48 #include <linux/slab.h>
49 #include <linux/swap.h>
50 #include "nilfs.h"
51 #include "btree.h"
52 #include "btnode.h"
53 #include "page.h"
54 #include "mdt.h"
55 #include "dat.h"
56 #include "ifile.h"
57 
/*
 * Minimal address space operations for gcinode page caches.  Only
 * ->sync_page is provided (block-device plug flushing); gcinode pages
 * are read in, copied to a new log, and then released (see the file
 * header comment), so no writeback operations are needed.
 */
static const struct address_space_operations def_gcinode_aops = {
	.sync_page		= block_sync_page,
};
61 
62 /*
63  * nilfs_gccache_submit_read_data() - add data buffer and submit read request
64  * @inode - gc inode
65  * @blkoff - dummy offset treated as the key for the page cache
66  * @pbn - physical block number of the block
67  * @vbn - virtual block number of the block, 0 for non-virtual block
68  * @out_bh - indirect pointer to a buffer_head struct to receive the results
69  *
70  * Description: nilfs_gccache_submit_read_data() registers the data buffer
71  * specified by @pbn to the GC pagecache with the key @blkoff.
72  * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
73  *
74  * Return Value: On success, 0 is returned. On Error, one of the following
75  * negative error code is returned.
76  *
77  * %-EIO - I/O error.
78  *
79  * %-ENOMEM - Insufficient amount of memory available.
80  *
81  * %-ENOENT - The block specified with @pbn does not exist.
82  */
83 int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
84 				   sector_t pbn, __u64 vbn,
85 				   struct buffer_head **out_bh)
86 {
87 	struct buffer_head *bh;
88 	int err;
89 
90 	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
91 	if (unlikely(!bh))
92 		return -ENOMEM;
93 
94 	if (buffer_uptodate(bh))
95 		goto out;
96 
97 	if (pbn == 0) {
98 		struct inode *dat_inode = NILFS_I_NILFS(inode)->ns_dat;
99 					  /* use original dat, not gc dat. */
100 		err = nilfs_dat_translate(dat_inode, vbn, &pbn);
101 		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
102 			brelse(bh);
103 			goto failed;
104 		}
105 	}
106 
107 	lock_buffer(bh);
108 	if (buffer_uptodate(bh)) {
109 		unlock_buffer(bh);
110 		goto out;
111 	}
112 
113 	if (!buffer_mapped(bh)) {
114 		bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
115 		set_buffer_mapped(bh);
116 	}
117 	bh->b_blocknr = pbn;
118 	bh->b_end_io = end_buffer_read_sync;
119 	get_bh(bh);
120 	submit_bh(READ, bh);
121 	if (vbn)
122 		bh->b_blocknr = vbn;
123  out:
124 	err = 0;
125 	*out_bh = bh;
126 
127  failed:
128 	unlock_page(bh->b_page);
129 	page_cache_release(bh->b_page);
130 	return err;
131 }
132 
133 /*
134  * nilfs_gccache_submit_read_node() - add node buffer and submit read request
135  * @inode - gc inode
136  * @pbn - physical block number for the block
137  * @vbn - virtual block number for the block
138  * @out_bh - indirect pointer to a buffer_head struct to receive the results
139  *
140  * Description: nilfs_gccache_submit_read_node() registers the node buffer
141  * specified by @vbn to the GC pagecache.  @pbn can be supplied by the
142  * caller to avoid translation of the disk block address.
143  *
144  * Return Value: On success, 0 is returned. On Error, one of the following
145  * negative error code is returned.
146  *
147  * %-EIO - I/O error.
148  *
149  * %-ENOMEM - Insufficient amount of memory available.
150  */
151 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
152 				   __u64 vbn, struct buffer_head **out_bh)
153 {
154 	int ret;
155 
156 	ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
157 					vbn ? : pbn, pbn, READ, out_bh, &pbn);
158 	if (ret == -EEXIST) /* internal code (cache hit) */
159 		ret = 0;
160 	return ret;
161 }
162 
163 int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
164 {
165 	wait_on_buffer(bh);
166 	if (!buffer_uptodate(bh))
167 		return -EIO;
168 	if (buffer_dirty(bh))
169 		return -EEXIST;
170 
171 	if (buffer_nilfs_node(bh)) {
172 		if (nilfs_btree_broken_node_block(bh)) {
173 			clear_buffer_uptodate(bh);
174 			return -EIO;
175 		}
176 		nilfs_btnode_mark_dirty(bh);
177 	} else {
178 		nilfs_mdt_mark_buffer_dirty(bh);
179 	}
180 	return 0;
181 }
182 
183 /*
184  * nilfs_init_gccache() - allocate and initialize gc_inode hash table
185  * @nilfs - the_nilfs
186  *
187  * Return Value: On success, 0.
188  * On error, a negative error code is returned.
189  */
190 int nilfs_init_gccache(struct the_nilfs *nilfs)
191 {
192 	int loop;
193 
194 	BUG_ON(nilfs->ns_gc_inodes_h);
195 
196 	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
197 
198 	nilfs->ns_gc_inodes_h =
199 		kmalloc(sizeof(struct hlist_head) * NILFS_GCINODE_HASH_SIZE,
200 			GFP_NOFS);
201 	if (nilfs->ns_gc_inodes_h == NULL)
202 		return -ENOMEM;
203 
204 	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++)
205 		INIT_HLIST_HEAD(&nilfs->ns_gc_inodes_h[loop]);
206 	return 0;
207 }
208 
209 /*
210  * nilfs_destroy_gccache() - free gc_inode hash table
211  * @nilfs - the nilfs
212  */
213 void nilfs_destroy_gccache(struct the_nilfs *nilfs)
214 {
215 	if (nilfs->ns_gc_inodes_h) {
216 		nilfs_remove_all_gcinode(nilfs);
217 		kfree(nilfs->ns_gc_inodes_h);
218 		nilfs->ns_gc_inodes_h = NULL;
219 	}
220 }
221 
/*
 * alloc_gcinode() - allocate a dummy inode used to buffer GC blocks
 * @nilfs: the_nilfs object
 * @ino: inode number of the original on-disk inode
 * @cno: checkpoint number of the generation the blocks belong to
 *
 * Returns the new pseudo inode, or NULL on allocation failure.
 */
static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
				   __u64 cno)
{
	struct inode *inode;
	struct nilfs_inode_info *ii;

	/* create the inode through the metadata-file layer */
	inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
	if (!inode)
		return NULL;

	/* gcinodes are never exposed through the VFS; no inode/file ops */
	inode->i_op = NULL;
	inode->i_fop = NULL;
	inode->i_mapping->a_ops = &def_gcinode_aops;

	ii = NILFS_I(inode);
	ii->i_cno = cno;	/* distinguishes generations of the same ino */
	ii->i_flags = 0;
	ii->i_state = 1 << NILFS_I_GCINODE;
	ii->i_bh = NULL;
	nilfs_bmap_init_gc(ii->i_bmap);

	return inode;
}
245 
246 static unsigned long ihash(ino_t ino, __u64 cno)
247 {
248 	return hash_long((unsigned long)((ino << 2) + cno),
249 			 NILFS_GCINODE_HASH_BITS);
250 }
251 
/*
 * nilfs_gc_iget() - find or create gc inode with specified (ino,cno)
 *
 * Looks up the dummy inode keyed by the pair (@ino, @cno); when none
 * exists, a new one is allocated, added to the hash table and linked
 * onto nilfs->ns_gc_inodes.  Returns NULL on allocation failure.
 *
 * NOTE(review): no locking is visible in this function; presumably the
 * callers serialize access to the hash table — confirm at call sites.
 */
struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
{
	struct hlist_head *head = nilfs->ns_gc_inodes_h + ihash(ino, cno);
	struct hlist_node *node;
	struct inode *inode;

	/* fast path: an inode for this (ino, cno) pair already exists */
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino == ino && NILFS_I(inode)->i_cno == cno)
			return inode;
	}

	/* slow path: allocate a fresh dummy inode and register it */
	inode = alloc_gcinode(nilfs, ino, cno);
	if (likely(inode)) {
		hlist_add_head(&inode->i_hash, head);
		list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes);
	}
	return inode;
}
273 
/*
 * nilfs_clear_gcinode() - clear and free a gc inode
 *
 * Delegates to the metadata-file destructor, which releases the dummy
 * inode together with any pages/buffers it still holds.
 */
void nilfs_clear_gcinode(struct inode *inode)
{
	nilfs_mdt_destroy(inode);
}
281 
/*
 * nilfs_remove_all_gcinode() - remove all inodes from the_nilfs
 *
 * Walks every bucket of the gcinode hash table, unlinks each dummy
 * inode from both the hash chain and the ns_gc_inodes dirty list, and
 * frees it.  The _safe iterator is required because entries are
 * deleted while traversing each chain.
 */
void nilfs_remove_all_gcinode(struct the_nilfs *nilfs)
{
	struct hlist_head *head = nilfs->ns_gc_inodes_h;
	struct hlist_node *node, *n;
	struct inode *inode;
	int loop;

	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++, head++) {
		hlist_for_each_entry_safe(inode, node, n, head, i_hash) {
			hlist_del_init(&inode->i_hash);
			list_del_init(&NILFS_I(inode)->i_dirty);
			nilfs_clear_gcinode(inode); /* might sleep */
		}
	}
}
300