xref: /linux/fs/nilfs2/dat.c (revision f79e4d5f92a129a1159c973735007d4ddc8541f3)
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

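/* NILFS_DAT_I - get the DAT-file private data attached to the mdt info */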
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

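/*
 * nilfs_dat_prepare_entry - get the entry block that holds a DAT entry
 *
 * Reads (or creates, if @create is nonzero) the block containing the
 * entry @req->pr_entry_nr and stores its buffer head in
 * @req->pr_entry_bh.
 */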
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

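/*
 * nilfs_dat_commit_entry - commit a change to a DAT entry by marking
 * the entry buffer and the DAT inode dirty, then dropping the buffer
 * reference taken at prepare time.
 */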
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

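/*
 * nilfs_dat_abort_entry - abandon a change to a DAT entry, dropping
 * the buffer reference without marking anything dirty.
 */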
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

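/*
 * nilfs_dat_prepare_alloc - prepare to allocate a virtual block number.
 * Reserves a free DAT entry and loads the block that will hold it; the
 * reservation is rolled back if the entry block cannot be read.
 */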
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

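/*
 * nilfs_dat_commit_alloc - finish allocating a virtual block number.
 * The new entry is initialized to the widest checkpoint range
 * (de_start == NILFS_CNO_MIN, de_end == NILFS_CNO_MAX) with no disk
 * block assigned yet.
 */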
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

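/*
 * nilfs_dat_abort_alloc - abandon a prepared allocation, releasing the
 * entry buffer and undoing the allocator reservation.
 */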
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

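/*
 * nilfs_dat_commit_free - really free a DAT entry.  The entry is reset
 * to its unallocated state (de_start == de_end == NILFS_CNO_MIN, no
 * disk block) and returned to the persistent object allocator.
 */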
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

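/*
 * nilfs_dat_prepare_start - prepare to assign a disk block number to
 * an entry.  The entry must already be allocated, so -ENOENT here is
 * unexpected (hence the WARN_ON).
 */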
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

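/*
 * nilfs_dat_commit_start - assign the disk block number @blocknr to an
 * entry, recording the current checkpoint number as the start of its
 * lifetime.
 */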
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

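/*
 * nilfs_dat_prepare_end - prepare to end the lifetime of an entry.  If
 * the entry has no disk block assigned (de_blocknr == 0), freeing of
 * the entry itself is prepared as well.
 */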
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

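/*
 * nilfs_dat_commit_end - finish ending the lifetime of an entry.  If
 * @dead is zero, the lifetime ends at the current checkpoint;
 * otherwise it is collapsed to the checkpoint the entry was born in
 * (start == end).  Entries without a disk block are freed outright.
 */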
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

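/*
 * nilfs_dat_abort_end - roll back nilfs_dat_prepare_end().  If the
 * prepare step also reserved freeing of the entry, that reservation is
 * undone before the entry buffer is released.
 */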
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

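/*
 * nilfs_dat_prepare_update - prepare to relocate a virtual block
 * number, combining nilfs_dat_prepare_end() on the old entry with
 * nilfs_dat_prepare_alloc() of a new one; the former is unwound if the
 * latter fails.
 */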
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

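/* nilfs_dat_commit_update - commit both halves of a prepared update */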
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

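/* nilfs_dat_abort_update - abort both halves of a prepared update */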
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the buffer holding the DAT
 * entry of @vblocknr dirty so that it gets written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the @nitems virtual block numbers
 * specified by @vblocknrs.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - One of the virtual block numbers has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - The DAT entry for @vblocknr is invalid (it has no block
 * number assigned).
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_msg(dat->i_sb, KERN_CRIT,
			  "%s: invalid vblocknr = %llu, [%llu, %llu)",
			  __func__, (unsigned long long)vblocknr,
			  (unsigned long long)le64_to_cpu(entry->de_start),
			  (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

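/**
 * nilfs_dat_get_vinfo - get information on a group of DAT entries
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures to fill in
 * @visz: size of one nilfs_vinfo structure
 * @nvi: number of structures in @buf
 *
 * Description: nilfs_dat_get_vinfo() looks up the lifetime and block
 * address of each virtual block number given in @buf, loading each DAT
 * entry block only once for consecutive requests that fall in it.
 *
 * Return Value: On success, the number of structures filled in (@nvi) is
 * returned. On error, a negative error code is returned.
 */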
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this entry block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}


/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the dat inode is stored in
 * the place pointed by @inodep. On error, a negative error code is
 * returned.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
523