xref: /linux/fs/nilfs2/dat.c (revision e26207a3819684e9b4450a2d30bdd065fa92d9c7)
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"

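/*
 * Checkpoint numbers delimiting the lifetime of a DAT entry.
 * NILFS_CNO_MIN is the smallest valid checkpoint number; an entry whose
 * de_end is NILFS_CNO_MAX refers to a block that is still live, i.e.
 * whose lifetime has not been terminated yet.
 */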
#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

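/*
 * DAT entries are modified under a prepare/commit/abort protocol: the
 * prepare helper pins the buffer holding the on-disk entry in
 * req->pr_entry_bh, and the caller must finish with exactly one of
 * commit (mark the entry and the DAT dirty) or abort (release the
 * buffer unchanged).
 */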
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

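/**
 * nilfs_dat_prepare_alloc - prepare to allocate a new DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Reserves a free virtual block number and reads in the block that will
 * hold its entry.  Must be followed by nilfs_dat_commit_alloc() or
 * nilfs_dat_abort_alloc().
 */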
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

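/**
 * nilfs_dat_commit_alloc - finish allocating a new DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Initializes the new entry to the widest lifetime
 * [NILFS_CNO_MIN, NILFS_CNO_MAX) with no disk block assigned yet
 * (de_blocknr == 0), then commits the allocation.
 */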
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

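/**
 * nilfs_dat_abort_alloc - abort allocation of a new DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 */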
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

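/**
 * nilfs_dat_commit_free - finish freeing a DAT entry
 * @dat: DAT file inode
 * @req: request
 *
 * Collapses the entry to the empty lifetime
 * [NILFS_CNO_MIN, NILFS_CNO_MIN) with no block assigned, and returns
 * the virtual block number to the allocator.
 */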
void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

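/**
 * nilfs_dat_prepare_start - prepare to assign a disk block to an entry
 * @dat: DAT file inode
 * @req: request
 *
 * The entry must already have been allocated; -ENOENT here indicates a
 * bug in the caller, hence the WARN_ON.
 */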
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

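/**
 * nilfs_dat_commit_start - finish assigning a disk block to an entry
 * @dat: DAT file inode
 * @req: request
 * @blocknr: disk block number
 *
 * Starts the entry's lifetime at the current checkpoint number and
 * records @blocknr as the block's location on disk.
 */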
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}

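/**
 * nilfs_dat_prepare_end - prepare to end the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: request
 *
 * If the entry was never assigned a disk block (de_blocknr == 0),
 * ending its lifetime also frees the virtual block number, so the
 * deallocation is prepared here as well.
 */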
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

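/**
 * nilfs_dat_commit_end - finish ending the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: request
 * @dead: nonzero to make the entry's lifetime empty
 *
 * If @dead is set, de_end is set to de_start, collapsing the lifetime
 * to the empty range; otherwise the lifetime ends at the current
 * checkpoint number.  An entry that was never assigned a disk block is
 * freed outright.
 */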
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

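/**
 * nilfs_dat_abort_end - abort ending the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: request
 *
 * Releases the pinned entry buffer and, if the entry has no disk block
 * and was started in the current checkpoint, cancels the prepared
 * deallocation.
 */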
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

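/*
 * The update functions below chain the "end" and "alloc" protocols: a
 * single prepare/commit/abort cycle retires the entry of @oldreq and
 * allocates a fresh entry for @newreq.  A caller relocating a block
 * would do roughly the following (hypothetical sketch, error handling
 * elided):
 *
 *	struct nilfs_palloc_req oldreq, newreq;
 *
 *	oldreq.pr_entry_nr = old_vblocknr;
 *	if (!nilfs_dat_prepare_update(dat, &oldreq, &newreq)) {
 *		if (ok)
 *			nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 *		else
 *			nilfs_dat_abort_update(dat, &oldreq, &newreq);
 *	}
 */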
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

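/**
 * nilfs_dat_commit_update - finish updating a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry being retired
 * @newreq: request for the newly allocated entry
 * @dead: nonzero to discard the old entry's lifetime entirely
 */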
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

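/**
 * nilfs_dat_abort_update - abort updating a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry being retired
 * @newreq: request for the newly allocated entry
 */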
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the buffer holding the DAT
 * entry of @vblocknr dirty so that the entry is written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - One of the virtual block numbers has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: new block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed to by @blocknrp. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}

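/**
 * nilfs_dat_get_vinfo - get information on a series of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures, each preset with vi_vblocknr
 * @visz: size of one nilfs_vinfo structure in @buf
 * @nvi: number of structures in @buf
 *
 * Fills in the lifetime ([vi_start, vi_end)) and the disk block number
 * of each requested virtual block number, reading each DAT entry block
 * only once for consecutive requests that fall into it.  Returns @nvi
 * on success or a negative error code on failure.
 */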
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read dat inode
 * @dat: dat inode
 * @raw_inode: on-disk dat inode
 */
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(dat, raw_inode);
}

/**
 * nilfs_dat_new - create dat file
 * @nilfs: nilfs object
 * @entry_size: size of a dat entry
 */
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
	if (dat) {
		err = nilfs_palloc_init_blockgroup(dat, entry_size);
		if (unlikely(err)) {
			nilfs_mdt_destroy(dat);
			return NULL;
		}

		di = NILFS_DAT_I(dat);
		lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
		nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	}
	return dat;
}
475