/* xref: /linux/fs/nilfs2/dat.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb) */
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


/* checkpoint number bounds: "beginning of time" and "not yet ended" */
#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

/* load (or create) the entry block holding the DAT entry for the request */
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

/* mark the entry block and the DAT inode dirty, then release the buffer */
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

/* drop the buffer reference taken by nilfs_dat_prepare_entry() */
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}
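
/*
 * Example (illustrative sketch, not part of the original file): the
 * routines above form the usual prepare/commit/abort triplet.  A
 * caller allocating a fresh virtual block number would chain them
 * roughly as below; dat_example_alloc_vblocknr is a hypothetical name.
 */
static int dat_example_alloc_vblocknr(struct inode *dat, __u64 *vblocknrp)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = 0;	/* search hint for the persistent allocator */
	ret = nilfs_dat_prepare_alloc(dat, &req);
	if (ret < 0)
		return ret;	/* prepare failed; nothing to undo */
	/* a caller could still back out here with nilfs_dat_abort_alloc() */
	nilfs_dat_commit_alloc(dat, &req);	/* entry now [CNO_MIN, CNO_MAX) */
	*vblocknrp = req.pr_entry_nr;	/* the allocated virtual block number */
	return 0;
}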

void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
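
/*
 * Example (illustrative sketch, not part of the original file): a
 * caller rewriting a block under copy-on-write semantics ends the old
 * entry and allocates a new one in a single prepare/commit cycle.
 * @dead tells commit_end to truncate the old block's lifetime to its
 * start checkpoint; dat_example_replace_vblocknr is a hypothetical
 * name.
 */
static int dat_example_replace_vblocknr(struct inode *dat,
					__u64 old_vblocknr,
					__u64 *new_vblocknrp, int dead)
{
	struct nilfs_palloc_req oldreq, newreq;
	int ret;

	oldreq.pr_entry_nr = old_vblocknr;
	newreq.pr_entry_nr = 0;	/* search hint for the allocator */
	ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
	if (ret < 0)
		return ret;	/* both requests were already cleaned up */
	/* nilfs_dat_abort_update() could still roll everything back here */
	nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
	*new_vblocknrp = newreq.pr_entry_nr;
	return 0;
}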

/**
 * nilfs_dat_mark_dirty - mark the DAT entry for a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block containing the
 * DAT entry for @vblocknr, along with the DAT inode itself, dirty so that
 * the entry is written out by the next segment construction.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the @nitems virtual block numbers
 * specified by @vblocknrs.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - One of the virtual block numbers has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
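
/*
 * Example (illustrative sketch, not part of the original file): freeing
 * is batched, so a caller collects the virtual block numbers first;
 * the two values below are made up for illustration.
 */
static int dat_example_free_batch(struct inode *dat)
{
	__u64 vblocknrs[2] = { 10, 11 };	/* arbitrary example values */

	return nilfs_dat_freev(dat, vblocknrs, ARRAY_SIZE(vblocknrs));
}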

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}
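
/*
 * Example (illustrative sketch, not part of the original file): when
 * the cleaner copies a live block to a fresh segment, only the DAT
 * entry changes; every holder of the virtual block number sees the new
 * location on its next translation.  dat_example_relocate is a
 * hypothetical name.
 */
static int dat_example_relocate(struct inode *dat, __u64 vblocknr,
				sector_t dest_blocknr)
{
	/* moving an unwritten entry (de_blocknr == 0) fails with -EINVAL */
	return nilfs_dat_move(dat, vblocknr, dest_blocknr);
}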

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed to by @blocknrp. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}
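
/*
 * Example (illustrative sketch, not part of the original file): a
 * debugging helper resolving one mapping; -ENOENT means the virtual
 * block number has no disk location yet.  dat_example_print_pbn is a
 * hypothetical name.
 */
static void dat_example_print_pbn(struct inode *dat, __u64 vblocknr)
{
	sector_t pbn;
	int ret = nilfs_dat_translate(dat, vblocknr, &pbn);

	if (!ret)
		printk(KERN_DEBUG "vbn %llu -> pbn %llu\n",
		       (unsigned long long)vblocknr,
		       (unsigned long long)pbn);
	else
		printk(KERN_DEBUG "vbn %llu: no mapping (err %d)\n",
		       (unsigned long long)vblocknr, ret);
}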

ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this entry block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}
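
/*
 * Example (illustrative sketch, not part of the original file): the
 * NILFS_IOCTL_GET_VINFO path fills caller-supplied records in bulk.
 * Callers typically sort by vi_vblocknr so that each entry block is
 * read only once; the block numbers below are made up.
 */
static ssize_t dat_example_get_two_vinfos(struct inode *dat)
{
	struct nilfs_vinfo vinfo[2] = {
		{ .vi_vblocknr = 10 },
		{ .vi_vblocknr = 11 },
	};

	return nilfs_dat_get_vinfo(dat, vinfo, sizeof(vinfo[0]),
				   ARRAY_SIZE(vinfo));
}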

/**
 * nilfs_dat_read - read dat inode
 * @dat: dat inode
 * @raw_inode: on-disk dat inode
 */
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(dat, raw_inode);
}

/**
 * nilfs_dat_new - create dat file
 * @nilfs: nilfs object
 * @entry_size: size of a dat entry
 */
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
	if (dat) {
		err = nilfs_palloc_init_blockgroup(dat, entry_size);
		if (unlikely(err)) {
			nilfs_mdt_destroy(dat);
			return NULL;
		}

		di = NILFS_DAT_I(dat);
		lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
		nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	}
	return dat;
}
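
/*
 * Example (illustrative sketch, not part of the original file): during
 * mount the DAT would be created and then initialized from its on-disk
 * inode; a real caller also loads the inode block first, which is
 * omitted here.  dat_example_load is a hypothetical name.
 */
static struct inode *dat_example_load(struct the_nilfs *nilfs,
				      size_t entry_size,
				      struct nilfs_inode *raw_inode)
{
	struct inode *dat = nilfs_dat_new(nilfs, entry_size);

	if (dat && nilfs_dat_read(dat, raw_inode) < 0) {
		nilfs_mdt_destroy(dat);
		return NULL;
	}
	return dat;
}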