xref: /linux/fs/nilfs2/dat.c (revision 5bdef865eb358b6f3760e25e591ae115e9eeddef)
/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

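/*
 * Illustrative sketch, not part of the original file: the helpers above
 * form a prepare/commit/abort protocol.  A caller prepares a request,
 * performs its update, and then either commits or aborts.  The function
 * below is hypothetical and only demonstrates the calling convention.
 */
static int __maybe_unused nilfs_dat_update_example(struct inode *dat,
						   __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);	/* read entry block */
	if (ret < 0)
		return ret;	/* nothing was prepared, nothing to abort */

	/* ... modify the entry under kmap_atomic() here ... */

	nilfs_dat_commit_entry(dat, &req);	/* mark dirty and release */
	return 0;
}
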
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

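/*
 * Illustrative sketch, not part of the original file: allocating a fresh
 * virtual block number with the two-phase interface above.  The wrapper
 * name and the search hint of 0 are hypothetical.
 */
static int __maybe_unused nilfs_dat_alloc_example(struct inode *dat,
						  __u64 *vblocknrp)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = 0;	/* hint: search from the first entry group */
	ret = nilfs_dat_prepare_alloc(dat, &req);
	if (ret < 0)
		return ret;

	nilfs_dat_commit_alloc(dat, &req);	/* entry allocated, unassigned */
	*vblocknrp = req.pr_entry_nr;	/* the allocator's chosen vblocknr */
	return 0;
}
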
void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

void nilfs_dat_abort_free(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
}

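/*
 * Illustrative sketch, not part of the original file: binding a virtual
 * block number to a disk block at segment construction time.  Real
 * callers live in the bmap code; this wrapper and flow are hypothetical.
 */
static int __maybe_unused nilfs_dat_start_example(struct inode *dat,
						  __u64 vblocknr,
						  sector_t blocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_start(dat, &req);
	if (ret < 0)
		return ret;

	/* records @blocknr and stamps de_start with the current cno */
	nilfs_dat_commit_start(dat, &req, blocknr);
	return 0;
}
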
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

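/*
 * Illustrative sketch, not part of the original file: retiring a virtual
 * block number.  @dead selects whether the entry's live range is closed
 * at its own start (never visible in any checkpoint) or at the current
 * checkpoint number.  This wrapper is hypothetical.
 */
static int __maybe_unused nilfs_dat_end_example(struct inode *dat,
						__u64 vblocknr, int dead)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_end(dat, &req);
	if (ret < 0)
		return ret;

	/* frees the entry outright if no disk block was ever assigned */
	nilfs_dat_commit_end(dat, &req, dead);
	return 0;
}
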
/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the DAT entry block containing
 * the entry of @vblocknr dirty so that the entry is written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

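/*
 * Illustrative sketch, not part of the original file: a writer that has
 * changed a block addressed through @vblocknr would mark the DAT entry
 * dirty so the translation is flushed with the next segment.  Hypothetical.
 */
static int __maybe_unused nilfs_dat_mark_dirty_example(struct inode *dat,
						       __u64 vblocknr)
{
	/* ensure the translation for @vblocknr is written out */
	return nilfs_dat_mark_dirty(dat, vblocknr);
}
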
/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

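/*
 * Illustrative sketch, not part of the original file: freeing a batch of
 * virtual block numbers at once.  The numbers used here are arbitrary.
 */
static int __maybe_unused nilfs_dat_freev_example(struct inode *dat)
{
	__u64 vblocknrs[2] = { 10, 11 };	/* hypothetical vblocknrs */

	return nilfs_dat_freev(dat, vblocknrs, 2);
}
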
/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

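/*
 * Illustrative sketch, not part of the original file: the garbage
 * collector relocates a live block on disk and then repoints its virtual
 * block number at the new location.  Names and flow are hypothetical.
 */
static int __maybe_unused nilfs_dat_move_example(struct inode *dat,
						 __u64 vblocknr,
						 sector_t new_blocknr)
{
	/* after copying the block's data to @new_blocknr ... */
	return nilfs_dat_move(dat, vblocknr, new_blocknr);
}
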
/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored at the location pointed to by @blocknrp. On
 * error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	if (blocknrp != NULL)
		*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}

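/*
 * Illustrative sketch, not part of the original file: resolving a virtual
 * block number before reading the block from disk.  Hypothetical flow.
 */
static int __maybe_unused nilfs_dat_translate_example(struct inode *dat,
						      __u64 vblocknr)
{
	sector_t pblocknr;
	int ret;

	ret = nilfs_dat_translate(dat, vblocknr, &pblocknr);
	if (ret < 0)
		return ret;	/* -ENOENT: no block bound to @vblocknr */

	/* ... submit I/O against @pblocknr here ... */
	return 0;
}
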
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}
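
/*
 * Illustrative sketch, not part of the original file: querying the DAT
 * entry of one virtual block number, the way the GET_VINFO ioctl path is
 * expected to use this helper.  The value below is arbitrary.
 */
static ssize_t __maybe_unused nilfs_dat_get_vinfo_example(struct inode *dat)
{
	struct nilfs_vinfo vi = { .vi_vblocknr = 10 };	/* hypothetical vbn */
	ssize_t n;

	n = nilfs_dat_get_vinfo(dat, &vi, sizeof(vi), 1);
	if (n < 0)
		return n;
	/* vi.vi_start/vi.vi_end now hold the entry's checkpoint range */
	return n;
}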