/*
 * fs/f2fs/node.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* start node id of a node block dedicated to the given node id */
#define	START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define	NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
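
/*
 * Worked example (illustrative only; assumes 4KB blocks, for which
 * NAT_ENTRY_PER_BLOCK is 455): START_NID(1000) = (1000 / 455) * 455 = 910,
 * and NAT_BLOCK_OFFSET(910) = 2, so nids 910..1364 all live in the third
 * NAT block.
 */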

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

#define DEF_RA_NID_PAGES	0	/* # of nid pages to read ahead */

/* maximum readahead size for node pages while getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB of RAM) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% of max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD		10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD			100000

/* vector size for gang look-up from the nat cache radix tree */
#define NATVEC_SIZE	64
#define SETVEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* For flag in struct node_info */
enum {
	IS_CHECKPOINTED,	/* was it checkpointed before? */
	HAS_FSYNCED_INODE,	/* was the inode fsynced before? */
	HAS_LAST_FSYNC,		/* does it have the latest node fsync mark? */
	IS_DIRTY,		/* is this nat entry dirty? */
};

/*
 * For node information
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t	blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits */
};

struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};

#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))

static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* do not copy flag here: it tracks in-memory state only */
}

static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;

	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	unsigned char mask = 0x01 << type;

	return ne->ni.flag & mask;
}

static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after a checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}

static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}
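
/*
 * Note: struct f2fs_nat_entry is the packed on-disk form (a 1-byte
 * version plus little-endian ino and block_addr), which is why version
 * is copied verbatim while the 32-bit fields go through the le32
 * helpers.
 */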

static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}

static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}
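
/*
 * Example (hypothetical numbers): with max_nid = 450560 and
 * dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD (10),
 * excess_dirty_nats() fires once 45056 or more nat entries are dirty;
 * excess_cached_nats() fires at a flat 100000 cached entries.
 */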

enum mem_type {
	FREE_NIDS,	/* indicates the free nid list */
	NAT_ENTRIES,	/* indicates the cached nat entry */
	DIRTY_DENTS,	/* indicates dirty dentry pages */
	INO_ENTRIES,	/* indicates inode entries */
	EXTENT_CACHE,	/* indicates extent cache */
	BASE_CHECK,	/* check kernel status */
};

struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};

/*
 * For free nid management
 */
enum nid_state {
	NID_NEW,	/* newly added to free nid list */
	NID_ALLOC	/* it is allocated */
};

struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: NID_NEW or NID_ALLOC */
};

static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->free_nid_list_lock);
	if (nm_i->fcnt <= 0) {
		spin_unlock(&nm_i->free_nid_list_lock);
		return;
	}
	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * inline functions
 */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}

static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(seg_off << sbi->log_blocks_per_seg << 1) +
		(block_off & (sbi->blocks_per_seg - 1)));

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += sbi->blocks_per_seg;

	return block_addr;
}
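
/*
 * The NAT area keeps two copies of every NAT block, laid out as segment
 * pairs: copy #0 of segment s, then copy #1 of segment s, then both
 * copies of segment s + 1, and so on; nat_bitmap records which copy of
 * each block is currently valid. Illustrative example (assuming 512
 * blocks per segment): block_off 600 gives seg_off 1, so the block sits
 * at nat_blkaddr + 2 * 512 + (600 % 512) = nat_blkaddr + 1112, plus one
 * more segment if its bitmap bit is set.
 */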

static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	if ((block_addr >> sbi->log_blocks_per_seg) % 2)
		block_addr -= sbi->blocks_per_seg;
	else
		block_addr += sbi->blocks_per_seg;

	return block_addr + nm_i->nat_blkaddr;
}

static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
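
/*
 * Together these implement the NAT ping-pong: next_nat_addr() returns
 * the address of the other copy of a NAT block, and set_to_next_nat()
 * flips the bitmap bit so that copy becomes the valid one once the
 * updated block has been written there.
 */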

static inline nid_t ino_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.ino);
}

static inline nid_t nid_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.nid);
}

static inline unsigned int ofs_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);
	unsigned flag = le32_to_cpu(rn->footer.flag);

	return flag >> OFFSET_BIT_SHIFT;
}

static inline __u64 cpver_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le64_to_cpu(rn->footer.cp_ver);
}

static inline block_t next_blkaddr_of_node(struct page *node_page)
{
	struct f2fs_node *rn = F2FS_NODE(node_page);

	return le32_to_cpu(rn->footer.next_blkaddr);
}

static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* preserve old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}
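
/*
 * Node footer flag layout: the low bits hold per-node marks
 * (COLD_BIT_SHIFT, FSYNC_BIT_SHIFT, DENT_BIT_SHIFT, covered by
 * OFFSET_BIT_MASK) and the remaining high bits hold the node offset,
 * which is why fill_node_footer() masks the old flag before OR-ing in
 * the shifted offset.
 */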

static inline void copy_node_footer(struct page *dst, struct page *src)
{
	struct f2fs_node *src_rn = F2FS_NODE(src);
	struct f2fs_node *dst_rn = F2FS_NODE(dst);

	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}

static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}

static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
		__u64 crc = le32_to_cpu(*((__le32 *)
				((unsigned char *)ckpt + crc_offset)));
		cp_ver |= (crc << 32);
	}
	return cpu_to_le64(cp_ver) == cpver_of_node(page);
}
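
/*
 * When CP_CRC_RECOVERY_FLAG is set, both helpers fold the checkpoint
 * CRC into the upper 32 bits of cp_ver before it is stored in (or
 * compared against) the node footer, so is_recoverable_dnode() only
 * accepts nodes written under the currently valid checkpoint.
 */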

/*
 * f2fs assigns the following node offsets described as (num).
 * N = NIDS_PER_BLOCK
 *
 *  Inode block (0)
 *    |- direct node (1)
 *    |- direct node (2)
 *    |- indirect node (3)
 *    |            `- direct node (4 => 4 + N - 1)
 *    |- indirect node (4 + N)
 *    |            `- direct node (5 + N => 5 + 2N - 1)
 *    `- double indirect node (5 + 2N)
 *                 `- indirect node (6 + 2N)
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + x(N + 1))
 *                       `- direct node
 *                 ......
 *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
 *                       `- direct node
 */
static inline bool IS_DNODE(struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return false;

	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}
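
/*
 * Concrete illustration (assuming 4KB blocks, so N = NIDS_PER_BLOCK =
 * 1018): offsets 3, 1022 (4 + N) and 2041 (5 + 2N) are the indirect and
 * double-indirect nodes, so IS_DNODE() rejects them; from 2042 on,
 * every (N + 1)-th offset is again an indirect node under the
 * double-indirect tree.
 */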

static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}

static inline nid_t get_nid(struct page *p, int off, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	if (i)
		return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
	return le32_to_cpu(rn->in.nid[off]);
}
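
/*
 * In both helpers the bool argument selects the node layout: true means
 * p is an inode page, whose five i_nid slots (two direct, two indirect,
 * one double-indirect) start at NODE_DIR1_BLOCK, while false means p is
 * an (in)direct node page indexed directly by off.
 */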

/*
 * Coldness identification:
 *  - Mark cold files in f2fs_inode_info
 *  - Mark cold node blocks in their node footer
 *  - Mark cold data pages in page cache
 */
static inline int is_cold_data(struct page *page)
{
	return PageChecked(page);
}

static inline void set_cold_data(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_cold_data(struct page *page)
{
	ClearPageChecked(page);
}

static inline int is_node(struct page *page, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);

	return le32_to_cpu(rn->footer.flag) & (1 << type);
}

#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)

static inline int is_inline_node(struct page *page)
{
	return PageChecked(page);
}

static inline void set_inline_node(struct page *page)
{
	SetPageChecked(page);
}

static inline void clear_inline_node(struct page *page)
{
	ClearPageChecked(page);
}
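
/*
 * Note that the cold-data and inline-node helpers both reuse the
 * generic PG_checked page flag; this works only because the former are
 * applied to data pages and the latter to node pages, so the two uses
 * never meet on the same page.
 */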

static inline void set_cold_node(struct inode *inode, struct page *page)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (S_ISDIR(inode->i_mode))
		flag &= ~(0x1 << COLD_BIT_SHIFT);
	else
		flag |= (0x1 << COLD_BIT_SHIFT);
	rn->footer.flag = cpu_to_le32(flag);
}

static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);

	if (mark)
		flag |= (0x1 << type);
	else
		flag &= ~(0x1 << type);
	rn->footer.flag = cpu_to_le32(flag);
}
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)