xref: /linux/fs/hfsplus/inode.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

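/*
 * Address space operations: thin wrappers that plug hfsplus_get_block()
 * into the generic buffer-head helpers.  cont_write_begin() additionally
 * zero-fills the range between the on-disk size (phys_size) and the start
 * of the write, so sparse tails stay consistent.
 */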
static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host).phys_size);
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

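/*
 * releasepage() for the special B-tree inodes (extents, catalog and
 * attributes files).  Before a page can be dropped, any clean, unused
 * b-nodes cached for it must be unhashed and freed; if a node is still
 * referenced the page has to stay.
 */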
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		/* the page belongs to at most one node */
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (node && atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		/* several nodes share this page */
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

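/* O_DIRECT support: hand the request straight to the block layer. */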
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_block, NULL);
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash		= hfsplus_hash_dentry,
	.d_compare	= hfsplus_compare_dentry,
};

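/*
 * Lookup of the magic "rsrc" name inside a regular file: create (or reuse)
 * a hidden in-core inode for the file's resource fork and link the data
 * and resource inodes to each other through rsrc_inode.
 */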
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	mutex_init(&HFSPLUS_I(inode).extents_lock);
	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&fd);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

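/*
 * Derive the Unix owner, group and mode of an inode from the on-disk
 * permission structure, falling back to the uid/gid/umask mount options
 * for entries that carry no Unix permissions.
 */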
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
	struct super_block *sb = inode->i_sb;
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	inode->i_uid = be32_to_cpu(perms->owner);
	if (!inode->i_uid && !mode)
		inode->i_uid = HFSPLUS_SB(sb).uid;

	inode->i_gid = be32_to_cpu(perms->group);
	if (!inode->i_gid && !mode)
		inode->i_gid = HFSPLUS_SB(sb).gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) :
			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
			~(HFSPLUS_SB(sb).umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode).rootflags = perms->rootflags;
	HFSPLUS_I(inode).userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

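/*
 * The reverse of hfsplus_get_perms(): fill the on-disk permission
 * structure from the in-core inode.
 */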
static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
	if (inode->i_flags & S_IMMUTABLE)
		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
	else
		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		perms->rootflags |= HFSPLUS_FLG_APPEND;
	else
		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
	perms->userflags = HFSPLUS_I(inode).userflags;
	perms->mode = cpu_to_be16(inode->i_mode);
	perms->owner = cpu_to_be32(inode->i_uid);
	perms->group = cpu_to_be32(inode->i_gid);
	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}

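/*
 * Opens are counted against the data inode, even when the resource fork
 * is opened.  Without O_LARGEFILE, files bigger than MAX_NON_LFS are
 * refused with -EOVERFLOW.
 */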
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode).opencnt);
	return 0;
}

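/*
 * On last close, trim any preallocation beyond i_size.  If unlink marked
 * the inode dead (S_DEAD) while it was open, its catalog entry is removed
 * from the hidden directory and its blocks are freed.
 */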
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.ioctl		= hfsplus_ioctl,
};

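/*
 * Allocate a fresh in-core inode, give it the next available catalog node
 * ID and install the operation vectors that match its file type.
 */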
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	mutex_init(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	sb->s_dirt = 1;

	return inode;
}

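/*
 * Free the space used by a deleted inode's data and adjust the volume's
 * folder/file counters.
 */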
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb).folder_count--;
		sb->s_dirt = 1;
		return;
	}
	HFSPLUS_SB(sb).file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	sb->s_dirt = 1;
}

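/*
 * Load the state of one fork (data or resource) from its on-disk fork
 * descriptor: the first extent record, the size and block counts, and the
 * clump size used for preallocation.
 */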
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
				HFSPLUS_SB(sb).data_clump_blocks;
}

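/* Write the in-core fork state back into an on-disk fork descriptor. */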
void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}

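/*
 * Initialise an inode from the catalog record that fd points at.  Folder
 * and file records are handled separately; any other record type is
 * rejected with -EIO.
 */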
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).dev = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
			/* don't read past a truncated catalog record */
			printk(KERN_ERR "hfs: bad catalog folder entry\n");
			return -EIO;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode).create_date = folder->create_date;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
			/* don't read past a truncated catalog record */
			printk(KERN_ERR "hfs: bad catalog file entry\n");
			return -EIO;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			/* the dev field of a regular file carries its link count */
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode).create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

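/*
 * Write an inode's metadata back into its catalog record.  A resource-fork
 * inode only updates the resource fork of the shared file record; all other
 * fields are owned by the data inode.
 */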
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* don't touch a truncated catalog record */
			goto out;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* don't touch a truncated catalog record */
			goto out;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		if (S_ISREG(inode->i_mode))
			HFSPLUS_I(inode).dev = inode->i_nlink;
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
		hfsplus_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}
527