xref: /linux/fs/minix/dir.c (revision 2775df6e5e324be9dc375f7db2c8d3042df72bbf)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/fs/minix/dir.c
4  *
5  *  Copyright (C) 1991, 1992 Linus Torvalds
6  *
7  *  minix directory handling functions
8  *
9  *  Updated to filesystem version 3 by Daniel Aragones
10  */
11 
12 #include "minix.h"
13 #include <linux/buffer_head.h>
14 #include <linux/highmem.h>
15 #include <linux/swap.h>
16 
17 typedef struct minix_dir_entry minix_dirent;
18 typedef struct minix3_dir_entry minix3_dirent;
19 
20 static int minix_readdir(struct file *, struct dir_context *);
21 
22 const struct file_operations minix_dir_operations = {
23 	.llseek		= generic_file_llseek,
24 	.read		= generic_read_dir,
25 	.iterate_shared	= minix_readdir,
26 	.fsync		= generic_file_fsync,
27 };
28 
29 /*
30  * Return the offset into page `page_nr' of the last valid
31  * byte in that page, plus one.
32  */
33 static unsigned
34 minix_last_byte(struct inode *inode, unsigned long page_nr)
35 {
36 	unsigned last_byte = PAGE_SIZE;
37 
38 	if (page_nr == (inode->i_size >> PAGE_SHIFT))
39 		last_byte = inode->i_size & (PAGE_SIZE - 1);
40 	return last_byte;
41 }
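
/*
 * A worked example of the calculation above, assuming the common
 * PAGE_SIZE of 4096: a directory with i_size == 5000 spans two pages;
 * page 0 reports 4096 (every byte valid), while page 1 reports
 * 5000 & (4096 - 1) == 904.  The scan loops below use this value, minus
 * one directory entry size, as the upper bound for walking a page.
 */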
42 
43 static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
44 {
45 	struct address_space *mapping = folio->mapping;
46 	struct inode *dir = mapping->host;
47 
48 	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
49 
50 	if (pos+len > dir->i_size) {
51 		i_size_write(dir, pos+len);
52 		mark_inode_dirty(dir);
53 	}
54 	folio_unlock(folio);
55 }
56 
57 static int minix_handle_dirsync(struct inode *dir)
58 {
59 	int err;
60 
61 	err = filemap_write_and_wait(dir->i_mapping);
62 	if (!err)
63 		err = sync_inode_metadata(dir, 1);
64 	return err;
65 }
66 
67 static void *dir_get_folio(struct inode *dir, unsigned long n,
68 		struct folio **foliop)
69 {
70 	struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
71 
72 	if (IS_ERR(folio))
73 		return ERR_CAST(folio);
74 	*foliop = folio;
75 	return kmap_local_folio(folio, 0);
76 }
77 
78 static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
79 {
80 	return (void*)((char*)de + sbi->s_dirsize);
81 }
82 
83 static int minix_readdir(struct file *file, struct dir_context *ctx)
84 {
85 	struct inode *inode = file_inode(file);
86 	struct super_block *sb = inode->i_sb;
87 	struct minix_sb_info *sbi = minix_sb(sb);
88 	unsigned chunk_size = sbi->s_dirsize;
89 	unsigned long npages = dir_pages(inode);
90 	unsigned long pos = ctx->pos;
91 	unsigned offset;
92 	unsigned long n;
93 
94 	ctx->pos = pos = ALIGN(pos, chunk_size);
95 	if (pos >= inode->i_size)
96 		return 0;
97 
98 	offset = pos & ~PAGE_MASK;
99 	n = pos >> PAGE_SHIFT;
100 
101 	for ( ; n < npages; n++, offset = 0) {
102 		char *p, *kaddr, *limit;
103 		struct folio *folio;
104 
105 		kaddr = dir_get_folio(inode, n, &folio);
106 		if (IS_ERR(kaddr))
107 			continue;
108 		p = kaddr+offset;
109 		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
110 		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
111 			const char *name;
112 			__u32 inumber;
113 			if (sbi->s_version == MINIX_V3) {
114 				minix3_dirent *de3 = (minix3_dirent *)p;
115 				name = de3->name;
116 				inumber = de3->inode;
117 			} else {
118 				minix_dirent *de = (minix_dirent *)p;
119 				name = de->name;
120 				inumber = de->inode;
121 			}
122 			if (inumber) {
123 				unsigned l = strnlen(name, sbi->s_namelen);
124 				if (!dir_emit(ctx, name, l,
125 					      inumber, DT_UNKNOWN)) {
126 					folio_release_kmap(folio, p);
127 					return 0;
128 				}
129 			}
130 			ctx->pos += chunk_size;
131 		}
132 		folio_release_kmap(folio, kaddr);
133 	}
134 	return 0;
135 }
136 
137 static inline int namecompare(int len, int maxlen,
138 	const char * name, const char * buffer)
139 {
140 	if (len < maxlen && buffer[len])
141 		return 0;
142 	return !memcmp(name, buffer, len);
143 }
144 
145 /*
146  *	minix_find_entry()
147  *
148  * finds an entry in the specified directory with the wanted name.
149  * It does NOT read the inode of the
150  * entry - you'll have to do that yourself if you want to.
151  *
152  * On success, folio_release_kmap() should be called on *foliop.
153  */
154 minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop)
155 {
156 	const char * name = dentry->d_name.name;
157 	int namelen = dentry->d_name.len;
158 	struct inode * dir = d_inode(dentry->d_parent);
159 	struct super_block * sb = dir->i_sb;
160 	struct minix_sb_info * sbi = minix_sb(sb);
161 	unsigned long n;
162 	unsigned long npages = dir_pages(dir);
163 	char *p;
164 
165 	char *namx;
166 	__u32 inumber;
167 
168 	for (n = 0; n < npages; n++) {
169 		char *kaddr, *limit;
170 
171 		kaddr = dir_get_folio(dir, n, foliop);
172 		if (IS_ERR(kaddr))
173 			continue;
174 
175 		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
176 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
177 			if (sbi->s_version == MINIX_V3) {
178 				minix3_dirent *de3 = (minix3_dirent *)p;
179 				namx = de3->name;
180 				inumber = de3->inode;
181 			} else {
182 				minix_dirent *de = (minix_dirent *)p;
183 				namx = de->name;
184 				inumber = de->inode;
185 			}
186 			if (!inumber)
187 				continue;
188 			if (namecompare(namelen, sbi->s_namelen, name, namx))
189 				goto found;
190 		}
191 		folio_release_kmap(*foliop, kaddr);
192 	}
193 	return NULL;
194 
195 found:
196 	return (minix_dirent *)p;
197 }
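
/*
 * A minimal caller sketch for the contract above, assuming the entry is
 * only read and not modified: the wrapper name is illustrative, but the
 * release step mirrors what minix_inode_by_name() does further down.
 */
#if 0
static int minix_entry_exists(struct dentry *dentry)
{
	struct folio *folio;
	struct minix_dir_entry *de = minix_find_entry(dentry, &folio);

	if (!de)
		return 0;
	/* ... inspect the entry while the kmap is held ... */
	folio_release_kmap(folio, de);
	return 1;
}
#endif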
198 
199 int minix_add_link(struct dentry *dentry, struct inode *inode)
200 {
201 	struct inode *dir = d_inode(dentry->d_parent);
202 	const char * name = dentry->d_name.name;
203 	int namelen = dentry->d_name.len;
204 	struct super_block * sb = dir->i_sb;
205 	struct minix_sb_info * sbi = minix_sb(sb);
206 	struct folio *folio = NULL;
207 	unsigned long npages = dir_pages(dir);
208 	unsigned long n;
209 	char *kaddr, *p;
210 	minix_dirent *de;
211 	minix3_dirent *de3;
212 	loff_t pos;
213 	int err;
214 	char *namx = NULL;
215 	__u32 inumber;
216 
217 	/*
218 	 * We take care of directory expansion in the same loop
219 	 * This code plays outside i_size, so it locks the page
220 	 * to protect that region.
221 	 */
222 	for (n = 0; n <= npages; n++) {
223 		char *limit, *dir_end;
224 
225 		kaddr = dir_get_folio(dir, n, &folio);
226 		if (IS_ERR(kaddr))
227 			return PTR_ERR(kaddr);
228 		folio_lock(folio);
229 		dir_end = kaddr + minix_last_byte(dir, n);
230 		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
231 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
232 			de = (minix_dirent *)p;
233 			de3 = (minix3_dirent *)p;
234 			if (sbi->s_version == MINIX_V3) {
235 				namx = de3->name;
236 				inumber = de3->inode;
237 			} else {
238 				namx = de->name;
239 				inumber = de->inode;
240 			}
241 			if (p == dir_end) {
242 				/* We hit i_size */
243 				if (sbi->s_version == MINIX_V3)
244 					de3->inode = 0;
245 				else
246 					de->inode = 0;
247 				goto got_it;
248 			}
249 			if (!inumber)
250 				goto got_it;
251 			err = -EEXIST;
252 			if (namecompare(namelen, sbi->s_namelen, name, namx))
253 				goto out_unlock;
254 		}
255 		folio_unlock(folio);
256 		folio_release_kmap(folio, kaddr);
257 	}
258 	BUG();
259 	return -EINVAL;
260 
261 got_it:
262 	pos = folio_pos(folio) + offset_in_folio(folio, p);
263 	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
264 	if (err)
265 		goto out_unlock;
266 	memcpy (namx, name, namelen);
267 	if (sbi->s_version == MINIX_V3) {
268 		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
269 		de3->inode = inode->i_ino;
270 	} else {
271 		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
272 		de->inode = inode->i_ino;
273 	}
274 	dir_commit_chunk(folio, pos, sbi->s_dirsize);
275 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
276 	mark_inode_dirty(dir);
277 	err = minix_handle_dirsync(dir);
278 out_put:
279 	folio_release_kmap(folio, kaddr);
280 	return err;
281 out_unlock:
282 	folio_unlock(folio);
283 	goto out_put;
284 }
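
/*
 * A sketch of how minix_add_link() is typically consumed from a create
 * or link path, assuming the usual "add the entry, then instantiate the
 * dentry" pattern; the helper name and error handling here are
 * illustrative, not a quote of the namei code.
 */
#if 0
static int minix_add_nondir_sketch(struct dentry *dentry, struct inode *inode)
{
	int err = minix_add_link(dentry, inode);

	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	/* Undo the link count taken by the caller and drop the inode. */
	inode_dec_link_count(inode);
	iput(inode);
	return err;
}
#endif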
285 
286 int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio)
287 {
288 	struct inode *inode = folio->mapping->host;
289 	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
290 	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
291 	unsigned len = sbi->s_dirsize;
292 	int err;
293 
294 	folio_lock(folio);
295 	err = minix_prepare_chunk(folio, pos, len);
296 	if (err) {
297 		folio_unlock(folio);
298 		return err;
299 	}
300 	if (sbi->s_version == MINIX_V3)
301 		((minix3_dirent *)de)->inode = 0;
302 	else
303 		de->inode = 0;
304 	dir_commit_chunk(folio, pos, len);
305 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
306 	mark_inode_dirty(inode);
307 	return minix_handle_dirsync(inode);
308 }
309 
310 int minix_make_empty(struct inode *inode, struct inode *dir)
311 {
312 	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
313 	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
314 	char *kaddr;
315 	int err;
316 
317 	if (IS_ERR(folio))
318 		return PTR_ERR(folio);
319 	err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize);
320 	if (err) {
321 		folio_unlock(folio);
322 		goto fail;
323 	}
324 
325 	kaddr = kmap_local_folio(folio, 0);
326 	memset(kaddr, 0, folio_size(folio));
327 
328 	if (sbi->s_version == MINIX_V3) {
329 		minix3_dirent *de3 = (minix3_dirent *)kaddr;
330 
331 		de3->inode = inode->i_ino;
332 		strcpy(de3->name, ".");
333 		de3 = minix_next_entry(de3, sbi);
334 		de3->inode = dir->i_ino;
335 		strcpy(de3->name, "..");
336 	} else {
337 		minix_dirent *de = (minix_dirent *)kaddr;
338 
339 		de->inode = inode->i_ino;
340 		strcpy(de->name, ".");
341 		de = minix_next_entry(de, sbi);
342 		de->inode = dir->i_ino;
343 		strcpy(de->name, "..");
344 	}
345 	kunmap_local(kaddr);
346 
347 	dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize);
348 	err = minix_handle_dirsync(inode);
349 fail:
350 	folio_put(folio);
351 	return err;
352 }
353 
354 /*
355  * routine to check that the specified directory is empty (for rmdir)
356  */
357 int minix_empty_dir(struct inode * inode)
358 {
359 	struct folio *folio = NULL;
360 	unsigned long i, npages = dir_pages(inode);
361 	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
362 	char *name, *kaddr;
363 	__u32 inumber;
364 
365 	for (i = 0; i < npages; i++) {
366 		char *p, *limit;
367 
368 		kaddr = dir_get_folio(inode, i, &folio);
369 		if (IS_ERR(kaddr))
370 			continue;
371 
372 		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
373 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
374 			if (sbi->s_version == MINIX_V3) {
375 				minix3_dirent *de3 = (minix3_dirent *)p;
376 				name = de3->name;
377 				inumber = de3->inode;
378 			} else {
379 				minix_dirent *de = (minix_dirent *)p;
380 				name = de->name;
381 				inumber = de->inode;
382 			}
383 
384 			if (inumber != 0) {
385 				/* check for . and .. */
386 				if (name[0] != '.')
387 					goto not_empty;
388 				if (!name[1]) {
389 					if (inumber != inode->i_ino)
390 						goto not_empty;
391 				} else if (name[1] != '.')
392 					goto not_empty;
393 				else if (name[2])
394 					goto not_empty;
395 			}
396 		}
397 		folio_release_kmap(folio, kaddr);
398 	}
399 	return 1;
400 
401 not_empty:
402 	folio_release_kmap(folio, kaddr);
403 	return 0;
404 }
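
/*
 * A sketch of the rmdir-style check this helper exists for, assuming the
 * VFS has already locked the parent and the victim; the wrapper name is
 * illustrative and the actual unlink/link-count bookkeeping is elided.
 */
#if 0
static int minix_rmdir_sketch(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	/* Only "." and ".." may remain if the directory is removable. */
	if (!minix_empty_dir(inode))
		return -ENOTEMPTY;
	/* ... remove the entry from the parent and drop the links ... */
	return 0;
}
#endif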
405 
406 /* Rewrites an existing entry in place; the caller releases the folio. */
407 int minix_set_link(struct minix_dir_entry *de, struct folio *folio,
408 		struct inode *inode)
409 {
410 	struct inode *dir = folio->mapping->host;
411 	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
412 	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
413 	int err;
414 
415 	folio_lock(folio);
416 	err = minix_prepare_chunk(folio, pos, sbi->s_dirsize);
417 	if (err) {
418 		folio_unlock(folio);
419 		return err;
420 	}
421 	if (sbi->s_version == MINIX_V3)
422 		((minix3_dirent *)de)->inode = inode->i_ino;
423 	else
424 		de->inode = inode->i_ino;
425 	dir_commit_chunk(folio, pos, sbi->s_dirsize);
426 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
427 	mark_inode_dirty(dir);
428 	return minix_handle_dirsync(dir);
429 }
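
/*
 * A sketch of the rename-path usage of minix_set_link(), assuming the
 * entry was obtained from minix_find_entry() above, so the caller still
 * has to drop the kmap/folio pair afterwards; the wrapper name is
 * illustrative.
 */
#if 0
static int minix_repoint_entry_sketch(struct dentry *new_dentry,
		struct inode *old_inode)
{
	struct folio *folio;
	struct minix_dir_entry *de = minix_find_entry(new_dentry, &folio);
	int err;

	if (!de)
		return -ENOENT;
	err = minix_set_link(de, folio, old_inode);
	folio_release_kmap(folio, de);
	return err;
}
#endif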
430 
431 struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop)
432 {
433 	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
434 	struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop);
435 
436 	if (!IS_ERR(de))
437 		return minix_next_entry(de, sbi);
438 	return NULL;
439 }
440 
441 ino_t minix_inode_by_name(struct dentry *dentry)
442 {
443 	struct folio *folio;
444 	struct minix_dir_entry *de = minix_find_entry(dentry, &folio);
445 	ino_t res = 0;
446 
447 	if (de) {
448 		struct inode *inode = folio->mapping->host;
449 		struct minix_sb_info *sbi = minix_sb(inode->i_sb);
450 
451 		if (sbi->s_version == MINIX_V3)
452 			res = ((minix3_dirent *) de)->inode;
453 		else
454 			res = de->inode;
455 		folio_release_kmap(folio, de);
456 	}
457 	return res;
458 }
459