// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
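
/*
 * Worked example for the two helpers above (illustrative numbers,
 * assuming SECTOR_SIZE == 512 and a 1024-byte MFT record, i.e.
 * fix_num == 3: one sequence word plus one saved word per sector):
 *
 *	pre-write:  usa[0] becomes, say, 0x0007; the last word of each
 *	            512-byte sector (offsets 510 and 1022) is saved into
 *	            usa[1]/usa[2] and overwritten with 0x0007;
 *	post-read:  the words at 510 and 1022 are checked against usa[0]
 *	            and then restored from usa[1]/usa[2]; any mismatch
 *	            yields -E_NTFS_FIXUP (a torn multi-sector write).
 */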

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend due to NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}
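
/*
 * Shape of the reference built above (a sketch; struct MFT_REF, per
 * ntfs.h, packs a 48-bit record number into low:high plus a 16-bit
 * sequence, and these system-file lookups pass seq == record number):
 *
 *	struct MFT_REF ref = {
 *		.low  = cpu_to_le32(MFT_REC_EXTEND),	// record 11
 *		.high = 0,
 *		.seq  = cpu_to_le16(MFT_REC_EXTEND),
 *	};
 */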

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always in use, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}
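
/*
 * Worked example for the zone trimming above (illustrative numbers,
 * assuming NTFS_MIN_MFT_ZONE is below the result): with zlen == 1000
 * reserved zone clusters and a request of len == 300,
 * ztrim = clamp(300, 500, 1000) == 500 and
 * new_zlen = max(1000 - 500, NTFS_MIN_MFT_ZONE) == 500, so at least
 * half of the MftZone is released before the full-bitmap retry with
 * BITMAP_FIND_FULL.
 */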

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}
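
/*
 * Example of the fallback arithmetic above (illustrative numbers):
 * with 1K MFT records (record_bits == 10) and 4K clusters, mlen == 8
 * new records occupy 8 << 10 == 8192 bytes, so bytes_to_cluster()
 * asks for 2 clusters out of the remaining 'avail' space.
 */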

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records;
 * see attrib.c: "at least two MFT to avoid recursive loop".
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the free MFT record nearest to '0'. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purposes. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}
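
/*
 * Bookkeeping sketch for the reserved records above: record rno in
 * [MFT_REC_RESERVED, MFT_REC_FREE) maps to bit (rno - MFT_REC_RESERVED)
 * of sbi->mft.reserved_bitmap, so e.g.
 *
 *	__clear_bit(13 - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
 *
 * marks reserved record 13 as usable (bit 2 clear); a set bit means
 * "not available".
 */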

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT.
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything if we still have a non-empty MFT zone. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);

	ntfs_inode_err(inode, "%s", hint);

	/* Do not call make_bad_inode()! */
	ni->ni_bad = true;

	/* Avoid recursion if bad inode is $Volume. */
	if (inode->i_ino != MFT_REC_VOL &&
	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
	}
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* Write MFT record to disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}

/*
 * security_hash - Calculate a hash of the security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
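
/*
 * Equivalent formulation of the hash above (a hypothetical userspace
 * sketch; 'words'/'n' are illustrative names): the loop is a
 * rotate-left-by-3 accumulate over 32-bit little-endian words, with
 * any trailing 1-3 bytes ignored.
 *
 *	uint32_t hash = 0;
 *	for (size_t i = 0; i < n; i++)
 *		hash = ((hash << 3) | (hash >> 29)) + le32toh(words[i]);
 */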

/*
 * Simple wrapper for sb_bread_unmovable.
 */
struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct buffer_head *bh;

	if (unlikely(block >= sbi->volume.blocks)) {
		/* Prevent generic message "attempt to access beyond end of device". */
		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
			 (u64)block << sb->s_blocksize_bits);
		return NULL;
	}

	bh = sb_bread_unmovable(sb, block);
	if (bh)
		return bh;

	ntfs_err(sb, "failed to read volume at offset 0x%llx",
		 (u64)block << sb->s_blocksize_bits);
	return NULL;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}
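
/*
 * Example of the splitting above (illustrative numbers): with a
 * 1024-byte block size, lbo == 1030 and bytes == 2048, the first block
 * is read-modify-written for off == 6 / op == 1018 (__bread path), the
 * second block is fully overwritten (op == 1024, __getblk path), and
 * the third block takes the trailing 6 bytes via another
 * read-modify-write.
 */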

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use absolute boot's 'MFTCluster' to read record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_bio_pages - Read/write pages from/to disk.
 */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
		   enum req_op op)
{
	int err = 0;
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	struct page *page;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	u64 lbo, len;
	size_t run_idx;
	struct blk_plug plug;

	if (!bytes)
		return 0;

	blk_start_plug(&plug);

	/* Align vbo and bytes to 512-byte boundaries. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
	bytes = lbo - vbo;

	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}
	off = vbo & sbi->cluster_mask;
	page_idx = 0;
	page = pages[0];

	for (;;) {
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
new_bio:
		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		while (len) {
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;

			if (bio_add_page(bio, page, add, off) < add)
				goto new_bio;

			if (bytes <= add)
				goto out;
			bytes -= add;
			vbo += add;

			if (add + off == PAGE_SIZE) {
				page_idx += 1;
				if (WARN_ON(page_idx >= nr_pages)) {
					err = -EINVAL;
					goto out;
				}
				page = pages[page_idx];
			}

			if (len <= add)
				break;
			len -= add;
			lbo += add;
		}

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}
		off = 0;
	}
out:
	if (bio) {
		if (!err)
			err = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return err;
}
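
/*
 * Example of the 512-byte alignment above (illustrative numbers):
 * vbo == 1000 and bytes == 100 round to vbo == 512 and an end offset
 * of 1536, so 1024 bytes (sectors 1 and 2) are actually transferred.
 */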

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1;
 * this marks the logfile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
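
/*
 * Worked example (illustrative numbers, cluster_bits == 12, i.e. 4K
 * clusters): vbo == 0x5123 lies in VCN 5 at off == 0x123; if the run
 * maps VCN 5 to LCN 100 with 8 clusters remaining, then
 * *lbo == (100 << 12) + 0x123 == 0x64123 and
 * *bytes == (8 << 12) - 0x123 == 0x7edd.
 */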

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);
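
/*
 * Decoded layout of the blob above (little-endian; offsets are into the
 * self-relative descriptor):
 *	0x00: Revision 1, Control 0x8004 (SE_DACL_PRESENT | SE_SELF_RELATIVE)
 *	0x04: Owner at 0x30;  0x08: Group at 0x40;  0x0C: no SACL;
 *	0x10: DACL at 0x14
 *	0x14: ACL revision 2, size 0x1C, one ACE
 *	0x1C: ACCESS_ALLOWED ACE, flags 0x03 (OI|CI), mask 0x001F01FF
 *	      (FILE_ALL_ACCESS), SID S-1-1-0 (Everyone)
 *	0x30/0x40: owner/group SID S-1-5-32-544 (Administrators)
 */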

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/* Security looks too big; 0x10000 is an arbitrary large limit. */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}

/*
 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor.  When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip
 * over the mirror portion.
 */
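/*
 * Offset sketch for the mirroring described above (illustrative numbers,
 * SecurityDescriptorsBlockSize == 0x40000): a descriptor written at
 * X == 0x3F000 is mirrored at 0x7F000; if next_off == 0x3FF00 and the
 * next descriptor needs 0x200 bytes, only left == 0x100 remains in the
 * chunk, so next_off jumps by 0x40000 + left to 0x80000, the start of
 * the next live chunk. Live data thus occupies alternating 256K chunks.
 */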
ntfs_insert_security(struct ntfs_sb_info * sbi,const struct SECURITY_DESCRIPTOR_RELATIVE * sd,u32 size_sd,__le32 * security_id,bool * inserted)2059 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2060 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2061 			 u32 size_sd, __le32 *security_id, bool *inserted)
2062 {
2063 	int err, diff;
2064 	struct ntfs_inode *ni = sbi->security.ni;
2065 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2066 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2067 	struct NTFS_DE_SDH *e;
2068 	struct NTFS_DE_SDH sdh_e;
2069 	struct NTFS_DE_SII sii_e;
2070 	struct SECURITY_HDR *d_security;
2071 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2072 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2073 	struct SECURITY_KEY hash_key;
2074 	struct ntfs_fnd *fnd_sdh = NULL;
2075 	const struct INDEX_ROOT *root_sdh;
2076 	const struct INDEX_ROOT *root_sii;
2077 	u64 mirr_off, new_sds_size;
2078 	u32 next, left;
2079 
2080 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2081 		      SecurityDescriptorsBlockSize);
2082 
2083 	hash_key.hash = security_hash(sd, size_sd);
2084 	hash_key.sec_id = SECURITY_ID_INVALID;
2085 
2086 	if (inserted)
2087 		*inserted = false;
2088 	*security_id = SECURITY_ID_INVALID;
2089 
2090 	/* Allocate a temporary buffer. */
2091 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2092 	if (!d_security)
2093 		return -ENOMEM;
2094 
2095 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2096 
2097 	fnd_sdh = fnd_get();
2098 	if (!fnd_sdh) {
2099 		err = -ENOMEM;
2100 		goto out;
2101 	}
2102 
2103 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2104 	if (!root_sdh) {
2105 		err = -EINVAL;
2106 		goto out;
2107 	}
2108 
2109 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2110 	if (!root_sii) {
2111 		err = -EINVAL;
2112 		goto out;
2113 	}
2114 
2115 	/*
2116 	 * Check if such a security descriptor already exists.
2117 	 * Use the "SDH" index and the hash to get the offset in "SDS".
2118 	 */
2119 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2120 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2121 			fnd_sdh);
2122 	if (err)
2123 		goto out;
2124 
2125 	while (e) {
2126 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2127 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2128 					       le64_to_cpu(e->sec_hdr.off),
2129 					       d_security, new_sec_size, NULL);
2130 			if (err)
2131 				goto out;
2132 
2133 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2134 			    d_security->key.hash == hash_key.hash &&
2135 			    !memcmp(d_security + 1, sd, size_sd)) {
2136 				/* Such a security descriptor already exists. */
2137 				*security_id = d_security->key.sec_id;
2138 				err = 0;
2139 				goto out;
2140 			}
2141 		}
2142 
2143 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2144 				     (struct NTFS_DE **)&e, fnd_sdh);
2145 		if (err)
2146 			goto out;
2147 
2148 		if (!e || e->key.hash != hash_key.hash)
2149 			break;
2150 	}
2151 
2152 	/* Compute the unused space left in the current 256K chunk. */
2153 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2154 	left = SecurityDescriptorsBlockSize - next;
2155 
2156 	/* The new descriptor will not fit before the 256K chunk boundary: */
2157 	if (left < new_sec_size) {
2158 		/* Skip the remaining "left" bytes plus the 256K mirror portion. */
2159 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2160 	}
2161 
2162 	/* Zero tail of previous security. */
2163 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2164 
2165 	/*
2166 	 * Example:
2167 	 * 0x40438 == ni->vfs_inode.i_size
2168 	 * 0x00440 == sbi->security.next_off
2169 	 * need to zero [0x438-0x440)
2170 	 * if (next > used), then zero "tozero" = (next - used) bytes
2171 	 * starting at offset (sbi->security.next_off - tozero),
2172 	 * i.e. the byte range [used, next) of the current chunk.
2173 	 */
2174 
2175 	/* Format new security descriptor. */
2176 	d_security->key.hash = hash_key.hash;
2177 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2178 	d_security->off = cpu_to_le64(sbi->security.next_off);
2179 	d_security->size = cpu_to_le32(new_sec_size);
2180 	memcpy(d_security + 1, sd, size_sd);
2181 
2182 	/* Write main SDS bucket. */
2183 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2184 				d_security, aligned_sec_size, 0);
2185 
2186 	if (err)
2187 		goto out;
2188 
2189 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2190 	new_sds_size = mirr_off + aligned_sec_size;
2191 
2192 	if (new_sds_size > ni->vfs_inode.i_size) {
2193 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2194 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2195 				    new_sds_size, &new_sds_size, false, NULL);
2196 		if (err)
2197 			goto out;
2198 	}
2199 
2200 	/* Write the mirror copy of the SDS bucket. */
2201 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2202 				aligned_sec_size, 0);
2203 	if (err)
2204 		goto out;
2205 
2206 	/* Fill SII entry. */
2207 	sii_e.de.view.data_off =
2208 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2209 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2210 	sii_e.de.view.res = 0;
2211 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2212 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2213 	sii_e.de.flags = 0;
2214 	sii_e.de.res = 0;
2215 	sii_e.sec_id = d_security->key.sec_id;
2216 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2217 
2218 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2219 	if (err)
2220 		goto out;
2221 
2222 	/* Fill SDH entry. */
2223 	sdh_e.de.view.data_off =
2224 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2225 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2226 	sdh_e.de.view.res = 0;
2227 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2228 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2229 	sdh_e.de.flags = 0;
2230 	sdh_e.de.res = 0;
2231 	sdh_e.key.hash = d_security->key.hash;
2232 	sdh_e.key.sec_id = d_security->key.sec_id;
2233 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2234 	sdh_e.magic[0] = cpu_to_le16('I');
2235 	sdh_e.magic[1] = cpu_to_le16('I');
2236 
2237 	fnd_clear(fnd_sdh);
2238 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2239 				fnd_sdh, 0);
2240 	if (err)
2241 		goto out;
2242 
2243 	*security_id = d_security->key.sec_id;
2244 	if (inserted)
2245 		*inserted = true;
2246 
2247 	/* Update Id and offset for next descriptor. */
2248 	sbi->security.next_id += 1;
2249 	sbi->security.next_off += aligned_sec_size;
2250 
2251 out:
2252 	fnd_put(fnd_sdh);
2253 	mark_inode_dirty(&ni->vfs_inode);
2254 	ni_unlock(ni);
2255 	kfree(d_security);
2256 
2257 	return err;
2258 }
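
/*
 * Note (editorial annotation): the descriptor inserted above is reachable
 * through both $Secure indexes - $SII, keyed by the 32-bit security id,
 * and $SDH, keyed by (hash, security id) - so lookups can go by id
 * (ntfs_get_security_by_id) or by content hash (the deduplication scan
 * at the top of this function).
 */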
2259 
2260 /*
2261  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2262  */
2263 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2264 {
2265 	int err;
2266 	struct ntfs_inode *ni = sbi->reparse.ni;
2267 	struct ntfs_index *indx = &sbi->reparse.index_r;
2268 	struct ATTRIB *attr;
2269 	struct ATTR_LIST_ENTRY *le;
2270 	const struct INDEX_ROOT *root_r;
2271 
2272 	if (!ni)
2273 		return 0;
2274 
2275 	le = NULL;
2276 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2277 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2278 	if (!attr) {
2279 		err = -EINVAL;
2280 		goto out;
2281 	}
2282 
2283 	root_r = resident_data(attr);
2284 	if (root_r->type != ATTR_ZERO ||
2285 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2286 		err = -EINVAL;
2287 		goto out;
2288 	}
2289 
2290 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2291 	if (err)
2292 		goto out;
2293 
2294 out:
2295 	return err;
2296 }
2297 
2298 /*
2299  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2300  */
2301 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2302 {
2303 	int err;
2304 	struct ntfs_inode *ni = sbi->objid.ni;
2305 	struct ntfs_index *indx = &sbi->objid.index_o;
2306 	struct ATTRIB *attr;
2307 	struct ATTR_LIST_ENTRY *le;
2308 	const struct INDEX_ROOT *root;
2309 
2310 	if (!ni)
2311 		return 0;
2312 
2313 	le = NULL;
2314 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2315 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2316 	if (!attr) {
2317 		err = -EINVAL;
2318 		goto out;
2319 	}
2320 
2321 	root = resident_data(attr);
2322 	if (root->type != ATTR_ZERO ||
2323 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2324 		err = -EINVAL;
2325 		goto out;
2326 	}
2327 
2328 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2329 	if (err)
2330 		goto out;
2331 
2332 out:
2333 	return err;
2334 }
2335 
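/*
 * ntfs_objid_remove - Remove an object id from $Extend/$ObjId.
 */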
2336 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2337 {
2338 	int err;
2339 	struct ntfs_inode *ni = sbi->objid.ni;
2340 	struct ntfs_index *indx = &sbi->objid.index_o;
2341 
2342 	if (!ni)
2343 		return -EINVAL;
2344 
2345 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2346 
2347 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2348 
2349 	mark_inode_dirty(&ni->vfs_inode);
2350 	ni_unlock(ni);
2351 
2352 	return err;
2353 }
2354 
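/*
 * ntfs_insert_reparse - Insert a reparse tag into $Extend/$Reparse.
 *
 * Usage sketch (editorial annotation, hypothetical names): a caller
 * setting a reparse point on an inode would record it as
 *
 *	err = ntfs_insert_reparse(sbi, rp->ReparseTag, &ref);
 *
 * where "rp" is the reparse buffer being applied and "ref" is the
 * MFT_REF of the affected inode.
 */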
2355 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2356 			const struct MFT_REF *ref)
2357 {
2358 	int err;
2359 	struct ntfs_inode *ni = sbi->reparse.ni;
2360 	struct ntfs_index *indx = &sbi->reparse.index_r;
2361 	struct NTFS_DE_R re;
2362 
2363 	if (!ni)
2364 		return -EINVAL;
2365 
2366 	memset(&re, 0, sizeof(re));
2367 
2368 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2369 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2370 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2371 
2372 	re.key.ReparseTag = rtag;
2373 	memcpy(&re.key.ref, ref, sizeof(*ref));
2374 
2375 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2376 
2377 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2378 
2379 	mark_inode_dirty(&ni->vfs_inode);
2380 	ni_unlock(ni);
2381 
2382 	return err;
2383 }
2384 
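/*
 * ntfs_remove_reparse - Remove a reparse tag from $Extend/$Reparse.
 *
 * A zero "rtag" means the tag is unknown; the entry is then located by
 * the MFT reference alone (see the "(void *)1" find context below).
 */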
2385 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2386 			const struct MFT_REF *ref)
2387 {
2388 	int err, diff;
2389 	struct ntfs_inode *ni = sbi->reparse.ni;
2390 	struct ntfs_index *indx = &sbi->reparse.index_r;
2391 	struct ntfs_fnd *fnd = NULL;
2392 	struct REPARSE_KEY rkey;
2393 	struct NTFS_DE_R *re;
2394 	struct INDEX_ROOT *root_r;
2395 
2396 	if (!ni)
2397 		return -EINVAL;
2398 
2399 	rkey.ReparseTag = rtag;
2400 	rkey.ref = *ref;
2401 
2402 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2403 
2404 	if (rtag) {
2405 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2406 		goto out1;
2407 	}
2408 
2409 	fnd = fnd_get();
2410 	if (!fnd) {
2411 		err = -ENOMEM;
2412 		goto out1;
2413 	}
2414 
2415 	root_r = indx_get_root(indx, ni, NULL, NULL);
2416 	if (!root_r) {
2417 		err = -EINVAL;
2418 		goto out;
2419 	}
2420 
2421 	/* The "1" context forces rkey.ReparseTag to be ignored when comparing keys. */
2422 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2423 			(struct NTFS_DE **)&re, fnd);
2424 	if (err)
2425 		goto out;
2426 
2427 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2428 		/* Should be impossible; the volume may be corrupt. */
2429 		goto out;
2430 	}
2431 
2432 	memcpy(&rkey, &re->key, sizeof(rkey));
2433 
2434 	fnd_put(fnd);
2435 	fnd = NULL;
2436 
2437 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2438 	if (err)
2439 		goto out;
2440 
2441 out:
2442 	fnd_put(fnd);
2443 
2444 out1:
2445 	mark_inode_dirty(&ni->vfs_inode);
2446 	ni_unlock(ni);
2447 
2448 	return err;
2449 }
2450 
2451 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2452 					  CLST len)
2453 {
2454 	ntfs_unmap_meta(sbi->sb, lcn, len);
2455 	ntfs_discard(sbi, lcn, len);
2456 }
2457 
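/*
 * mark_as_free_ex - Mark clusters as free in the bitmap, optionally
 * unmapping and discarding them, and try to extend the MFT zone.
 */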
2458 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2459 {
2460 	CLST end, i, zone_len, zlen;
2461 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2462 	bool dirty = false;
2463 
2464 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2465 	if (!wnd_is_used(wnd, lcn, len)) {
2466 		/* Mark the volume as dirty outside of wnd->rw_lock. */
2467 		dirty = true;
2468 
2469 		end = lcn + len;
2470 		len = 0;
2471 		for (i = lcn; i < end; i++) {
2472 			if (wnd_is_used(wnd, i, 1)) {
2473 				if (!len)
2474 					lcn = i;
2475 				len += 1;
2476 				continue;
2477 			}
2478 
2479 			if (!len)
2480 				continue;
2481 
2482 			if (trim)
2483 				ntfs_unmap_and_discard(sbi, lcn, len);
2484 
2485 			wnd_set_free(wnd, lcn, len);
2486 			len = 0;
2487 		}
2488 
2489 		if (!len)
2490 			goto out;
2491 	}
2492 
2493 	if (trim)
2494 		ntfs_unmap_and_discard(sbi, lcn, len);
2495 	wnd_set_free(wnd, lcn, len);
2496 
2497 	/* Append to the MFT zone, if possible. */
2498 	zone_len = wnd_zone_len(wnd);
2499 	zlen = min(zone_len + len, sbi->zone_max);
2500 
2501 	if (zlen == zone_len) {
2502 		/* MFT zone already has maximum size. */
2503 	} else if (!zone_len) {
2504 		/* Create MFT zone only if 'zlen' is large enough. */
2505 		if (zlen == sbi->zone_max)
2506 			wnd_zone_set(wnd, lcn, zlen);
2507 	} else {
2508 		CLST zone_lcn = wnd_zone_bit(wnd);
2509 
2510 		if (lcn + len == zone_lcn) {
2511 			/* Append into head MFT zone. */
2512 			wnd_zone_set(wnd, lcn, zlen);
2513 		} else if (zone_lcn + zone_len == lcn) {
2514 			/* Append into tail MFT zone. */
2515 			wnd_zone_set(wnd, zone_lcn, zlen);
2516 		}
2517 	}
2518 
2519 out:
2520 	up_write(&wnd->rw_lock);
2521 	if (dirty)
2522 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2523 }
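
/*
 * Worked example (editorial annotation, hypothetical numbers): if the
 * MFT zone covers clusters [100, 150) (zone_lcn == 100, zone_len == 50)
 * and clusters [80, 100) are freed, then lcn + len == zone_lcn, so the
 * zone grows at its head: wnd_zone_set(wnd, 80, min(70, sbi->zone_max)).
 */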
2524 
2525 /*
2526  * run_deallocate - Deallocate clusters.
2527  */
2528 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2529 		   bool trim)
2530 {
2531 	CLST lcn, len;
2532 	size_t idx = 0;
2533 
2534 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2535 		if (lcn == SPARSE_LCN)
2536 			continue;
2537 
2538 		mark_as_free_ex(sbi, lcn, len, trim);
2539 	}
2540 
2541 	return 0;
2542 }
2543 
2544 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2545 {
2546 	int i, ch;
2547 
2548 	/* Check for forbidden chars. */
2549 	for (i = 0; i < fname->len; ++i) {
2550 		ch = le16_to_cpu(fname->name[i]);
2551 
2552 		/* Control chars. */
2553 		if (ch < 0x20)
2554 			return true;
2555 
2556 		switch (ch) {
2557 		/* Disallowed by Windows. */
2558 		case '\\':
2559 		case '/':
2560 		case ':':
2561 		case '*':
2562 		case '?':
2563 		case '<':
2564 		case '>':
2565 		case '|':
2566 		case '\"':
2567 			return true;
2568 
2569 		default:
2570 			/* Allowed char. */
2571 			break;
2572 		}
2573 	}
2574 
2575 	/* File names cannot end with a space or a dot. */
2576 	if (fname->len > 0) {
2577 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2578 		if (ch == ' ' || ch == '.')
2579 			return true;
2580 	}
2581 
2582 	return false;
2583 }
2584 
2585 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2586 				    const struct le_str *fname)
2587 {
2588 	int port_digit;
2589 	const __le16 *name = fname->name;
2590 	int len = fname->len;
2591 	const u16 *upcase = sbi->upcase;
2592 
2593 	/* Check for 3-char reserved names (device names). */
2594 	/* The name by itself or with any extension is forbidden. */
2595 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2596 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2597 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2598 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2599 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2600 			return true;
2601 
2602 	/* Check for 4-char reserved names (port name followed by 1..9). */
2603 	/* The name by itself or with any extension is forbidden. */
2604 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2605 		port_digit = le16_to_cpu(name[3]);
2606 		if (port_digit >= '1' && port_digit <= '9')
2607 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2608 					    false) ||
2609 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2610 					    false))
2611 				return true;
2612 	}
2613 
2614 	return false;
2615 }
2616 
2617 /*
2618  * valid_windows_name - Check if a file name is valid in Windows.
2619  */
2620 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2621 {
2622 	return !name_has_forbidden_chars(fname) &&
2623 	       !is_reserved_name(sbi, fname);
2624 }
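
/*
 * Examples (editorial annotation): valid_windows_name() rejects names
 * containing disallowed characters ("a<b", "x:y"), names ending in a
 * space or a dot ("name.", "name "), and DOS device names with or
 * without an extension ("CON", "con.txt", "LPT1", "COM9.log"); an
 * ordinary name such as "file.txt" passes. Names are compared through
 * the volume upcase table, so the checks ignore case.
 */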
2625 
2626 /*
2627  * ntfs_set_label - Update the current NTFS label.
2628  */
2629 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2630 {
2631 	int err;
2632 	struct ATTRIB *attr;
2633 	u32 uni_bytes;
2634 	struct ntfs_inode *ni = sbi->volume.ni;
2635 	/* Allocate PATH_MAX bytes. */
2636 	struct cpu_str *uni = __getname();
2637 
2638 	if (!uni)
2639 		return -ENOMEM;
2640 
2641 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2642 				UTF16_LITTLE_ENDIAN);
2643 	if (err < 0)
2644 		goto out;
2645 
2646 	uni_bytes = uni->len * sizeof(u16);
2647 	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
2648 		ntfs_warn(sbi->sb, "new label is too long");
2649 		err = -EFBIG;
2650 		goto out;
2651 	}
2652 
2653 	ni_lock(ni);
2654 
2655 	/* Ignore any errors. */
2656 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2657 
2658 	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
2659 				 NULL, NULL);
2660 	if (err < 0)
2661 		goto unlock_out;
2662 
2663 	/* Write the new label into the on-disk structure. */
2664 	memcpy(resident_data(attr), uni->name, uni_bytes);
2665 
2666 	/* Update the cached value of the current label. */
2667 	if (len >= ARRAY_SIZE(sbi->volume.label))
2668 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2669 	memcpy(sbi->volume.label, label, len);
2670 	sbi->volume.label[len] = 0;
2671 	mark_inode_dirty_sync(&ni->vfs_inode);
2672 
2673 unlock_out:
2674 	ni_unlock(ni);
2675 
2676 	if (!err)
2677 		err = _ni_write_inode(&ni->vfs_inode, 0);
2678 
2679 out:
2680 	__putname(uni);
2681 	return err;
2682 }
2683
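
/*
 * Usage note (editorial annotation): ntfs_set_label() takes "label" in
 * the mount's NLS encoding, converts it to UTF-16LE, replaces the
 * resident ATTR_LABEL attribute of $Volume, refreshes the cached
 * sbi->volume.label copy, and finally writes the inode to disk.
 */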