xref: /linux/fs/ntfs3/fsntfs.c (revision ce335806b5ecc5132aed0a1af8bd48ae3b2ea178)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12 #include <linux/nls.h>
13 
14 #include "debug.h"
15 #include "ntfs.h"
16 #include "ntfs_fs.h"
17 
18 // clang-format off
19 const struct cpu_str NAME_MFT = {
20 	4, 0, { '$', 'M', 'F', 'T' },
21 };
22 const struct cpu_str NAME_MIRROR = {
23 	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 };
25 const struct cpu_str NAME_LOGFILE = {
26 	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 };
28 const struct cpu_str NAME_VOLUME = {
29 	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 };
31 const struct cpu_str NAME_ATTRDEF = {
32 	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 };
34 const struct cpu_str NAME_ROOT = {
35 	1, 0, { '.' },
36 };
37 const struct cpu_str NAME_BITMAP = {
38 	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 };
40 const struct cpu_str NAME_BOOT = {
41 	5, 0, { '$', 'B', 'o', 'o', 't' },
42 };
43 const struct cpu_str NAME_BADCLUS = {
44 	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 };
46 const struct cpu_str NAME_QUOTA = {
47 	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 };
49 const struct cpu_str NAME_SECURE = {
50 	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 };
52 const struct cpu_str NAME_UPCASE = {
53 	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 };
55 const struct cpu_str NAME_EXTEND = {
56 	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 };
58 const struct cpu_str NAME_OBJID = {
59 	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 };
61 const struct cpu_str NAME_REPARSE = {
62 	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 };
64 const struct cpu_str NAME_USNJRNL = {
65 	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
66 };
67 const __le16 BAD_NAME[4] = {
68 	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 };
70 const __le16 I30_NAME[4] = {
71 	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 };
73 const __le16 SII_NAME[4] = {
74 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 };
76 const __le16 SDH_NAME[4] = {
77 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 };
79 const __le16 SDS_NAME[4] = {
80 	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 };
82 const __le16 SO_NAME[2] = {
83 	cpu_to_le16('$'), cpu_to_le16('O'),
84 };
85 const __le16 SQ_NAME[2] = {
86 	cpu_to_le16('$'), cpu_to_le16('Q'),
87 };
88 const __le16 SR_NAME[2] = {
89 	cpu_to_le16('$'), cpu_to_le16('R'),
90 };
91 
92 #ifdef CONFIG_NTFS3_LZX_XPRESS
93 const __le16 WOF_NAME[17] = {
94 	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
95 	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
96 	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
97 	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
98 	cpu_to_le16('a'),
99 };
100 #endif
101 
102 static const __le16 CON_NAME[3] = {
103 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
104 };
105 
106 static const __le16 NUL_NAME[3] = {
107 	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
108 };
109 
110 static const __le16 AUX_NAME[3] = {
111 	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
112 };
113 
114 static const __le16 PRN_NAME[3] = {
115 	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
116 };
117 
118 static const __le16 COM_NAME[3] = {
119 	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
120 };
121 
122 static const __le16 LPT_NAME[3] = {
123 	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
124 };
125 
126 // clang-format on
127 
128 /*
129  * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
130  */
131 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
132 {
133 	u16 *fixup, *ptr;
134 	u16 sample;
135 	u16 fo = le16_to_cpu(rhdr->fix_off);
136 	u16 fn = le16_to_cpu(rhdr->fix_num);
137 
138 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
139 	    fn * SECTOR_SIZE > bytes) {
140 		return false;
141 	}
142 
143 	/* Get fixup pointer. */
144 	fixup = Add2Ptr(rhdr, fo);
145 
146 	if (*fixup >= 0x7FFF)
147 		*fixup = 1;
148 	else
149 		*fixup += 1;
150 
151 	sample = *fixup;
152 
153 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
154 
155 	while (fn--) {
156 		*++fixup = *ptr;
157 		*ptr = sample;
158 		ptr += SECTOR_SIZE / sizeof(short);
159 	}
160 	return true;
161 }
162 
163 /*
164  * ntfs_fix_post_read - Remove fixups after reading from disk.
165  *
166  * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
167  */
168 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
169 		       bool simple)
170 {
171 	int ret;
172 	u16 *fixup, *ptr;
173 	u16 sample, fo, fn;
174 
175 	fo = le16_to_cpu(rhdr->fix_off);
176 	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
177 		      le16_to_cpu(rhdr->fix_num);
178 
179 	/* Check errors. */
180 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
181 	    fn * SECTOR_SIZE > bytes) {
182 		return -E_NTFS_CORRUPT;
183 	}
184 
185 	/* Get fixup pointer. */
186 	fixup = Add2Ptr(rhdr, fo);
187 	sample = *fixup;
188 	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
189 	ret = 0;
190 
191 	while (fn--) {
192 		/* Test current word. */
193 		if (*ptr != sample) {
194 			/* Fixup does not match! Is it serious error? */
195 			ret = -E_NTFS_FIXUP;
196 		}
197 
198 		/* Replace fixup. */
199 		*ptr = *++fixup;
200 		ptr += SECTOR_SIZE / sizeof(short);
201 	}
202 
203 	return ret;
204 }
205 
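/*
 * Editor's note: an illustrative round-trip of the two fixup helpers
 * above. This is a hedged, stand-alone sketch (the two-sector buffer
 * layout and the fixup offset 0x28 are hypothetical), kept out of the
 * build with #if 0.
 */
#if 0
static void fixup_round_trip_example(void)
{
	u8 buf[2 * SECTOR_SIZE] = { 0 };	/* A two-sector record. */
	struct NTFS_RECORD_HEADER *rhdr = (void *)buf;

	/* Fixup array: one sequence word plus one saved word per sector. */
	rhdr->fix_off = cpu_to_le16(0x28);
	rhdr->fix_num = cpu_to_le16(3);

	/* Stamp the sequence number into the tail of each sector. */
	if (!ntfs_fix_pre_write(rhdr, sizeof(buf)))
		return;		/* Header is inconsistent. */

	/* ...the record would be written to disk and read back here... */

	/* Verify the stamps and restore the saved tail words. */
	if (ntfs_fix_post_read(rhdr, sizeof(buf), false))
		return;		/* -E_NTFS_FIXUP: torn or corrupt record. */
}
#endif
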
206 /*
207  * ntfs_extend_init - Load $Extend file.
208  */
209 int ntfs_extend_init(struct ntfs_sb_info *sbi)
210 {
211 	int err;
212 	struct super_block *sb = sbi->sb;
213 	struct inode *inode, *inode2;
214 	struct MFT_REF ref;
215 
216 	if (sbi->volume.major_ver < 3) {
217 		ntfs_notice(sb, "Skip $Extend because of NTFS version");
218 		return 0;
219 	}
220 
221 	ref.low = cpu_to_le32(MFT_REC_EXTEND);
222 	ref.high = 0;
223 	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
224 	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
225 	if (IS_ERR(inode)) {
226 		err = PTR_ERR(inode);
227 		ntfs_err(sb, "Failed to load $Extend (%d).", err);
228 		inode = NULL;
229 		goto out;
230 	}
231 
232 	/* If ntfs_iget5() reads from disk, it never returns a bad inode. */
233 	if (!S_ISDIR(inode->i_mode)) {
234 		err = -EINVAL;
235 		goto out;
236 	}
237 
238 	/* Try to find $ObjId */
239 	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
240 	if (inode2 && !IS_ERR(inode2)) {
241 		if (is_bad_inode(inode2)) {
242 			iput(inode2);
243 		} else {
244 			sbi->objid.ni = ntfs_i(inode2);
245 			sbi->objid_no = inode2->i_ino;
246 		}
247 	}
248 
249 	/* Try to find $Quota */
250 	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
251 	if (inode2 && !IS_ERR(inode2)) {
252 		sbi->quota_no = inode2->i_ino;
253 		iput(inode2);
254 	}
255 
256 	/* Try to find $Reparse */
257 	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
258 	if (inode2 && !IS_ERR(inode2)) {
259 		sbi->reparse.ni = ntfs_i(inode2);
260 		sbi->reparse_no = inode2->i_ino;
261 	}
262 
263 	/* Try to find $UsnJrnl */
264 	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
265 	if (inode2 && !IS_ERR(inode2)) {
266 		sbi->usn_jrnl_no = inode2->i_ino;
267 		iput(inode2);
268 	}
269 
270 	err = 0;
271 out:
272 	iput(inode);
273 	return err;
274 }
275 
276 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
277 {
278 	int err = 0;
279 	struct super_block *sb = sbi->sb;
280 	bool initialized = false;
281 	struct MFT_REF ref;
282 	struct inode *inode;
283 
284 	/* Check for 4GB. */
285 	if (ni->vfs_inode.i_size >= 0x100000000ull) {
286 		ntfs_err(sb, "\x24LogFile is larger than 4G.");
287 		err = -EINVAL;
288 		goto out;
289 	}
290 
291 	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
292 
293 	ref.low = cpu_to_le32(MFT_REC_MFT);
294 	ref.high = 0;
295 	ref.seq = cpu_to_le16(1);
296 
297 	inode = ntfs_iget5(sb, &ref, NULL);
298 
299 	if (IS_ERR(inode))
300 		inode = NULL;
301 
302 	if (!inode) {
303 		/* Try to use MFT copy. */
304 		u64 t64 = sbi->mft.lbo;
305 
306 		sbi->mft.lbo = sbi->mft.lbo2;
307 		inode = ntfs_iget5(sb, &ref, NULL);
308 		sbi->mft.lbo = t64;
309 		if (IS_ERR(inode))
310 			inode = NULL;
311 	}
312 
313 	if (!inode) {
314 		err = -EINVAL;
315 		ntfs_err(sb, "Failed to load $MFT.");
316 		goto out;
317 	}
318 
319 	sbi->mft.ni = ntfs_i(inode);
320 
321 	/* LogFile should not contain an attribute list. */
322 	err = ni_load_all_mi(sbi->mft.ni);
323 	if (!err)
324 		err = log_replay(ni, &initialized);
325 
326 	iput(inode);
327 	sbi->mft.ni = NULL;
328 
329 	sync_blockdev(sb->s_bdev);
330 	invalidate_bdev(sb->s_bdev);
331 
332 	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
333 		err = 0;
334 		goto out;
335 	}
336 
337 	if (sb_rdonly(sb) || !initialized)
338 		goto out;
339 
340 	/* Fill LogFile with -1 if it was initialized. */
341 	err = ntfs_bio_fill_1(sbi, &ni->file.run);
342 
343 out:
344 	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
345 
346 	return err;
347 }
348 
349 /*
350  * ntfs_look_for_free_space - Look for a free space in bitmap.
351  */
352 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
353 			     CLST *new_lcn, CLST *new_len,
354 			     enum ALLOCATE_OPT opt)
355 {
356 	int err;
357 	CLST alen;
358 	struct super_block *sb = sbi->sb;
359 	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
360 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
361 
362 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
363 	if (opt & ALLOCATE_MFT) {
364 		zlen = wnd_zone_len(wnd);
365 
366 		if (!zlen) {
367 			err = ntfs_refresh_zone(sbi);
368 			if (err)
369 				goto up_write;
370 
371 			zlen = wnd_zone_len(wnd);
372 		}
373 
374 		if (!zlen) {
375 			ntfs_err(sbi->sb, "no free space to extend mft");
376 			err = -ENOSPC;
377 			goto up_write;
378 		}
379 
380 		lcn = wnd_zone_bit(wnd);
381 		alen = min_t(CLST, len, zlen);
382 
383 		wnd_zone_set(wnd, lcn + alen, zlen - alen);
384 
385 		err = wnd_set_used(wnd, lcn, alen);
386 		if (err)
387 			goto up_write;
388 
389 		alcn = lcn;
390 		goto space_found;
391 	}
392 	/*
393 	 * Because cluster 0 is always in use, lcn == 0 means that we should
394 	 * use the cached value of 'next_free_lcn' to improve performance.
395 	 */
396 	if (!lcn)
397 		lcn = sbi->used.next_free_lcn;
398 
399 	if (lcn >= wnd->nbits)
400 		lcn = 0;
401 
402 	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
403 	if (alen)
404 		goto space_found;
405 
406 	/* Try to use clusters from MftZone. */
407 	zlen = wnd_zone_len(wnd);
408 	zeroes = wnd_zeroes(wnd);
409 
410 	/* Check for a too-big request. */
411 	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
412 		err = -ENOSPC;
413 		goto up_write;
414 	}
415 
416 	/* How many clusters to cut from the zone. */
417 	zlcn = wnd_zone_bit(wnd);
418 	zlen2 = zlen >> 1;
419 	ztrim = clamp_val(len, zlen2, zlen);
420 	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
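
	/*
	 * Editor's example (hypothetical numbers): zlen == 100, len == 10
	 * gives ztrim == clamp_val(10, 50, 100) == 50, so the zone is cut
	 * to new_zlen == max(100 - 50, NTFS_MIN_MFT_ZONE).
	 */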
421 
422 	wnd_zone_set(wnd, zlcn, new_zlen);
423 
424 	/* Allocate contiguous clusters. */
425 	alen = wnd_find(wnd, len, 0,
426 			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
427 	if (!alen) {
428 		err = -ENOSPC;
429 		goto up_write;
430 	}
431 
432 space_found:
433 	err = 0;
434 	*new_len = alen;
435 	*new_lcn = alcn;
436 
437 	ntfs_unmap_meta(sb, alcn, alen);
438 
439 	/* Set hint for next requests. */
440 	if (!(opt & ALLOCATE_MFT))
441 		sbi->used.next_free_lcn = alcn + alen;
442 up_write:
443 	up_write(&wnd->rw_lock);
444 	return err;
445 }
446 
447 /*
448  * ntfs_check_for_free_space
449  *
450  * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
451  */
452 bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
453 {
454 	size_t free, zlen, avail;
455 	struct wnd_bitmap *wnd;
456 
457 	wnd = &sbi->used.bitmap;
458 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
459 	free = wnd_zeroes(wnd);
460 	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
461 	up_read(&wnd->rw_lock);
462 
463 	if (free < zlen + clen)
464 		return false;
465 
466 	avail = free - (zlen + clen);
467 
468 	wnd = &sbi->mft.bitmap;
469 	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
470 	free = wnd_zeroes(wnd);
471 	zlen = wnd_zone_len(wnd);
472 	up_read(&wnd->rw_lock);
473 
474 	if (free >= zlen + mlen)
475 		return true;
476 
477 	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
478 }
479 
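/*
 * Editor's note: an illustrative reading of the final check above, under
 * assumed geometry (1K MFT records, i.e. record_bits == 10, and 4K
 * clusters): reserving mlen == 16 records outside the MFT zone needs
 * avail >= bytes_to_cluster(sbi, 16 << 10) == 4 spare clusters.
 */
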
480 /*
481  * ntfs_extend_mft - Allocate additional MFT records.
482  *
483  * sbi->mft.bitmap is locked for write.
484  *
485  * NOTE: recursive:
486  *	ntfs_look_free_mft ->
487  *	ntfs_extend_mft ->
488  *	attr_set_size ->
489  *	ni_insert_nonresident ->
490  *	ni_insert_attr ->
491  *	ni_ins_attr_ext ->
492  *	ntfs_look_free_mft ->
493  *	ntfs_extend_mft
494  *
495  * To avoid recursion, always allocate space for two new MFT records;
496  * see attrib.c: "at least two MFT to avoid recursive loop".
497  */
498 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
499 {
500 	int err;
501 	struct ntfs_inode *ni = sbi->mft.ni;
502 	size_t new_mft_total;
503 	u64 new_mft_bytes, new_bitmap_bytes;
504 	struct ATTRIB *attr;
505 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
506 
507 	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
508 	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
509 
510 	/* Step 1: Resize $MFT::DATA. */
511 	down_write(&ni->file.run_lock);
512 	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
513 			    new_mft_bytes, NULL, false, &attr);
514 
515 	if (err) {
516 		up_write(&ni->file.run_lock);
517 		goto out;
518 	}
519 
520 	attr->nres.valid_size = attr->nres.data_size;
521 	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
522 	ni->mi.dirty = true;
523 
524 	/* Step 2: Resize $MFT::BITMAP. */
525 	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
526 
527 	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
528 			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
529 
530 	/* Refresh MFT Zone if necessary. */
531 	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
532 
533 	ntfs_refresh_zone(sbi);
534 
535 	up_write(&sbi->used.bitmap.rw_lock);
536 	up_write(&ni->file.run_lock);
537 
538 	if (err)
539 		goto out;
540 
541 	err = wnd_extend(wnd, new_mft_total);
542 
543 	if (err)
544 		goto out;
545 
546 	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
547 
548 	err = _ni_write_inode(&ni->vfs_inode, 0);
549 out:
550 	return err;
551 }
552 
553 /*
554  * ntfs_look_free_mft - Look for a free MFT record.
555  */
556 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
557 		       struct ntfs_inode *ni, struct mft_inode **mi)
558 {
559 	int err = 0;
560 	size_t zbit, zlen, from, to, fr;
561 	size_t mft_total;
562 	struct MFT_REF ref;
563 	struct super_block *sb = sbi->sb;
564 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
565 	u32 ir;
566 
567 	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
568 		      MFT_REC_FREE - MFT_REC_RESERVED);
569 
570 	if (!mft)
571 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
572 
573 	zlen = wnd_zone_len(wnd);
574 
575 	/* Always reserve space for MFT. */
576 	if (zlen) {
577 		if (mft) {
578 			zbit = wnd_zone_bit(wnd);
579 			*rno = zbit;
580 			wnd_zone_set(wnd, zbit + 1, zlen - 1);
581 		}
582 		goto found;
583 	}
584 
585 	/* No MFT zone. Find the free MFT record nearest to '0'. */
586 	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
587 		/* Resize MFT */
588 		mft_total = wnd->nbits;
589 
590 		err = ntfs_extend_mft(sbi);
591 		if (!err) {
592 			zbit = mft_total;
593 			goto reserve_mft;
594 		}
595 
596 		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
597 			goto out;
598 
599 		err = 0;
600 
601 		/*
602 		 * Look for a free record in the reserved area [11-16) ==
603 		 * [MFT_REC_RESERVED, MFT_REC_FREE). The MFT bitmap always
604 		 * marks it as used.
605 		 */
606 		if (!sbi->mft.reserved_bitmap) {
607 			/* Once per session create internal bitmap for 5 bits. */
608 			sbi->mft.reserved_bitmap = 0xFF;
609 
610 			ref.high = 0;
611 			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
612 				struct inode *i;
613 				struct ntfs_inode *ni;
614 				struct MFT_REC *mrec;
615 
616 				ref.low = cpu_to_le32(ir);
617 				ref.seq = cpu_to_le16(ir);
618 
619 				i = ntfs_iget5(sb, &ref, NULL);
620 				if (IS_ERR(i)) {
621 next:
622 					ntfs_notice(
623 						sb,
624 						"Invalid reserved record %x",
625 						ref.low);
626 					continue;
627 				}
628 				if (is_bad_inode(i)) {
629 					iput(i);
630 					goto next;
631 				}
632 
633 				ni = ntfs_i(i);
634 
635 				mrec = ni->mi.mrec;
636 
637 				if (!is_rec_base(mrec))
638 					goto next;
639 
640 				if (mrec->hard_links)
641 					goto next;
642 
643 				if (!ni_std(ni))
644 					goto next;
645 
646 				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
647 						 NULL, 0, NULL, NULL))
648 					goto next;
649 
650 				__clear_bit(ir - MFT_REC_RESERVED,
651 					    &sbi->mft.reserved_bitmap);
652 			}
653 		}
654 
655 		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
656 		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
657 					  MFT_REC_FREE, MFT_REC_RESERVED);
658 		if (zbit >= MFT_REC_FREE) {
659 			sbi->mft.next_reserved = MFT_REC_FREE;
660 			goto out;
661 		}
662 
663 		zlen = 1;
664 		sbi->mft.next_reserved = zbit;
665 	} else {
666 reserve_mft:
667 		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
668 		if (zbit + zlen > wnd->nbits)
669 			zlen = wnd->nbits - zbit;
670 
671 		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
672 			zlen -= 1;
673 
674 		/* [zbit, zbit + zlen) will be used for MFT itself. */
675 		from = sbi->mft.used;
676 		if (from < zbit)
677 			from = zbit;
678 		to = zbit + zlen;
679 		if (from < to) {
680 			ntfs_clear_mft_tail(sbi, from, to);
681 			sbi->mft.used = to;
682 		}
683 	}
684 
685 	if (mft) {
686 		*rno = zbit;
687 		zbit += 1;
688 		zlen -= 1;
689 	}
690 
691 	wnd_zone_set(wnd, zbit, zlen);
692 
693 found:
694 	if (!mft) {
695 		/* The request is for a general-purpose record. */
696 		if (sbi->mft.next_free < MFT_REC_USER)
697 			sbi->mft.next_free = MFT_REC_USER;
698 
699 		for (;;) {
700 			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
701 			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
702 				sbi->mft.next_free = sbi->mft.bitmap.nbits;
703 			} else {
704 				*rno = fr;
705 				sbi->mft.next_free = *rno + 1;
706 				break;
707 			}
708 
709 			err = ntfs_extend_mft(sbi);
710 			if (err)
711 				goto out;
712 		}
713 	}
714 
715 	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
716 		err = -ENOMEM;
717 		goto out;
718 	}
719 
720 	/* We have found a record that is not reserved for the next MFT. */
721 	if (*rno >= MFT_REC_FREE)
722 		wnd_set_used(wnd, *rno, 1);
723 	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
724 		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
725 
726 out:
727 	if (!mft)
728 		up_write(&wnd->rw_lock);
729 
730 	return err;
731 }
732 
733 /*
734  * ntfs_mark_rec_free - Mark record as free.
735  * is_mft - true if we are changing the MFT.
736  */
737 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
738 {
739 	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
740 
741 	if (!is_mft)
742 		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
743 	if (rno >= wnd->nbits)
744 		goto out;
745 
746 	if (rno >= MFT_REC_FREE) {
747 		if (!wnd_is_used(wnd, rno, 1))
748 			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
749 		else
750 			wnd_set_free(wnd, rno, 1);
751 	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
752 		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
753 	}
754 
755 	if (rno < wnd_zone_bit(wnd))
756 		wnd_zone_set(wnd, rno, 1);
757 	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
758 		sbi->mft.next_free = rno;
759 
760 out:
761 	if (!is_mft)
762 		up_write(&wnd->rw_lock);
763 }
764 
765 /*
766  * ntfs_clear_mft_tail - Format empty records [from, to).
767  *
768  * sbi->mft.bitmap is locked for write.
769  */
770 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
771 {
772 	int err;
773 	u32 rs;
774 	u64 vbo;
775 	struct runs_tree *run;
776 	struct ntfs_inode *ni;
777 
778 	if (from >= to)
779 		return 0;
780 
781 	rs = sbi->record_size;
782 	ni = sbi->mft.ni;
783 	run = &ni->file.run;
784 
785 	down_read(&ni->file.run_lock);
786 	vbo = (u64)from * rs;
787 	for (; from < to; from++, vbo += rs) {
788 		struct ntfs_buffers nb;
789 
790 		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
791 		if (err)
792 			goto out;
793 
794 		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
795 		nb_put(&nb);
796 		if (err)
797 			goto out;
798 	}
799 
800 out:
801 	sbi->mft.used = from;
802 	up_read(&ni->file.run_lock);
803 	return err;
804 }
805 
806 /*
807  * ntfs_refresh_zone - Refresh MFT zone.
808  *
809  * sbi->used.bitmap is locked for rw.
810  * sbi->mft.bitmap is locked for write.
811  * sbi->mft.ni->file.run_lock for write.
812  */
813 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
814 {
815 	CLST lcn, vcn, len;
816 	size_t lcn_s, zlen;
817 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
818 	struct ntfs_inode *ni = sbi->mft.ni;
819 
820 	/* Do not change anything if the MFT zone is still non-empty. */
821 	if (wnd_zone_len(wnd))
822 		return 0;
823 
824 	vcn = bytes_to_cluster(sbi,
825 			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
826 
827 	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
828 		lcn = SPARSE_LCN;
829 
830 	/* We should always find Last Lcn for MFT. */
831 	if (lcn == SPARSE_LCN)
832 		return -EINVAL;
833 
834 	lcn_s = lcn + 1;
835 
836 	/* Try to allocate clusters after last MFT run. */
837 	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
838 	wnd_zone_set(wnd, lcn_s, zlen);
839 
840 	return 0;
841 }
842 
843 /*
844  * ntfs_update_mftmirr - Update $MFTMirr data.
845  */
846 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
847 {
848 	int err;
849 	struct super_block *sb = sbi->sb;
850 	u32 blocksize, bytes;
851 	sector_t block1, block2;
852 
853 	/*
854 	 * sb can be NULL here. In this case sbi->flags should be 0 too.
855 	 */
856 	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
857 	    unlikely(ntfs3_forced_shutdown(sb)))
858 		return;
859 
860 	blocksize = sb->s_blocksize;
861 	bytes = sbi->mft.recs_mirr << sbi->record_bits;
862 	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
863 	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
864 
865 	for (; bytes >= blocksize; bytes -= blocksize) {
866 		struct buffer_head *bh1, *bh2;
867 
868 		bh1 = sb_bread(sb, block1++);
869 		if (!bh1)
870 			return;
871 
872 		bh2 = sb_getblk(sb, block2++);
873 		if (!bh2) {
874 			put_bh(bh1);
875 			return;
876 		}
877 
878 		if (buffer_locked(bh2))
879 			__wait_on_buffer(bh2);
880 
881 		lock_buffer(bh2);
882 		memcpy(bh2->b_data, bh1->b_data, blocksize);
883 		set_buffer_uptodate(bh2);
884 		mark_buffer_dirty(bh2);
885 		unlock_buffer(bh2);
886 
887 		put_bh(bh1);
888 		bh1 = NULL;
889 
890 		err = wait ? sync_dirty_buffer(bh2) : 0;
891 
892 		put_bh(bh2);
893 		if (err)
894 			return;
895 	}
896 
897 	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
898 }
899 
900 /*
901  * ntfs_bad_inode
902  *
903  * Mark the inode as bad and mark the fs as 'dirty'.
904  */
905 void ntfs_bad_inode(struct inode *inode, const char *hint)
906 {
907 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
908 
909 	ntfs_inode_err(inode, "%s", hint);
910 	make_bad_inode(inode);
911 	/* Avoid recursion if bad inode is $Volume. */
912 	if (inode->i_ino != MFT_REC_VOL &&
913 	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
914 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
915 	}
916 }
917 
918 /*
919  * ntfs_set_state
920  *
921  * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
922  * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
923  * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
924  */
925 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
926 {
927 	int err;
928 	struct ATTRIB *attr;
929 	struct VOLUME_INFO *info;
930 	struct mft_inode *mi;
931 	struct ntfs_inode *ni;
932 	__le16 info_flags;
933 
934 	/*
935 	 * Do not change state if fs was real_dirty.
936 	 * Do not change state if fs is already dirty (clear).
937 	 * Do not change anything if mounted read-only.
938 	 */
939 	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
940 		return 0;
941 
942 	/* Check cached value. */
943 	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
944 	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
945 		return 0;
946 
947 	ni = sbi->volume.ni;
948 	if (!ni)
949 		return -EINVAL;
950 
951 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
952 
953 	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
954 	if (!attr) {
955 		err = -EINVAL;
956 		goto out;
957 	}
958 
959 	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
960 	if (!info) {
961 		err = -EINVAL;
962 		goto out;
963 	}
964 
965 	info_flags = info->flags;
966 
967 	switch (dirty) {
968 	case NTFS_DIRTY_ERROR:
969 		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
970 		sbi->volume.real_dirty = true;
971 		fallthrough;
972 	case NTFS_DIRTY_DIRTY:
973 		info->flags |= VOLUME_FLAG_DIRTY;
974 		break;
975 	case NTFS_DIRTY_CLEAR:
976 		info->flags &= ~VOLUME_FLAG_DIRTY;
977 		break;
978 	}
979 	/* Cache current volume flags. */
980 	if (info_flags != info->flags) {
981 		sbi->volume.flags = info->flags;
982 		mi->dirty = true;
983 	}
984 	err = 0;
985 
986 out:
987 	ni_unlock(ni);
988 	if (err)
989 		return err;
990 
991 	mark_inode_dirty_sync(&ni->vfs_inode);
992 	/* verify(!ntfs_update_mftmirr()); */
993 
994 	/* Write MFT record to disk. */
995 	err = _ni_write_inode(&ni->vfs_inode, 1);
996 
997 	return err;
998 }
999 
1000 /*
1001  * security_hash - Calculates a hash of security descriptor.
1002  */
1003 static inline __le32 security_hash(const void *sd, size_t bytes)
1004 {
1005 	u32 hash = 0;
1006 	const __le32 *ptr = sd;
1007 
1008 	bytes >>= 2;
1009 	while (bytes--)
1010 		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1011 	return cpu_to_le32(hash);
1012 }
1013 
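/*
 * Editor's note: a hedged trace of security_hash() on the two dwords
 * {1, 2}: rol32(0, 3) + 1 == 1, then rol32(1, 3) + 2 == 0xA. The helper
 * below is a hypothetical self-check, not part of the driver.
 */
#if 0
static void security_hash_example(void)
{
	const __le32 sd[2] = { cpu_to_le32(1), cpu_to_le32(2) };

	WARN_ON(security_hash(sd, sizeof(sd)) != cpu_to_le32(0xA));
}
#endif
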
1014 /*
1015  * ntfs_bread - Simple wrapper for sb_bread_unmovable.
1016  */
1017 struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
1018 {
1019 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1020 	struct buffer_head *bh;
1021 
1022 	if (unlikely(block >= sbi->volume.blocks)) {
1023 		/* prevent generic message "attempt to access beyond end of device" */
1024 		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
1025 			 (u64)block << sb->s_blocksize_bits);
1026 		return NULL;
1027 	}
1028 
1029 	bh = sb_bread_unmovable(sb, block);
1030 	if (bh)
1031 		return bh;
1032 
1033 	ntfs_err(sb, "failed to read volume at offset 0x%llx",
1034 		 (u64)block << sb->s_blocksize_bits);
1035 	return NULL;
1036 }
1037 
1038 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1039 {
1040 	struct block_device *bdev = sb->s_bdev;
1041 	u32 blocksize = sb->s_blocksize;
1042 	u64 block = lbo >> sb->s_blocksize_bits;
1043 	u32 off = lbo & (blocksize - 1);
1044 	u32 op = blocksize - off;
1045 
1046 	for (; bytes; block += 1, off = 0, op = blocksize) {
1047 		struct buffer_head *bh = __bread(bdev, block, blocksize);
1048 
1049 		if (!bh)
1050 			return -EIO;
1051 
1052 		if (op > bytes)
1053 			op = bytes;
1054 
1055 		memcpy(buffer, bh->b_data + off, op);
1056 
1057 		put_bh(bh);
1058 
1059 		bytes -= op;
1060 		buffer = Add2Ptr(buffer, op);
1061 	}
1062 
1063 	return 0;
1064 }
1065 
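/*
 * Editor's note: a hedged usage sketch for ntfs_sb_read(). With a 4K
 * block size, the hypothetical 12-byte read below straddles a block
 * boundary, so the loop above issues two __bread() calls and copies
 * 6 bytes out of each block.
 */
#if 0
static int sb_read_example(struct super_block *sb)
{
	u8 buf[12];

	return ntfs_sb_read(sb, 0xFFA, sizeof(buf), buf);
}
#endif
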
1066 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1067 		  const void *buf, int wait)
1068 {
1069 	u32 blocksize = sb->s_blocksize;
1070 	struct block_device *bdev = sb->s_bdev;
1071 	sector_t block = lbo >> sb->s_blocksize_bits;
1072 	u32 off = lbo & (blocksize - 1);
1073 	u32 op = blocksize - off;
1074 	struct buffer_head *bh;
1075 
1076 	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1077 		wait = 1;
1078 
1079 	for (; bytes; block += 1, off = 0, op = blocksize) {
1080 		if (op > bytes)
1081 			op = bytes;
1082 
1083 		if (op < blocksize) {
1084 			bh = __bread(bdev, block, blocksize);
1085 			if (!bh) {
1086 				ntfs_err(sb, "failed to read block %llx",
1087 					 (u64)block);
1088 				return -EIO;
1089 			}
1090 		} else {
1091 			bh = __getblk(bdev, block, blocksize);
1092 			if (!bh)
1093 				return -ENOMEM;
1094 		}
1095 
1096 		if (buffer_locked(bh))
1097 			__wait_on_buffer(bh);
1098 
1099 		lock_buffer(bh);
1100 		if (buf) {
1101 			memcpy(bh->b_data + off, buf, op);
1102 			buf = Add2Ptr(buf, op);
1103 		} else {
1104 			memset(bh->b_data + off, -1, op);
1105 		}
1106 
1107 		set_buffer_uptodate(bh);
1108 		mark_buffer_dirty(bh);
1109 		unlock_buffer(bh);
1110 
1111 		if (wait) {
1112 			int err = sync_dirty_buffer(bh);
1113 
1114 			if (err) {
1115 				ntfs_err(
1116 					sb,
1117 					"failed to sync buffer at block %llx, error %d",
1118 					(u64)block, err);
1119 				put_bh(bh);
1120 				return err;
1121 			}
1122 		}
1123 
1124 		put_bh(bh);
1125 
1126 		bytes -= op;
1127 	}
1128 	return 0;
1129 }
1130 
1131 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1132 		      u64 vbo, const void *buf, size_t bytes, int sync)
1133 {
1134 	struct super_block *sb = sbi->sb;
1135 	u8 cluster_bits = sbi->cluster_bits;
1136 	u32 off = vbo & sbi->cluster_mask;
1137 	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
1138 	u64 lbo, len;
1139 	size_t idx;
1140 
1141 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1142 		return -ENOENT;
1143 
1144 	if (lcn == SPARSE_LCN)
1145 		return -EINVAL;
1146 
1147 	lbo = ((u64)lcn << cluster_bits) + off;
1148 	len = ((u64)clen << cluster_bits) - off;
1149 
1150 	for (;;) {
1151 		u32 op = min_t(u64, len, bytes);
1152 		int err = ntfs_sb_write(sb, lbo, op, buf, sync);
1153 
1154 		if (err)
1155 			return err;
1156 
1157 		bytes -= op;
1158 		if (!bytes)
1159 			break;
1160 
1161 		vcn_next = vcn + clen;
1162 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1163 		    vcn != vcn_next)
1164 			return -ENOENT;
1165 
1166 		if (lcn == SPARSE_LCN)
1167 			return -EINVAL;
1168 
1169 		if (buf)
1170 			buf = Add2Ptr(buf, op);
1171 
1172 		lbo = ((u64)lcn << cluster_bits);
1173 		len = ((u64)clen << cluster_bits);
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1180 				   const struct runs_tree *run, u64 vbo)
1181 {
1182 	struct super_block *sb = sbi->sb;
1183 	u8 cluster_bits = sbi->cluster_bits;
1184 	CLST lcn;
1185 	u64 lbo;
1186 
1187 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1188 		return ERR_PTR(-ENOENT);
1189 
1190 	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1191 
1192 	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1193 }
1194 
1195 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1196 		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1197 {
1198 	int err;
1199 	struct super_block *sb = sbi->sb;
1200 	u32 blocksize = sb->s_blocksize;
1201 	u8 cluster_bits = sbi->cluster_bits;
1202 	u32 off = vbo & sbi->cluster_mask;
1203 	u32 nbh = 0;
1204 	CLST vcn_next, vcn = vbo >> cluster_bits;
1205 	CLST lcn, clen;
1206 	u64 lbo, len;
1207 	size_t idx;
1208 	struct buffer_head *bh;
1209 
1210 	if (!run) {
1211 		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
1212 		if (vbo > MFT_REC_VOL * sbi->record_size) {
1213 			err = -ENOENT;
1214 			goto out;
1215 		}
1216 
1217 		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
1218 		lbo = vbo + sbi->mft.lbo;
1219 		len = sbi->record_size;
1220 	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1221 		err = -ENOENT;
1222 		goto out;
1223 	} else {
1224 		if (lcn == SPARSE_LCN) {
1225 			err = -EINVAL;
1226 			goto out;
1227 		}
1228 
1229 		lbo = ((u64)lcn << cluster_bits) + off;
1230 		len = ((u64)clen << cluster_bits) - off;
1231 	}
1232 
1233 	off = lbo & (blocksize - 1);
1234 	if (nb) {
1235 		nb->off = off;
1236 		nb->bytes = bytes;
1237 	}
1238 
1239 	for (;;) {
1240 		u32 len32 = len >= bytes ? bytes : len;
1241 		sector_t block = lbo >> sb->s_blocksize_bits;
1242 
1243 		do {
1244 			u32 op = blocksize - off;
1245 
1246 			if (op > len32)
1247 				op = len32;
1248 
1249 			bh = ntfs_bread(sb, block);
1250 			if (!bh) {
1251 				err = -EIO;
1252 				goto out;
1253 			}
1254 
1255 			if (buf) {
1256 				memcpy(buf, bh->b_data + off, op);
1257 				buf = Add2Ptr(buf, op);
1258 			}
1259 
1260 			if (!nb) {
1261 				put_bh(bh);
1262 			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
1263 				err = -EINVAL;
1264 				goto out;
1265 			} else {
1266 				nb->bh[nbh++] = bh;
1267 				nb->nbufs = nbh;
1268 			}
1269 
1270 			bytes -= op;
1271 			if (!bytes)
1272 				return 0;
1273 			len32 -= op;
1274 			block += 1;
1275 			off = 0;
1276 
1277 		} while (len32);
1278 
1279 		vcn_next = vcn + clen;
1280 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1281 		    vcn != vcn_next) {
1282 			err = -ENOENT;
1283 			goto out;
1284 		}
1285 
1286 		if (lcn == SPARSE_LCN) {
1287 			err = -EINVAL;
1288 			goto out;
1289 		}
1290 
1291 		lbo = ((u64)lcn << cluster_bits);
1292 		len = ((u64)clen << cluster_bits);
1293 	}
1294 
1295 out:
1296 	if (!nbh)
1297 		return err;
1298 
1299 	while (nbh) {
1300 		put_bh(nb->bh[--nbh]);
1301 		nb->bh[nbh] = NULL;
1302 	}
1303 
1304 	nb->nbufs = 0;
1305 	return err;
1306 }
1307 
1308 /*
1309  * ntfs_read_bh
1310  *
1311  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1312  */
1313 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1314 		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1315 		 struct ntfs_buffers *nb)
1316 {
1317 	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1318 
1319 	if (err)
1320 		return err;
1321 	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1322 }
1323 
1324 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1325 		u32 bytes, struct ntfs_buffers *nb)
1326 {
1327 	int err = 0;
1328 	struct super_block *sb = sbi->sb;
1329 	u32 blocksize = sb->s_blocksize;
1330 	u8 cluster_bits = sbi->cluster_bits;
1331 	CLST vcn_next, vcn = vbo >> cluster_bits;
1332 	u32 off;
1333 	u32 nbh = 0;
1334 	CLST lcn, clen;
1335 	u64 lbo, len;
1336 	size_t idx;
1337 
1338 	nb->bytes = bytes;
1339 
1340 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1341 		err = -ENOENT;
1342 		goto out;
1343 	}
1344 
1345 	off = vbo & sbi->cluster_mask;
1346 	lbo = ((u64)lcn << cluster_bits) + off;
1347 	len = ((u64)clen << cluster_bits) - off;
1348 
1349 	nb->off = off = lbo & (blocksize - 1);
1350 
1351 	for (;;) {
1352 		u32 len32 = min_t(u64, len, bytes);
1353 		sector_t block = lbo >> sb->s_blocksize_bits;
1354 
1355 		do {
1356 			u32 op;
1357 			struct buffer_head *bh;
1358 
1359 			if (nbh >= ARRAY_SIZE(nb->bh)) {
1360 				err = -EINVAL;
1361 				goto out;
1362 			}
1363 
1364 			op = blocksize - off;
1365 			if (op > len32)
1366 				op = len32;
1367 
1368 			if (op == blocksize) {
1369 				bh = sb_getblk(sb, block);
1370 				if (!bh) {
1371 					err = -ENOMEM;
1372 					goto out;
1373 				}
1374 				if (buffer_locked(bh))
1375 					__wait_on_buffer(bh);
1376 				set_buffer_uptodate(bh);
1377 			} else {
1378 				bh = ntfs_bread(sb, block);
1379 				if (!bh) {
1380 					err = -EIO;
1381 					goto out;
1382 				}
1383 			}
1384 
1385 			nb->bh[nbh++] = bh;
1386 			bytes -= op;
1387 			if (!bytes) {
1388 				nb->nbufs = nbh;
1389 				return 0;
1390 			}
1391 
1392 			block += 1;
1393 			len32 -= op;
1394 			off = 0;
1395 		} while (len32);
1396 
1397 		vcn_next = vcn + clen;
1398 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1399 		    vcn != vcn_next) {
1400 			err = -ENOENT;
1401 			goto out;
1402 		}
1403 
1404 		lbo = ((u64)lcn << cluster_bits);
1405 		len = ((u64)clen << cluster_bits);
1406 	}
1407 
1408 out:
1409 	while (nbh) {
1410 		put_bh(nb->bh[--nbh]);
1411 		nb->bh[nbh] = NULL;
1412 	}
1413 
1414 	nb->nbufs = 0;
1415 
1416 	return err;
1417 }
1418 
1419 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1420 		  struct ntfs_buffers *nb, int sync)
1421 {
1422 	int err = 0;
1423 	struct super_block *sb = sbi->sb;
1424 	u32 block_size = sb->s_blocksize;
1425 	u32 bytes = nb->bytes;
1426 	u32 off = nb->off;
1427 	u16 fo = le16_to_cpu(rhdr->fix_off);
1428 	u16 fn = le16_to_cpu(rhdr->fix_num);
1429 	u32 idx;
1430 	__le16 *fixup;
1431 	__le16 sample;
1432 
1433 	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1434 	    fn * SECTOR_SIZE > bytes) {
1435 		return -EINVAL;
1436 	}
1437 
1438 	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1439 		u32 op = block_size - off;
1440 		char *bh_data;
1441 		struct buffer_head *bh = nb->bh[idx];
1442 		__le16 *ptr, *end_data;
1443 
1444 		if (op > bytes)
1445 			op = bytes;
1446 
1447 		if (buffer_locked(bh))
1448 			__wait_on_buffer(bh);
1449 
1450 		lock_buffer(bh);
1451 
1452 		bh_data = bh->b_data + off;
1453 		end_data = Add2Ptr(bh_data, op);
1454 		memcpy(bh_data, rhdr, op);
1455 
1456 		if (!idx) {
1457 			u16 t16;
1458 
1459 			fixup = Add2Ptr(bh_data, fo);
1460 			sample = *fixup;
1461 			t16 = le16_to_cpu(sample);
1462 			if (t16 >= 0x7FFF) {
1463 				sample = *fixup = cpu_to_le16(1);
1464 			} else {
1465 				sample = cpu_to_le16(t16 + 1);
1466 				*fixup = sample;
1467 			}
1468 
1469 			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
1470 		}
1471 
1472 		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1473 
1474 		do {
1475 			*++fixup = *ptr;
1476 			*ptr = sample;
1477 			ptr += SECTOR_SIZE / sizeof(short);
1478 		} while (ptr < end_data);
1479 
1480 		set_buffer_uptodate(bh);
1481 		mark_buffer_dirty(bh);
1482 		unlock_buffer(bh);
1483 
1484 		if (sync) {
1485 			int err2 = sync_dirty_buffer(bh);
1486 
1487 			if (!err && err2)
1488 				err = err2;
1489 		}
1490 
1491 		bytes -= op;
1492 		rhdr = Add2Ptr(rhdr, op);
1493 	}
1494 
1495 	return err;
1496 }
1497 
1498 /*
1499  * ntfs_bio_pages - Read/write pages from/to disk.
1500  */
1501 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1502 		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1503 		   enum req_op op)
1504 {
1505 	int err = 0;
1506 	struct bio *new, *bio = NULL;
1507 	struct super_block *sb = sbi->sb;
1508 	struct block_device *bdev = sb->s_bdev;
1509 	struct page *page;
1510 	u8 cluster_bits = sbi->cluster_bits;
1511 	CLST lcn, clen, vcn, vcn_next;
1512 	u32 add, off, page_idx;
1513 	u64 lbo, len;
1514 	size_t run_idx;
1515 	struct blk_plug plug;
1516 
1517 	if (!bytes)
1518 		return 0;
1519 
1520 	blk_start_plug(&plug);
1521 
1522 	/* Align vbo and bytes to 512-byte boundaries. */
1523 	lbo = (vbo + bytes + 511) & ~511ull;
1524 	vbo = vbo & ~511ull;
1525 	bytes = lbo - vbo;
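
	/*
	 * Editor's example: vbo == 0x205, bytes == 0x10 becomes
	 * vbo == 0x200, bytes == 0x200 (the end 0x215 rounds up to 0x400).
	 */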
1526 
1527 	vcn = vbo >> cluster_bits;
1528 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1529 		err = -ENOENT;
1530 		goto out;
1531 	}
1532 	off = vbo & sbi->cluster_mask;
1533 	page_idx = 0;
1534 	page = pages[0];
1535 
1536 	for (;;) {
1537 		lbo = ((u64)lcn << cluster_bits) + off;
1538 		len = ((u64)clen << cluster_bits) - off;
1539 new_bio:
1540 		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1541 		if (bio) {
1542 			bio_chain(bio, new);
1543 			submit_bio(bio);
1544 		}
1545 		bio = new;
1546 		bio->bi_iter.bi_sector = lbo >> 9;
1547 
1548 		while (len) {
1549 			off = vbo & (PAGE_SIZE - 1);
1550 			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
1551 
1552 			if (bio_add_page(bio, page, add, off) < add)
1553 				goto new_bio;
1554 
1555 			if (bytes <= add)
1556 				goto out;
1557 			bytes -= add;
1558 			vbo += add;
1559 
1560 			if (add + off == PAGE_SIZE) {
1561 				page_idx += 1;
1562 				if (WARN_ON(page_idx >= nr_pages)) {
1563 					err = -EINVAL;
1564 					goto out;
1565 				}
1566 				page = pages[page_idx];
1567 			}
1568 
1569 			if (len <= add)
1570 				break;
1571 			len -= add;
1572 			lbo += add;
1573 		}
1574 
1575 		vcn_next = vcn + clen;
1576 		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
1577 		    vcn != vcn_next) {
1578 			err = -ENOENT;
1579 			goto out;
1580 		}
1581 		off = 0;
1582 	}
1583 out:
1584 	if (bio) {
1585 		if (!err)
1586 			err = submit_bio_wait(bio);
1587 		bio_put(bio);
1588 	}
1589 	blk_finish_plug(&plug);
1590 
1591 	return err;
1592 }
1593 
1594 /*
1595  * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1596  *
1597  * Fill the on-disk logfile range with -1;
1598  * this marks the logfile as empty.
1599  */
1600 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1601 {
1602 	int err = 0;
1603 	struct super_block *sb = sbi->sb;
1604 	struct block_device *bdev = sb->s_bdev;
1605 	u8 cluster_bits = sbi->cluster_bits;
1606 	struct bio *new, *bio = NULL;
1607 	CLST lcn, clen;
1608 	u64 lbo, len;
1609 	size_t run_idx;
1610 	struct page *fill;
1611 	void *kaddr;
1612 	struct blk_plug plug;
1613 
1614 	fill = alloc_page(GFP_KERNEL);
1615 	if (!fill)
1616 		return -ENOMEM;
1617 
1618 	kaddr = kmap_atomic(fill);
1619 	memset(kaddr, -1, PAGE_SIZE);
1620 	kunmap_atomic(kaddr);
1621 	flush_dcache_page(fill);
1622 	lock_page(fill);
1623 
1624 	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1625 		err = -ENOENT;
1626 		goto out;
1627 	}
1628 
1629 	/*
1630 	 * TODO: Try blkdev_issue_write_same.
1631 	 */
1632 	blk_start_plug(&plug);
1633 	do {
1634 		lbo = (u64)lcn << cluster_bits;
1635 		len = (u64)clen << cluster_bits;
1636 new_bio:
1637 		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1638 		if (bio) {
1639 			bio_chain(bio, new);
1640 			submit_bio(bio);
1641 		}
1642 		bio = new;
1643 		bio->bi_iter.bi_sector = lbo >> 9;
1644 
1645 		for (;;) {
1646 			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1647 
1648 			if (bio_add_page(bio, fill, add, 0) < add)
1649 				goto new_bio;
1650 
1651 			lbo += add;
1652 			if (len <= add)
1653 				break;
1654 			len -= add;
1655 		}
1656 	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1657 
1658 	if (!err)
1659 		err = submit_bio_wait(bio);
1660 	bio_put(bio);
1661 
1662 	blk_finish_plug(&plug);
1663 out:
1664 	unlock_page(fill);
1665 	put_page(fill);
1666 
1667 	return err;
1668 }
1669 
1670 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1671 		    u64 vbo, u64 *lbo, u64 *bytes)
1672 {
1673 	u32 off;
1674 	CLST lcn, len;
1675 	u8 cluster_bits = sbi->cluster_bits;
1676 
1677 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1678 		return -ENOENT;
1679 
1680 	off = vbo & sbi->cluster_mask;
1681 	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1682 	*bytes = ((u64)len << cluster_bits) - off;
1683 
1684 	return 0;
1685 }
1686 
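/*
 * Editor's note: a hedged usage sketch of ntfs_vbo_to_lbo() under an
 * assumed 4K-cluster geometry: a vbo of 0x1234 inside a run that maps
 * vcn 1 to lcn 100 resolves to lbo == (100 << 12) + 0x234.
 */
#if 0
static void vbo_to_lbo_example(struct ntfs_sb_info *sbi,
			       const struct runs_tree *run)
{
	u64 lbo, left;

	if (!ntfs_vbo_to_lbo(sbi, run, 0x1234, &lbo, &left))
		pr_debug("lbo=0x%llx, contiguous bytes=0x%llx\n", lbo, left);
}
#endif
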
1687 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
1688 				  enum RECORD_FLAG flag)
1689 {
1690 	int err = 0;
1691 	struct super_block *sb = sbi->sb;
1692 	struct inode *inode = new_inode(sb);
1693 	struct ntfs_inode *ni;
1694 
1695 	if (!inode)
1696 		return ERR_PTR(-ENOMEM);
1697 
1698 	ni = ntfs_i(inode);
1699 
1700 	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
1701 	if (err)
1702 		goto out;
1703 
1704 	inode->i_ino = rno;
1705 	if (insert_inode_locked(inode) < 0) {
1706 		err = -EIO;
1707 		goto out;
1708 	}
1709 
1710 out:
1711 	if (err) {
1712 		make_bad_inode(inode);
1713 		iput(inode);
1714 		ni = ERR_PTR(err);
1715 	}
1716 	return ni;
1717 }
1718 
1719 /*
1720  * O:BAG:BAD:(A;OICI;FA;;;WD)
1721  * Owner S-1-5-32-544 (Administrators)
1722  * Group S-1-5-32-544 (Administrators)
1723  * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1724  */
1725 const u8 s_default_security[] __aligned(8) = {
1726 	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1727 	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1728 	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1729 	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1730 	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1731 	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1732 	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
1733 };
1734 
1735 static_assert(sizeof(s_default_security) == 0x50);
1736 
1737 static inline u32 sid_length(const struct SID *sid)
1738 {
1739 	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1740 }
1741 
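/*
 * Editor's note: with the standard self-relative SID layout (an 8-byte
 * header plus 4 bytes per sub-authority), e.g. S-1-5-32-544 has
 * SubAuthorityCount == 2 and sid_length() == 8 + 2 * 4 == 16.
 */
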
1742 /*
1743  * is_acl_valid
1744  *
1745  * Thanks to Mark Harmstone for the idea.
1746  */
1747 static bool is_acl_valid(const struct ACL *acl, u32 len)
1748 {
1749 	const struct ACE_HEADER *ace;
1750 	u32 i;
1751 	u16 ace_count, ace_size;
1752 
1753 	if (acl->AclRevision != ACL_REVISION &&
1754 	    acl->AclRevision != ACL_REVISION_DS) {
1755 		/*
1756 		 * This value should be ACL_REVISION, unless the ACL contains an
1757 		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1758 		 * All ACEs in an ACL must be at the same revision level.
1759 		 */
1760 		return false;
1761 	}
1762 
1763 	if (acl->Sbz1)
1764 		return false;
1765 
1766 	if (le16_to_cpu(acl->AclSize) > len)
1767 		return false;
1768 
1769 	if (acl->Sbz2)
1770 		return false;
1771 
1772 	len -= sizeof(struct ACL);
1773 	ace = (struct ACE_HEADER *)&acl[1];
1774 	ace_count = le16_to_cpu(acl->AceCount);
1775 
1776 	for (i = 0; i < ace_count; i++) {
1777 		if (len < sizeof(struct ACE_HEADER))
1778 			return false;
1779 
1780 		ace_size = le16_to_cpu(ace->AceSize);
1781 		if (len < ace_size)
1782 			return false;
1783 
1784 		len -= ace_size;
1785 		ace = Add2Ptr(ace, ace_size);
1786 	}
1787 
1788 	return true;
1789 }
1790 
1791 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1792 {
1793 	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1794 
1795 	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1796 		return false;
1797 
1798 	if (sd->Revision != 1)
1799 		return false;
1800 
1801 	if (sd->Sbz1)
1802 		return false;
1803 
1804 	if (!(sd->Control & SE_SELF_RELATIVE))
1805 		return false;
1806 
1807 	sd_owner = le32_to_cpu(sd->Owner);
1808 	if (sd_owner) {
1809 		const struct SID *owner = Add2Ptr(sd, sd_owner);
1810 
1811 		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1812 			return false;
1813 
1814 		if (owner->Revision != 1)
1815 			return false;
1816 
1817 		if (sd_owner + sid_length(owner) > len)
1818 			return false;
1819 	}
1820 
1821 	sd_group = le32_to_cpu(sd->Group);
1822 	if (sd_group) {
1823 		const struct SID *group = Add2Ptr(sd, sd_group);
1824 
1825 		if (sd_group + offsetof(struct SID, SubAuthority) > len)
1826 			return false;
1827 
1828 		if (group->Revision != 1)
1829 			return false;
1830 
1831 		if (sd_group + sid_length(group) > len)
1832 			return false;
1833 	}
1834 
1835 	sd_sacl = le32_to_cpu(sd->Sacl);
1836 	if (sd_sacl) {
1837 		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1838 
1839 		if (sd_sacl + sizeof(struct ACL) > len)
1840 			return false;
1841 
1842 		if (!is_acl_valid(sacl, len - sd_sacl))
1843 			return false;
1844 	}
1845 
1846 	sd_dacl = le32_to_cpu(sd->Dacl);
1847 	if (sd_dacl) {
1848 		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1849 
1850 		if (sd_dacl + sizeof(struct ACL) > len)
1851 			return false;
1852 
1853 		if (!is_acl_valid(dacl, len - sd_dacl))
1854 			return false;
1855 	}
1856 
1857 	return true;
1858 }
1859 
1860 /*
1861  * ntfs_security_init - Load and parse $Secure.
1862  */
1863 int ntfs_security_init(struct ntfs_sb_info *sbi)
1864 {
1865 	int err;
1866 	struct super_block *sb = sbi->sb;
1867 	struct inode *inode;
1868 	struct ntfs_inode *ni;
1869 	struct MFT_REF ref;
1870 	struct ATTRIB *attr;
1871 	struct ATTR_LIST_ENTRY *le;
1872 	u64 sds_size;
1873 	size_t off;
1874 	struct NTFS_DE *ne;
1875 	struct NTFS_DE_SII *sii_e;
1876 	struct ntfs_fnd *fnd_sii = NULL;
1877 	const struct INDEX_ROOT *root_sii;
1878 	const struct INDEX_ROOT *root_sdh;
1879 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1880 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1881 
1882 	ref.low = cpu_to_le32(MFT_REC_SECURE);
1883 	ref.high = 0;
1884 	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1885 
1886 	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1887 	if (IS_ERR(inode)) {
1888 		err = PTR_ERR(inode);
1889 		ntfs_err(sb, "Failed to load $Secure (%d).", err);
1890 		inode = NULL;
1891 		goto out;
1892 	}
1893 
1894 	ni = ntfs_i(inode);
1895 
1896 	le = NULL;
1897 
1898 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1899 			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1900 	if (!attr ||
1901 	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1902 	    root_sdh->type != ATTR_ZERO ||
1903 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1904 	    offsetof(struct INDEX_ROOT, ihdr) +
1905 			    le32_to_cpu(root_sdh->ihdr.used) >
1906 		    le32_to_cpu(attr->res.data_size)) {
1907 		ntfs_err(sb, "$Secure::$SDH is corrupted.");
1908 		err = -EINVAL;
1909 		goto out;
1910 	}
1911 
1912 	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1913 	if (err) {
1914 		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
1915 		goto out;
1916 	}
1917 
1918 	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1919 			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1920 	if (!attr ||
1921 	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1922 	    root_sii->type != ATTR_ZERO ||
1923 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1924 	    offsetof(struct INDEX_ROOT, ihdr) +
1925 			    le32_to_cpu(root_sii->ihdr.used) >
1926 		    le32_to_cpu(attr->res.data_size)) {
1927 		ntfs_err(sb, "$Secure::$SII is corrupted.");
1928 		err = -EINVAL;
1929 		goto out;
1930 	}
1931 
1932 	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1933 	if (err) {
1934 		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
1935 		goto out;
1936 	}
1937 
1938 	fnd_sii = fnd_get();
1939 	if (!fnd_sii) {
1940 		err = -ENOMEM;
1941 		goto out;
1942 	}
1943 
1944 	sds_size = inode->i_size;
1945 
1946 	/* Find the last valid Id. */
1947 	sbi->security.next_id = SECURITY_ID_FIRST;
1948 	/* Always write new security at the end of bucket. */
1949 	sbi->security.next_off =
1950 		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1951 
1952 	off = 0;
1953 	ne = NULL;
1954 
1955 	for (;;) {
1956 		u32 next_id;
1957 
1958 		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1959 		if (err || !ne)
1960 			break;
1961 
1962 		sii_e = (struct NTFS_DE_SII *)ne;
1963 		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
1964 			continue;
1965 
1966 		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1967 		if (next_id >= sbi->security.next_id)
1968 			sbi->security.next_id = next_id;
1969 	}
1970 
1971 	sbi->security.ni = ni;
1972 	inode = NULL;
1973 out:
1974 	iput(inode);
1975 	fnd_put(fnd_sii);
1976 
1977 	return err;
1978 }
1979 
1980 /*
1981  * ntfs_get_security_by_id - Read security descriptor by id.
1982  */
1983 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1984 			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1985 			    size_t *size)
1986 {
1987 	int err;
1988 	int diff;
1989 	struct ntfs_inode *ni = sbi->security.ni;
1990 	struct ntfs_index *indx = &sbi->security.index_sii;
1991 	void *p = NULL;
1992 	struct NTFS_DE_SII *sii_e;
1993 	struct ntfs_fnd *fnd_sii;
1994 	struct SECURITY_HDR d_security;
1995 	const struct INDEX_ROOT *root_sii;
1996 	u32 t32;
1997 
1998 	*sd = NULL;
1999 
2000 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2001 
2002 	fnd_sii = fnd_get();
2003 	if (!fnd_sii) {
2004 		err = -ENOMEM;
2005 		goto out;
2006 	}
2007 
2008 	root_sii = indx_get_root(indx, ni, NULL, NULL);
2009 	if (!root_sii) {
2010 		err = -EINVAL;
2011 		goto out;
2012 	}
2013 
2014 	/* Try to find this SECURITY descriptor in SII indexes. */
2015 	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
2016 			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
2017 	if (err)
2018 		goto out;
2019 
2020 	if (diff)
2021 		goto out;
2022 
2023 	t32 = le32_to_cpu(sii_e->sec_hdr.size);
2024 	if (t32 < sizeof(struct SECURITY_HDR)) {
2025 		err = -EINVAL;
2026 		goto out;
2027 	}
2028 
2029 	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
2030 		/* Security descriptor looks too big. 0x10000 is an arbitrary large limit. */
2031 		err = -EFBIG;
2032 		goto out;
2033 	}
2034 
2035 	*size = t32 - sizeof(struct SECURITY_HDR);
2036 
2037 	p = kmalloc(*size, GFP_NOFS);
2038 	if (!p) {
2039 		err = -ENOMEM;
2040 		goto out;
2041 	}
2042 
2043 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2044 			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2045 			       sizeof(d_security), NULL);
2046 	if (err)
2047 		goto out;
2048 
2049 	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
2050 		err = -EINVAL;
2051 		goto out;
2052 	}
2053 
2054 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2055 			       le64_to_cpu(sii_e->sec_hdr.off) +
2056 				       sizeof(struct SECURITY_HDR),
2057 			       p, *size, NULL);
2058 	if (err)
2059 		goto out;
2060 
2061 	*sd = p;
2062 	p = NULL;
2063 
2064 out:
2065 	kfree(p);
2066 	fnd_put(fnd_sii);
2067 	ni_unlock(ni);
2068 
2069 	return err;
2070 }
2071 
2072 /*
2073  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2074  *
2075  * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2076  * and it contains a mirror copy of each security descriptor.  When writing
2077  * to a security descriptor at location X, another copy will be written at
2078  * location (X+256K).
2079  * When writing a security descriptor that will cross the 256K boundary,
2080  * the pointer will be advanced by 256K to skip
2081  * over the mirror portion.
2082  */
2083 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2084 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2085 			 u32 size_sd, __le32 *security_id, bool *inserted)
2086 {
2087 	int err, diff;
2088 	struct ntfs_inode *ni = sbi->security.ni;
2089 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2090 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2091 	struct NTFS_DE_SDH *e;
2092 	struct NTFS_DE_SDH sdh_e;
2093 	struct NTFS_DE_SII sii_e;
2094 	struct SECURITY_HDR *d_security;
2095 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2096 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2097 	struct SECURITY_KEY hash_key;
2098 	struct ntfs_fnd *fnd_sdh = NULL;
2099 	const struct INDEX_ROOT *root_sdh;
2100 	const struct INDEX_ROOT *root_sii;
2101 	u64 mirr_off, new_sds_size;
2102 	u32 next, left;
2103 
2104 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2105 		      SecurityDescriptorsBlockSize);
2106 
2107 	hash_key.hash = security_hash(sd, size_sd);
2108 	hash_key.sec_id = SECURITY_ID_INVALID;
2109 
2110 	if (inserted)
2111 		*inserted = false;
2112 	*security_id = SECURITY_ID_INVALID;
2113 
2114 	/* Allocate a temporary buffer. */
2115 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2116 	if (!d_security)
2117 		return -ENOMEM;
2118 
2119 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2120 
2121 	fnd_sdh = fnd_get();
2122 	if (!fnd_sdh) {
2123 		err = -ENOMEM;
2124 		goto out;
2125 	}
2126 
2127 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2128 	if (!root_sdh) {
2129 		err = -EINVAL;
2130 		goto out;
2131 	}
2132 
2133 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2134 	if (!root_sii) {
2135 		err = -EINVAL;
2136 		goto out;
2137 	}
2138 
2139 	/*
2140 	 * Check if such a security descriptor already exists.
2141 	 * Use the "SDH" index and the hash to locate its offset in "SDS".
2142 	 */
2143 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2144 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2145 			fnd_sdh);
2146 	if (err)
2147 		goto out;
2148 
2149 	while (e) {
2150 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2151 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2152 					       le64_to_cpu(e->sec_hdr.off),
2153 					       d_security, new_sec_size, NULL);
2154 			if (err)
2155 				goto out;
2156 
2157 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2158 			    d_security->key.hash == hash_key.hash &&
2159 			    !memcmp(d_security + 1, sd, size_sd)) {
2160 				/* Such security already exists. */
2161 				*security_id = d_security->key.sec_id;
2162 				err = 0;
2163 				goto out;
2164 			}
2165 		}
2166 
2167 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2168 				     (struct NTFS_DE **)&e, fnd_sdh);
2169 		if (err)
2170 			goto out;
2171 
2172 		if (!e || e->key.hash != hash_key.hash)
2173 			break;
2174 	}
2175 
2176 	/* Zero unused space. */
2177 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2178 	left = SecurityDescriptorsBlockSize - next;
2179 
2180 	/* If the descriptor will not fit before the block boundary, skip the gap and the mirror block. */
2181 	if (left < new_sec_size) {
2182 		/* Zero "left" bytes from sbi->security.next_off. */
2183 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2184 	}
2185 
2186 	/* Zero tail of previous security. */
2187 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2188 
2189 	/*
2190 	 * Example:
2191 	 * 0x40438 == ni->vfs_inode.i_size
2192 	 * 0x00440 == sbi->security.next_off
2193 	 * need to zero [0x438-0x440)
2194 	 * if (next > used) {
2195 	 *  u32 tozero = next - used;
2196 	 *  zero "tozero" bytes from sbi->security.next_off - tozero
2197 	 */
2198 
2199 	/* Format new security descriptor. */
2200 	d_security->key.hash = hash_key.hash;
2201 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2202 	d_security->off = cpu_to_le64(sbi->security.next_off);
2203 	d_security->size = cpu_to_le32(new_sec_size);
2204 	memcpy(d_security + 1, sd, size_sd);
2205 
2206 	/* Write main SDS bucket. */
2207 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2208 				d_security, aligned_sec_size, 0);
2209 
2210 	if (err)
2211 		goto out;
2212 
2213 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2214 	new_sds_size = mirr_off + aligned_sec_size;
2215 
2216 	if (new_sds_size > ni->vfs_inode.i_size) {
2217 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2218 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2219 				    new_sds_size, &new_sds_size, false, NULL);
2220 		if (err)
2221 			goto out;
2222 	}
2223 
2224 	/* Write the mirror copy of the SDS bucket. */
2225 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2226 				aligned_sec_size, 0);
2227 	if (err)
2228 		goto out;
2229 
2230 	/* Fill SII entry. */
2231 	sii_e.de.view.data_off =
2232 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2233 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2234 	sii_e.de.view.res = 0;
2235 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2236 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2237 	sii_e.de.flags = 0;
2238 	sii_e.de.res = 0;
2239 	sii_e.sec_id = d_security->key.sec_id;
2240 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2241 
2242 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2243 	if (err)
2244 		goto out;
2245 
2246 	/* Fill SDH entry. */
2247 	sdh_e.de.view.data_off =
2248 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2249 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2250 	sdh_e.de.view.res = 0;
2251 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2252 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2253 	sdh_e.de.flags = 0;
2254 	sdh_e.de.res = 0;
2255 	sdh_e.key.hash = d_security->key.hash;
2256 	sdh_e.key.sec_id = d_security->key.sec_id;
2257 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2258 	sdh_e.magic[0] = cpu_to_le16('I');
2259 	sdh_e.magic[1] = cpu_to_le16('I');
2260 
2261 	fnd_clear(fnd_sdh);
2262 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2263 				fnd_sdh, 0);
2264 	if (err)
2265 		goto out;
2266 
2267 	*security_id = d_security->key.sec_id;
2268 	if (inserted)
2269 		*inserted = true;
2270 
2271 	/* Update Id and offset for next descriptor. */
2272 	sbi->security.next_id += 1;
2273 	sbi->security.next_off += aligned_sec_size;
2274 
2275 out:
2276 	fnd_put(fnd_sdh);
2277 	mark_inode_dirty(&ni->vfs_inode);
2278 	ni_unlock(ni);
2279 	kfree(d_security);
2280 
2281 	return err;
2282 }
2283 
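/*
 * Editor's note - a hedged usage sketch for ntfs_insert_security()
 * (hypothetical caller, not from this file). The function deduplicates
 * against existing SDS entries, so 'inserted' reports whether a new
 * descriptor was actually written:
 *
 *	__le32 sid;
 *	bool inserted;
 *
 *	err = ntfs_insert_security(sbi, sd, size_sd, &sid, &inserted);
 *	if (!err && !inserted)
 *		;	// 'sid' reuses a pre-existing identical descriptor
 */
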
2284 /*
2285  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2286  */
2287 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2288 {
2289 	int err;
2290 	struct ntfs_inode *ni = sbi->reparse.ni;
2291 	struct ntfs_index *indx = &sbi->reparse.index_r;
2292 	struct ATTRIB *attr;
2293 	struct ATTR_LIST_ENTRY *le;
2294 	const struct INDEX_ROOT *root_r;
2295 
2296 	if (!ni)
2297 		return 0;
2298 
2299 	le = NULL;
2300 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2301 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2302 	if (!attr) {
2303 		err = -EINVAL;
2304 		goto out;
2305 	}
2306 
2307 	root_r = resident_data(attr);
2308 	if (root_r->type != ATTR_ZERO ||
2309 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2310 		err = -EINVAL;
2311 		goto out;
2312 	}
2313 
2314 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2315 	if (err)
2316 		goto out;
2317 
2318 out:
2319 	return err;
2320 }
2321 
2322 /*
2323  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2324  */
2325 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2326 {
2327 	int err;
2328 	struct ntfs_inode *ni = sbi->objid.ni;
2329 	struct ntfs_index *indx = &sbi->objid.index_o;
2330 	struct ATTRIB *attr;
2331 	struct ATTR_LIST_ENTRY *le;
2332 	const struct INDEX_ROOT *root;
2333 
2334 	if (!ni)
2335 		return 0;
2336 
2337 	le = NULL;
2338 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2339 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2340 	if (!attr) {
2341 		err = -EINVAL;
2342 		goto out;
2343 	}
2344 
2345 	root = resident_data(attr);
2346 	if (root->type != ATTR_ZERO ||
2347 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2348 		err = -EINVAL;
2349 		goto out;
2350 	}
2351 
2352 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2353 	if (err)
2354 		goto out;
2355 
2356 out:
2357 	return err;
2358 }
2359 
2360 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2361 {
2362 	int err;
2363 	struct ntfs_inode *ni = sbi->objid.ni;
2364 	struct ntfs_index *indx = &sbi->objid.index_o;
2365 
2366 	if (!ni)
2367 		return -EINVAL;
2368 
2369 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2370 
2371 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2372 
2373 	mark_inode_dirty(&ni->vfs_inode);
2374 	ni_unlock(ni);
2375 
2376 	return err;
2377 }
2378 
2379 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2380 			const struct MFT_REF *ref)
2381 {
2382 	int err;
2383 	struct ntfs_inode *ni = sbi->reparse.ni;
2384 	struct ntfs_index *indx = &sbi->reparse.index_r;
2385 	struct NTFS_DE_R re;
2386 
2387 	if (!ni)
2388 		return -EINVAL;
2389 
2390 	memset(&re, 0, sizeof(re));
2391 
2392 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2393 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2394 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2395 
2396 	re.key.ReparseTag = rtag;
2397 	memcpy(&re.key.ref, ref, sizeof(*ref));
2398 
2399 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2400 
2401 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2402 
2403 	mark_inode_dirty(&ni->vfs_inode);
2404 	ni_unlock(ni);
2405 
2406 	return err;
2407 }
2408 
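/*
 * Editor's note: the $Extend/$Reparse index ($R) is keyed by the pair
 * (ReparseTag, MFT reference), so each reparse point gets its own entry
 * even when many files share one tag. A (hypothetical) caller creating a
 * symlink would pass the symlink's tag and the MFT_REF of its record.
 */
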
2409 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2410 			const struct MFT_REF *ref)
2411 {
2412 	int err, diff;
2413 	struct ntfs_inode *ni = sbi->reparse.ni;
2414 	struct ntfs_index *indx = &sbi->reparse.index_r;
2415 	struct ntfs_fnd *fnd = NULL;
2416 	struct REPARSE_KEY rkey;
2417 	struct NTFS_DE_R *re;
2418 	struct INDEX_ROOT *root_r;
2419 
2420 	if (!ni)
2421 		return -EINVAL;
2422 
2423 	rkey.ReparseTag = rtag;
2424 	rkey.ref = *ref;
2425 
2426 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2427 
2428 	if (rtag) {
2429 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2430 		goto out1;
2431 	}
2432 
2433 	fnd = fnd_get();
2434 	if (!fnd) {
2435 		err = -ENOMEM;
2436 		goto out1;
2437 	}
2438 
2439 	root_r = indx_get_root(indx, ni, NULL, NULL);
2440 	if (!root_r) {
2441 		err = -EINVAL;
2442 		goto out;
2443 	}
2444 
2445 	/* The context value 1 forces indx_find() to ignore rkey.ReparseTag when comparing keys. */
2446 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2447 			(struct NTFS_DE **)&re, fnd);
2448 	if (err)
2449 		goto out;
2450 
2451 	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2452 		/* Should be impossible; the volume looks corrupt. */
2453 		goto out;
2454 	}
2455 
2456 	memcpy(&rkey, &re->key, sizeof(rkey));
2457 
2458 	fnd_put(fnd);
2459 	fnd = NULL;
2460 
2461 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2462 	if (err)
2463 		goto out;
2464 
2465 out:
2466 	fnd_put(fnd);
2467 
2468 out1:
2469 	mark_inode_dirty(&ni->vfs_inode);
2470 	ni_unlock(ni);
2471 
2472 	return err;
2473 }
2474 
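/*
 * Editor's note: in ntfs_remove_reparse() above, rtag == 0 means the caller
 * does not know the tag, so the lookup matches on the MFT reference alone
 * (the "(void *)1" context passed to indx_find) and the complete key found
 * on disk is then used for the delete.
 */
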
2475 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2476 					  CLST len)
2477 {
2478 	ntfs_unmap_meta(sbi->sb, lcn, len);
2479 	ntfs_discard(sbi, lcn, len);
2480 }
2481 
2482 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2483 {
2484 	CLST end, i, zone_len, zlen;
2485 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2486 	bool dirty = false;
2487 
2488 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2489 	if (!wnd_is_used(wnd, lcn, len)) {
2490 		/* Mark the volume as dirty once wnd->rw_lock is released. */
2491 		dirty = true;
2492 
2493 		end = lcn + len;
2494 		len = 0;
2495 		for (i = lcn; i < end; i++) {
2496 			if (wnd_is_used(wnd, i, 1)) {
2497 				if (!len)
2498 					lcn = i;
2499 				len += 1;
2500 				continue;
2501 			}
2502 
2503 			if (!len)
2504 				continue;
2505 
2506 			if (trim)
2507 				ntfs_unmap_and_discard(sbi, lcn, len);
2508 
2509 			wnd_set_free(wnd, lcn, len);
2510 			len = 0;
2511 		}
2512 
2513 		if (!len)
2514 			goto out;
2515 	}
2516 
2517 	if (trim)
2518 		ntfs_unmap_and_discard(sbi, lcn, len);
2519 	wnd_set_free(wnd, lcn, len);
2520 
2521 	/* Append to the MFT zone, if possible. */
2522 	zone_len = wnd_zone_len(wnd);
2523 	zlen = min(zone_len + len, sbi->zone_max);
2524 
2525 	if (zlen == zone_len) {
2526 		/* MFT zone already has maximum size. */
2527 	} else if (!zone_len) {
2528 		/* Create MFT zone only if 'zlen' is large enough. */
2529 		if (zlen == sbi->zone_max)
2530 			wnd_zone_set(wnd, lcn, zlen);
2531 	} else {
2532 		CLST zone_lcn = wnd_zone_bit(wnd);
2533 
2534 		if (lcn + len == zone_lcn) {
2535 			/* Append into head MFT zone. */
2536 			wnd_zone_set(wnd, lcn, zlen);
2537 		} else if (zone_lcn + zone_len == lcn) {
2538 			/* Append into tail MFT zone. */
2539 			wnd_zone_set(wnd, zone_lcn, zlen);
2540 		}
2541 	}
2542 
2543 out:
2544 	up_write(&wnd->rw_lock);
2545 	if (dirty)
2546 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2547 }
2548 
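/*
 * Editor's note - a worked example for the MFT-zone logic above, with
 * illustrative cluster numbers and sbi->zone_max large enough not to clamp:
 *
 *	zone = [0x1000, 0x1100)
 *	free [0x0F00, 0x1000) -> head append: zone = [0x0F00, 0x1100)
 *	free [0x1100, 0x1180) -> tail append: zone = [0x0F00, 0x1180)
 *
 * A freed range touching neither end leaves the zone unchanged.
 */
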
2549 /*
2550  * run_deallocate - Deallocate clusters.
2551  */
2552 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2553 		   bool trim)
2554 {
2555 	CLST lcn, len;
2556 	size_t idx = 0;
2557 
2558 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2559 		if (lcn == SPARSE_LCN)
2560 			continue;
2561 
2562 		mark_as_free_ex(sbi, lcn, len, trim);
2563 	}
2564 
2565 	return 0;
2566 }
2567 
2568 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2569 {
2570 	int i, ch;
2571 
2572 	/* check for forbidden chars */
2573 	/* Check for forbidden characters. */
2574 		ch = le16_to_cpu(fname->name[i]);
2575 
2576 		/* control chars */
2577 		/* Control characters. */
2578 			return true;
2579 
2580 		switch (ch) {
2581 		/* disallowed by Windows */
2582 		/* Disallowed by Windows. */
2583 		case '/':
2584 		case ':':
2585 		case '*':
2586 		case '?':
2587 		case '<':
2588 		case '>':
2589 		case '|':
2590 		case '\"':
2591 			return true;
2592 
2593 		default:
2594 			/* Allowed character. */
2595 			break;
2596 		}
2597 	}
2598 
2599 	/* file names cannot end with space or . */
2600 	/* File names cannot end with a space or a dot. */
2601 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2602 		if (ch == ' ' || ch == '.')
2603 			return true;
2604 	}
2605 
2606 	return false;
2607 }
2608 
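/*
 * Editor's note - illustrative results for the checks above: "a<b" and
 * "na*me" are rejected for their characters, "name." and "name " for
 * their last character; "name.txt" passes.
 */
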
2609 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2610 				    const struct le_str *fname)
2611 {
2612 	int port_digit;
2613 	const __le16 *name = fname->name;
2614 	int len = fname->len;
2615 	const u16 *upcase = sbi->upcase;
2616 
2617 	/* Check for 3-character reserved names (device names). */
2618 	/* The name alone or with any extension is forbidden. */
2619 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2620 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2621 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2622 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2623 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2624 			return true;
2625 
2626 	/* Check for 4-character reserved names (port name followed by 1..9). */
2627 	/* The name alone or with any extension is forbidden. */
2628 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2629 		port_digit = le16_to_cpu(name[3]);
2630 		if (port_digit >= '1' && port_digit <= '9')
2631 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2632 					    false) ||
2633 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2634 					    false))
2635 				return true;
2636 	}
2637 
2638 	return false;
2639 }
2640 
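/*
 * Editor's note - illustrative results for the checks above, assuming
 * COM_NAME and LPT_NAME are defined alongside the other device names:
 * "CON", "con.txt" and "AUX.log" hit the 3-character branch; "COM1" and
 * "LPT9.dat" hit the 4-character branch; "CONSOLE" and "COM0" are allowed.
 */
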
2641 /*
2642  * valid_windows_name - Check if a file name is valid in Windows.
2643  */
2644 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2645 {
2646 	return !name_has_forbidden_chars(fname) &&
2647 	       !is_reserved_name(sbi, fname);
2648 }
2649 
2650 /*
2651  * ntfs_set_label - Update the current NTFS volume label.
2652  */
2653 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2654 {
2655 	int err;
2656 	struct ATTRIB *attr;
2657 	u32 uni_bytes;
2658 	struct ntfs_inode *ni = sbi->volume.ni;
2659 	/* Allocate PATH_MAX bytes. */
2660 	struct cpu_str *uni = __getname();
2661 
2662 	if (!uni)
2663 		return -ENOMEM;
2664 
2665 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2666 				UTF16_LITTLE_ENDIAN);
2667 	if (err < 0)
2668 		goto out;
2669 
2670 	uni_bytes = uni->len * sizeof(u16);
2671 	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
2672 		ntfs_warn(sbi->sb, "new label is too long");
2673 		err = -EFBIG;
2674 		goto out;
2675 	}
2676 
2677 	ni_lock(ni);
2678 
2679 	/* Remove the old label attribute; ignore any errors. */
2680 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2681 
2682 	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
2683 				 NULL, NULL);
2684 	if (err < 0)
2685 		goto unlock_out;
2686 
2687 	/* write new label in on-disk struct. */
2688 	/* Write the new label into the on-disk structure. */
2689 
2690 	/* update cached value of current label. */
2691 	/* Update the cached copy of the current label. */
2692 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2693 	memcpy(sbi->volume.label, label, len);
2694 	sbi->volume.label[len] = 0;
2695 	mark_inode_dirty_sync(&ni->vfs_inode);
2696 
2697 unlock_out:
2698 	ni_unlock(ni);
2699 
2700 	if (!err)
2701 		err = _ni_write_inode(&ni->vfs_inode, 0);
2702 
2703 out:
2704 	__putname(uni);
2705 	return err;
2706 }
2707
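/*
 * Editor's note - a hedged usage sketch for ntfs_set_label() (hypothetical
 * caller, e.g. an ioctl handler; not from this file). On success both the
 * on-disk $Volume label attribute and the cached sbi->volume.label hold
 * the new name:
 *
 *	u8 label[] = "backup";
 *
 *	err = ntfs_set_label(sbi, label, sizeof(label) - 1);
 */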