xref: /linux/fs/ntfs3/fsntfs.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7 
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
10 #include <linux/fs.h>
11 #include <linux/kernel.h>
12 #include <linux/nls.h>
13 
14 #include "debug.h"
15 #include "ntfs.h"
16 #include "ntfs_fs.h"
17 
// clang-format off
/*
 * Names of the well-known NTFS metadata files and index streams.
 * struct cpu_str is length-prefixed: { length, pad, { characters } }.
 */
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
/* On-disk (UTF-16LE) names of well-known attribute/index streams. */
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
/* "WofCompressedData" - alternate stream name used by WOF-compressed files. */
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

/* Reserved MS-DOS device names (CON, NUL, AUX, PRN, COMx, LPTx). */
static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on
127 
128 /*
129  * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
130  */
/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 *
 * Implements the NTFS "update sequence" (multi-sector transfer) scheme:
 * the current sequence number is stamped into the last word of every
 * sector of the record, and the words it overwrites are saved in the
 * fixup array so ntfs_fix_post_read() can restore them.
 *
 * Return: true on success, false if the fixup header is inconsistent.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	/*
	 * fix_off must be word aligned and the whole fixup array must fit
	 * inside the first sector.  fix_num counts the sequence-number word
	 * plus one saved word per sector; after the post-decrement 'fn' is
	 * the sector count, which must fit into 'bytes'.
	 */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	/*
	 * Advance the update sequence number, wrapping back to 1 (0 is
	 * never used).  NOTE(review): the comparison/increment operate on
	 * the raw on-disk word — presumably relies on little-endian hosts;
	 * confirm for big-endian targets.
	 */
	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	/* Point at the last word of the first sector. */
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		/* Save the real word, then stamp the sequence number. */
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}
162 
163 /*
164  * ntfs_fix_post_read - Remove fixups after reading from disk.
165  *
166  * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
167  */
/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Counterpart of ntfs_fix_pre_write(): verifies that the last word of
 * every sector still carries the update sequence number (detects torn
 * multi-sector writes) and restores the saved words from the fixup array.
 *
 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
 * NOTE(review): in this version a mismatch is reported as -E_NTFS_FIXUP;
 * the caller decides whether that is fatal.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	/*
	 * In 'simple' mode do not trust the (possibly damaged) header:
	 * derive the fixup count (sectors + 1) from the record size.
	 */
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. Same sanity rules as ntfs_fix_pre_write(). */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup; /* Expected update sequence number. */
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup (restore the saved word). */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
205 
206 /*
207  * ntfs_extend_init - Load $Extend file.
208  */
/*
 * ntfs_extend_init - Load $Extend file.
 *
 * $Extend (NTFS 3.x+) is a directory containing optional metadata files:
 * $ObjId, $Quota, $Reparse and $UsnJrnl.  Each one found is cached in
 * @sbi; a missing entry is not an error.
 *
 * Return: 0 on success (including pre-3.x volumes where $Extend does not
 * exist), negative error code if $Extend itself cannot be loaded.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	/* $Extend appeared in NTFS version 3.0. */
	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL; /* Make the final iput() a no-op. */
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			/* Keep the inode reference; stored in sbi. */
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		/* Only the inode number is needed; drop the reference. */
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		/* Keep the inode reference; stored in sbi. */
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}
275 
276 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
277 {
278 	int err = 0;
279 	struct super_block *sb = sbi->sb;
280 	bool initialized = false;
281 	struct MFT_REF ref;
282 	struct inode *inode;
283 
284 	/* Check for 4GB. */
285 	if (ni->vfs_inode.i_size >= 0x100000000ull) {
286 		ntfs_err(sb, "\x24LogFile is large than 4G.");
287 		err = -EINVAL;
288 		goto out;
289 	}
290 
291 	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
292 
293 	ref.low = cpu_to_le32(MFT_REC_MFT);
294 	ref.high = 0;
295 	ref.seq = cpu_to_le16(1);
296 
297 	inode = ntfs_iget5(sb, &ref, NULL);
298 
299 	if (IS_ERR(inode))
300 		inode = NULL;
301 
302 	if (!inode) {
303 		/* Try to use MFT copy. */
304 		u64 t64 = sbi->mft.lbo;
305 
306 		sbi->mft.lbo = sbi->mft.lbo2;
307 		inode = ntfs_iget5(sb, &ref, NULL);
308 		sbi->mft.lbo = t64;
309 		if (IS_ERR(inode))
310 			inode = NULL;
311 	}
312 
313 	if (!inode) {
314 		err = -EINVAL;
315 		ntfs_err(sb, "Failed to load $MFT.");
316 		goto out;
317 	}
318 
319 	sbi->mft.ni = ntfs_i(inode);
320 
321 	/* LogFile should not contains attribute list. */
322 	err = ni_load_all_mi(sbi->mft.ni);
323 	if (!err)
324 		err = log_replay(ni, &initialized);
325 
326 	iput(inode);
327 	sbi->mft.ni = NULL;
328 
329 	sync_blockdev(sb->s_bdev);
330 	invalidate_bdev(sb->s_bdev);
331 
332 	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
333 		err = 0;
334 		goto out;
335 	}
336 
337 	if (sb_rdonly(sb) || !initialized)
338 		goto out;
339 
340 	/* Fill LogFile by '-1' if it is initialized. */
341 	err = ntfs_bio_fill_1(sbi, &ni->file.run);
342 
343 out:
344 	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
345 
346 	return err;
347 }
348 
349 /*
350  * ntfs_look_for_free_space - Look for a free space in bitmap.
351  */
/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 *
 * @lcn:     preferred start cluster; 0 means "use the cached hint".
 * @len:     number of clusters requested.
 * @new_lcn: out: first allocated cluster.
 * @new_len: out: clusters actually allocated (may be < @len for
 *	     ALLOCATE_MFT requests).
 * @opt:     ALLOCATE_MFT allocates from the reserved MFT zone.
 *
 * Return: 0 on success, -ENOSPC (or other negative error) on failure.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		/* Empty zone? Try to rebuild it once. */
		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		/* Carve clusters from the front of the zone, shrinking it. */
		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * 'Cause cluster 0 is always used this value means that we should use
	 * cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	/* First attempt: search starting at the hint. */
	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check too big request */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cat from zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	/* Never shrink the zone below its minimum size. */
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate continues clusters (retry with the trimmed zone freed). */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	/* Drop cached metadata buffers covering the allocated range. */
	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}
446 
447 /*
448  * ntfs_check_free_space
449  *
450  * Check if it is possible to allocate 'clen' clusters and 'mlen' Mft records
451  */
/*
 * ntfs_check_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' Mft records
 *
 * @da: when true, account for delayed-allocation reservations.
 *
 * Return: true if the request can be satisfied, false otherwise.
 */
bool ntfs_check_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen,
			   bool da)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;
	CLST da_clusters = ntfs_get_da(sbi);

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);

	/* Subtract clusters already promised to delayed allocation. */
	if (free >= da_clusters) {
		free -= da_clusters;
	} else {
		free = 0;
	}

	/* Keep at least the minimum MFT zone out of reach. */
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen) {
		return false;
	}

	avail = free - (zlen + clen);

	/*
	 * When delalloc is active then keep in mind some reserved space
	 * for future MFT records (the ratio actually used below is
	 * 1 mft record per 1024 clusters).
	 */
	if (da) {
		/* 1 mft record per each 1024 clusters. */
		mlen += da_clusters >> 10;
	}

	if (mlen || !avail) {
		/* Also check the MFT record bitmap. */
		wnd = &sbi->mft.bitmap;
		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
		free = wnd_zeroes(wnd);
		zlen = wnd_zone_len(wnd);
		up_read(&wnd->rw_lock);

		/*
		 * Fail only if neither free MFT records nor enough spare
		 * clusters to grow $MFT are available.
		 */
		if (free < zlen + mlen &&
		    avail < bytes_to_cluster(sbi, mlen << sbi->record_bits)) {
			return false;
		}
	}

	return true;
}
502 
503 /*
504  * ntfs_extend_mft - Allocate additional MFT records.
505  *
506  * sbi->mft.bitmap is locked for write.
507  *
508  * NOTE: recursive:
509  *	ntfs_look_free_mft ->
510  *	ntfs_extend_mft ->
511  *	attr_set_size ->
512  *	ni_insert_nonresident ->
513  *	ni_insert_attr ->
514  *	ni_ins_attr_ext ->
515  *	ntfs_look_free_mft ->
516  *	ntfs_extend_mft
517  *
518  * To avoid recursive always allocate space for two new MFT records
519  * see attrib.c: "at least two MFT to avoid recursive loop".
520  */
/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursive always allocate space for two new MFT records
 * see attrib.c: "at least two MFT to avoid recursive loop".
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	/* Grow in steps, rounded up to a multiple of 128 records. */
	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size_ex(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			       new_mft_bytes, NULL, false, &attr, false);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	/* New records are valid immediately. */
	attr->nres.valid_size = attr->nres.data_size;
	/* Recompute the record count from what was actually allocated. */
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	/* Widen the in-memory bitmap to cover the new record slots. */
	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	/* Format the newly added record slots as empty. */
	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	/* Flush the resized $MFT inode itself. */
	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}
575 
576 /*
577  * ntfs_look_free_mft - Look for a free MFT record.
578  */
/*
 * ntfs_look_free_mft - Look for a free MFT record.
 *
 * @rno: out: the allocated record number.
 * @mft: true when the record is for the MFT itself (caller already
 *	 holds the MFT bitmap lock); false for general-purpose records.
 * @ni:  when non-NULL, attach the new record to this inode via
 *	 ni_add_subrecord().
 * @mi:  out: the attached sub-record (only when @ni is given).
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	/* The in-memory reserved bitmap must cover records [11, 16). */
	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			/* Take the first record of the reserved zone. */
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the nearest to '0' free MFT. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			/* New records start right after the old end. */
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for free record reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
					/*
					 * NOTE: 'goto next' below jumps back
					 * into this branch to print the
					 * notice before continuing the loop.
					 */
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				/* A usable reserved record must be a bare,
				 * link-less base record without $STD/$NAME. */
				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				/* Mark this reserved slot as available. */
				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		/* Reserve a run of records for future MFT growth. */
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		/* Shrink until the whole run is actually free. */
		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		/* Hand out the first record of the new zone. */
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request to get record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
				/* Bitmap exhausted; fall through to extend. */
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that are not reserved for next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}
755 
756 /*
757  * ntfs_mark_rec_free - Mark record as free.
758  * is_mft - true if we are changing MFT
759  */
/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT (the caller then already holds
 *	    the MFT bitmap lock for write).
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	/* Records past the bitmap cannot be freed. */
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		/* Double free indicates on-disk inconsistency. */
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		/* Reserved records [11,16) are tracked in a side bitmap. */
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	/* Update allocation hints so the slot is found again quickly. */
	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}
787 
788 /*
789  * ntfs_clear_mft_tail - Format empty records [from, to).
790  *
791  * sbi->mft.bitmap is locked for write.
792  */
/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * Writes the pre-built empty record template (sbi->new_rec) over every
 * record slot in [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 *
 * Return: 0 on success, negative error code on I/O failure; in either
 * case sbi->mft.used reflects how far formatting actually got.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		/* Map the buffers backing this record slot... */
		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		/* ...and stamp the empty record template into them. */
		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	/* Record how far formatting progressed (even on error). */
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}
828 
829 /*
830  * ntfs_refresh_zone - Refresh MFT zone.
831  *
832  * sbi->used.bitmap is locked for rw.
833  * sbi->mft.bitmap is locked for write.
834  * sbi->mft.ni->file.run_lock for write.
835  */
/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * Rebuilds the reserved-for-MFT cluster zone right after the last
 * cluster currently used by $MFT::DATA.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 *
 * Return: 0 on success (also when a non-empty zone already exists),
 * -EINVAL if the last MFT cluster cannot be resolved.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything unless we have non empty MFT zone. */
	if (wnd_zone_len(wnd))
		return 0;

	/* Virtual cluster just past the end of $MFT::DATA. */
	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find Last Lcn for MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}
865 
866 /*
867  * ntfs_update_mftmirr - Update $MFTMirr data.
868  */
/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 *
 * Copies the mirrored MFT records block-by-block from the primary $MFT
 * location (mft.lbo) to the mirror (mft.lbo2).  Errors abort silently:
 * the NTFS_FLAGS_MFTMIRR flag then stays set so a retry happens later.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi)
{
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	/* Only the first 'recs_mirr' records are mirrored. */
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		/* Source block must be read; destination only mapped. */
		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		wait_on_buffer(bh2);
		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;
		put_bh(bh2);
	}

	/* Mirror is in sync; clear the pending flag. */
	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}
914 
915 /*
916  * ntfs_bad_inode
917  *
918  * Marks inode as bad and marks fs as 'dirty'
919  */
920 void ntfs_bad_inode(struct inode *inode, const char *hint)
921 {
922 	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
923 	struct ntfs_inode *ni = ntfs_i(inode);
924 
925 	ntfs_inode_err(inode, "%s", hint);
926 
927 	/* Do not call make_bad_inode()! */
928 	ni->ni_bad = true;
929 
930 	/* Avoid recursion if bad inode is $Volume. */
931 	if (inode->i_ino != MFT_REC_VOL &&
932 	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
933 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
934 	}
935 }
936 
937 /*
938  * ntfs_set_state
939  *
940  * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
941  * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
942  * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
943  */
/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 *
 * Updates the VOLUME_FLAG_DIRTY bit of the $Volume VOLUME_INFO
 * attribute and flushes the change to disk.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs already dirty(clear).
	 * Do not change any thing if mounted read only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value: skip disk I/O if the bit is already right. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	/* Remember the old flags to detect an actual change below. */
	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		/* Once an error is seen, never clear the dirty bit again. */
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* write mft record on disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}
1018 
1019 /*
1020  * security_hash - Calculates a hash of security descriptor.
1021  */
1022 static inline __le32 security_hash(const void *sd, size_t bytes)
1023 {
1024 	u32 hash = 0;
1025 	const __le32 *ptr = sd;
1026 
1027 	bytes >>= 2;
1028 	while (bytes--)
1029 		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1030 	return cpu_to_le32(hash);
1031 }
1032 
1033 /*
1034  * simple wrapper for sb_bread_unmovable.
1035  */
1036 struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
1037 {
1038 	struct ntfs_sb_info *sbi = sb->s_fs_info;
1039 	struct buffer_head *bh;
1040 
1041 	if (unlikely(block >= sbi->volume.blocks)) {
1042 		/* prevent generic message "attempt to access beyond end of device" */
1043 		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
1044 			 (u64)block << sb->s_blocksize_bits);
1045 		return NULL;
1046 	}
1047 
1048 	bh = sb_bread_unmovable(sb, block);
1049 	if (bh)
1050 		return bh;
1051 
1052 	ntfs_err(sb, "failed to read volume at offset 0x%llx",
1053 		 (u64)block << sb->s_blocksize_bits);
1054 	return NULL;
1055 }
1056 
/*
 * ntfs_sb_write - Write @bytes at device byte offset @lbo.
 *
 * @buf:  data to write; NULL fills the range with 0xFF bytes.
 * @wait: non-zero forces a synchronous write of every buffer.
 *
 * Handles unaligned head/tail by read-modify-write of partial blocks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off; /* Bytes handled in the current block. */
	struct buffer_head *bh;

	/* A synchronous mount implies synchronous writes. */
	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			/* Partial block: must read-modify-write. */
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			/* Full block: no need to read old contents. */
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		wait_on_buffer(bh);
		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			/* NULL buffer means fill with 0xFF. */
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}
1119 
/*
 * ntfs_sb_write_run - Write @bytes at virtual offset @vbo of a run-mapped file.
 *
 * Walks the runlist @run fragment by fragment, translating virtual
 * offsets to device offsets and delegating to ntfs_sb_write().
 *
 * @buf:  data to write; NULL fills with 0xFF (see ntfs_sb_write()).
 * @sync: passed through as the 'wait' flag of ntfs_sb_write().
 *
 * Return: 0 on success, -ENOENT if the run is not fully mapped,
 * -EINVAL on a sparse fragment, or another negative error.
 */
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	/* Cannot write into a hole. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	/* Device offset and remaining length of the first fragment. */
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		/* Fragments must be contiguous in virtual space. */
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}
1167 
1168 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1169 				   const struct runs_tree *run, u64 vbo)
1170 {
1171 	struct super_block *sb = sbi->sb;
1172 	u8 cluster_bits = sbi->cluster_bits;
1173 	CLST lcn;
1174 	u64 lbo;
1175 
1176 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1177 		return ERR_PTR(-ENOENT);
1178 
1179 	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1180 
1181 	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
1182 }
1183 
/*
 * ntfs_read_run_nb_ra - Read @bytes at virtual offset @vbo of @run.
 *
 * Data is copied into @buf (when non-NULL) and/or the buffer heads are
 * collected into @nb (when non-NULL) so the caller can keep references.
 * When @ra is supplied, readahead is issued on the block device mapping.
 * A NULL @run is the special boot-time case: the first read of
 * $Volume/$MFTMirr/$LogFile, served from the absolute MFT position.
 *
 * Return: 0 on success; -ENOENT (unmapped/discontiguous run),
 * -EINVAL (sparse fragment or @nb overflow), -EIO (read failure).
 * On error any buffer heads already stored in @nb are released.
 */
int ntfs_read_run_nb_ra(struct ntfs_sb_info *sbi, const struct runs_tree *run,
			u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb,
			struct file_ra_state *ra)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct address_space *mapping = sb->s_bdev->bd_mapping;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use absolute boot's 'MFTCluster' to read record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		/* Sparse fragments contain no on-disk data to read. */
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	/* From here on, 'off' is the offset inside the first device block. */
	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	if (ra && !ra->ra_pages)
		file_ra_state_init(ra, mapping);

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		if (ra) {
			pgoff_t index = lbo >> PAGE_SHIFT;
			/* Kick off readahead only when not already covered. */
			if (!ra_has_index(ra, index)) {
				page_cache_sync_readahead(mapping, ra, NULL,
							  index, 1);
				ra->prev_pos = (loff_t)index << PAGE_SHIFT;
			}
		}

		/* Read this fragment block by block. */
		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				/* @nb cannot hold more buffers. */
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		/* The boot-time (!run) case covers exactly one record. */
		if (!run) {
			err = -EINVAL;
			goto out;
		}

		/* Get next fragment to read. */
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	/* Error path: release every buffer head collected so far. */
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}
1316 
1317 /*
1318  * ntfs_read_bh
1319  *
1320  * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1321  */
1322 int ntfs_read_bh_ra(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1323 		    u64 vbo, struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1324 		    struct ntfs_buffers *nb, struct file_ra_state *ra)
1325 {
1326 	int err = ntfs_read_run_nb_ra(sbi, run, vbo, rhdr, bytes, nb, ra);
1327 
1328 	if (err)
1329 		return err;
1330 	return ntfs_fix_post_read(rhdr, nb->bytes, true);
1331 }
1332 
/*
 * ntfs_get_bh - Collect buffer heads covering @bytes at offset @vbo of @run.
 *
 * Device blocks that are fully covered are obtained with sb_getblk() and,
 * if not uptodate, zero-filled in memory (no disk read: the caller will
 * overwrite them).  Partially covered blocks are read via ntfs_bread().
 * On success the buffers are stored in @nb; on error everything collected
 * so far is released and @nb is left empty.
 */
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	/* Byte offset inside the first device block. */
	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				/* Whole block will be overwritten: skip the read. */
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				wait_on_buffer(bh);
				lock_buffer(bh);
				if (!buffer_uptodate(bh)) {
					memset(bh->b_data, 0, blocksize);
					set_buffer_uptodate(bh);
				}
				unlock_buffer(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		/* Next fragment must be contiguous in VCN space. */
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	/* Error path: drop every buffer head collected so far. */
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}
1431 
/*
 * ntfs_write_bh - Write a fixed-up NTFS record through its buffer heads.
 *
 * Copies @rhdr into the buffers of @nb while (re)applying the NTFS update
 * sequence: the sequence number stored at rhdr->fix_off is incremented
 * (restarting at 1 once it reaches 0x7FFF), and the last __le16 of every
 * SECTOR_SIZE stripe is saved into the fixup array and replaced with the
 * sequence number.  With @sync set, each buffer is written synchronously
 * and the first write error is returned.
 */
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	/*
	 * Validate the fixup array: 2-byte aligned, fully inside the first
	 * sector, non-empty, and covering no more sectors than @bytes holds.
	 */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		wait_on_buffer(bh);
		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			/* First buffer: advance the update sequence number. */
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			/* Keep the caller's in-memory header in sync. */
			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		/* Save the last word of each sector, stamp the sequence. */
		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			/* Remember the first write error, keep going. */
			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}
1508 
1509 /*
1510  * ntfs_read_write_run - Read/Write disk's page cache.
1511  */
1512 int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1513 			void *buf, u64 vbo, size_t bytes, int wr)
1514 {
1515 	struct super_block *sb = sbi->sb;
1516 	struct address_space *mapping = sb->s_bdev->bd_mapping;
1517 	u8 cluster_bits = sbi->cluster_bits;
1518 	CLST vcn_next, vcn = vbo >> cluster_bits;
1519 	CLST lcn, clen;
1520 	u64 lbo, len;
1521 	size_t idx;
1522 	u32 off, op;
1523 	struct folio *folio;
1524 	char *kaddr;
1525 
1526 	if (!bytes)
1527 		return 0;
1528 
1529 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1530 		return -ENOENT;
1531 
1532 	if (lcn == SPARSE_LCN)
1533 		return -EINVAL;
1534 
1535 	off = vbo & sbi->cluster_mask;
1536 	lbo = ((u64)lcn << cluster_bits) + off;
1537 	len = ((u64)clen << cluster_bits) - off;
1538 
1539 	for (;;) {
1540 		/* Read range [lbo, lbo+len). */
1541 		folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
1542 
1543 		if (IS_ERR(folio))
1544 			return PTR_ERR(folio);
1545 
1546 		off = offset_in_page(lbo);
1547 		op = PAGE_SIZE - off;
1548 
1549 		if (op > len)
1550 			op = len;
1551 		if (op > bytes)
1552 			op = bytes;
1553 
1554 		kaddr = kmap_local_folio(folio, 0);
1555 		if (wr) {
1556 			memcpy(kaddr + off, buf, op);
1557 			folio_mark_dirty(folio);
1558 		} else {
1559 			memcpy(buf, kaddr + off, op);
1560 			flush_dcache_folio(folio);
1561 		}
1562 		kunmap_local(kaddr);
1563 		folio_put(folio);
1564 
1565 		bytes -= op;
1566 		if (!bytes)
1567 			return 0;
1568 
1569 		buf += op;
1570 		len -= op;
1571 		if (len) {
1572 			/* next volume's page. */
1573 			lbo += op;
1574 			continue;
1575 		}
1576 
1577 		/* get next range. */
1578 		vcn_next = vcn + clen;
1579 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1580 		    vcn != vcn_next) {
1581 			return -ENOENT;
1582 		}
1583 
1584 		if (lcn == SPARSE_LCN)
1585 			return -EINVAL;
1586 
1587 		lbo = ((u64)lcn << cluster_bits);
1588 		len = ((u64)clen << cluster_bits);
1589 	}
1590 }
1591 
1592 /*
1593  * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1594  *
1595  * Fill on-disk logfile range by (-1)
1596  * this means empty logfile.
1597  */
1598 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1599 {
1600 	int err = 0;
1601 	struct super_block *sb = sbi->sb;
1602 	struct block_device *bdev = sb->s_bdev;
1603 	u8 cluster_bits = sbi->cluster_bits;
1604 	struct bio *new, *bio = NULL;
1605 	CLST lcn, clen;
1606 	u64 lbo, len;
1607 	size_t run_idx;
1608 	struct page *fill;
1609 	void *kaddr;
1610 	struct blk_plug plug;
1611 
1612 	fill = alloc_page(GFP_KERNEL);
1613 	if (!fill)
1614 		return -ENOMEM;
1615 
1616 	kaddr = kmap_atomic(fill);
1617 	memset(kaddr, -1, PAGE_SIZE);
1618 	kunmap_atomic(kaddr);
1619 	flush_dcache_page(fill);
1620 	lock_page(fill);
1621 
1622 	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1623 		err = -ENOENT;
1624 		goto out;
1625 	}
1626 
1627 	/*
1628 	 * TODO: Try blkdev_issue_write_same.
1629 	 */
1630 	blk_start_plug(&plug);
1631 	do {
1632 		lbo = (u64)lcn << cluster_bits;
1633 		len = (u64)clen << cluster_bits;
1634 new_bio:
1635 		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1636 		if (bio) {
1637 			bio_chain(bio, new);
1638 			submit_bio(bio);
1639 		}
1640 		bio = new;
1641 		bio->bi_iter.bi_sector = lbo >> 9;
1642 
1643 		for (;;) {
1644 			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
1645 
1646 			if (bio_add_page(bio, fill, add, 0) < add)
1647 				goto new_bio;
1648 
1649 			lbo += add;
1650 			if (len <= add)
1651 				break;
1652 			len -= add;
1653 		}
1654 	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1655 
1656 	if (!err)
1657 		err = submit_bio_wait(bio);
1658 	bio_put(bio);
1659 
1660 	blk_finish_plug(&plug);
1661 out:
1662 	unlock_page(fill);
1663 	put_page(fill);
1664 
1665 	return err;
1666 }
1667 
1668 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1669 		    u64 vbo, u64 *lbo, u64 *bytes)
1670 {
1671 	u32 off;
1672 	CLST lcn, len;
1673 	u8 cluster_bits = sbi->cluster_bits;
1674 
1675 	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1676 		return -ENOENT;
1677 
1678 	off = vbo & sbi->cluster_mask;
1679 	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1680 	*bytes = ((u64)len << cluster_bits) - off;
1681 
1682 	return 0;
1683 }
1684 
/*
 * ntfs_new_inode - Allocate a new in-memory inode for MFT record @rno.
 *
 * Formats a fresh MFT record for the inode and inserts the VFS inode
 * into the inode hash in locked state (insert_inode_locked).
 *
 * Return: the new ntfs_inode or an ERR_PTR(); on failure the VFS inode
 * is marked bad and dropped here.
 */
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		/* Either formatting or hashing failed: poison and drop. */
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}
1716 
1717 /*
1718  * O:BAG:BAD:(A;OICI;FA;;;WD)
1719  * Owner S-1-5-32-544 (Administrators)
1720  * Group S-1-5-32-544 (Administrators)
1721  * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1722  */
/*
 * Self-relative SECURITY_DESCRIPTOR_RELATIVE blob:
 * header (Revision=1, Control=0x8004: self-relative + DACL present,
 * Owner at 0x30, Group at 0x40, Sacl=0, Dacl at 0x14), followed by a
 * 0x1C-byte ACL containing one ACCESS_ALLOWED ACE (mask 0x001F01FF =
 * FILE_ALL_ACCESS for SID S-1-1-0), then the owner and group SIDs
 * (both S-1-5-32-544).
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

/* Guard the hand-written layout above: it must be exactly 0x50 bytes. */
static_assert(sizeof(s_default_security) == 0x50);
1734 
/* Byte length of a SID: fixed header plus SubAuthorityCount sub-authorities. */
static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}
1739 
1740 /*
1741  * is_acl_valid
1742  *
1743  * Thanks Mark Harmstone for idea.
1744  */
1745 static bool is_acl_valid(const struct ACL *acl, u32 len)
1746 {
1747 	const struct ACE_HEADER *ace;
1748 	u32 i;
1749 	u16 ace_count, ace_size;
1750 
1751 	if (acl->AclRevision != ACL_REVISION &&
1752 	    acl->AclRevision != ACL_REVISION_DS) {
1753 		/*
1754 		 * This value should be ACL_REVISION, unless the ACL contains an
1755 		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1756 		 * All ACEs in an ACL must be at the same revision level.
1757 		 */
1758 		return false;
1759 	}
1760 
1761 	if (acl->Sbz1)
1762 		return false;
1763 
1764 	if (le16_to_cpu(acl->AclSize) > len)
1765 		return false;
1766 
1767 	if (acl->Sbz2)
1768 		return false;
1769 
1770 	len -= sizeof(struct ACL);
1771 	ace = (struct ACE_HEADER *)&acl[1];
1772 	ace_count = le16_to_cpu(acl->AceCount);
1773 
1774 	for (i = 0; i < ace_count; i++) {
1775 		if (len < sizeof(struct ACE_HEADER))
1776 			return false;
1777 
1778 		ace_size = le16_to_cpu(ace->AceSize);
1779 		if (len < ace_size)
1780 			return false;
1781 
1782 		len -= ace_size;
1783 		ace = Add2Ptr(ace, ace_size);
1784 	}
1785 
1786 	return true;
1787 }
1788 
/*
 * is_sd_valid - Sanity-check a self-relative security descriptor.
 *
 * Verifies the fixed header, then for each present component (owner SID,
 * group SID, SACL, DACL) that its offset and declared size lie entirely
 * inside the @len-byte buffer.
 */
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	/* Reserved byte must be zero. */
	if (sd->Sbz1)
		return false;

	/* Only the self-relative (offset-based) layout is supported here. */
	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		/* The fixed part of the SID must fit before reading its count. */
		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}
1857 
1858 /*
1859  * ntfs_security_init - Load and parse $Secure.
1860  */
1861 int ntfs_security_init(struct ntfs_sb_info *sbi)
1862 {
1863 	int err;
1864 	struct super_block *sb = sbi->sb;
1865 	struct inode *inode;
1866 	struct ntfs_inode *ni;
1867 	struct MFT_REF ref;
1868 	struct ATTRIB *attr;
1869 	struct ATTR_LIST_ENTRY *le;
1870 	u64 sds_size;
1871 	size_t off;
1872 	struct NTFS_DE *ne;
1873 	struct NTFS_DE_SII *sii_e;
1874 	struct ntfs_fnd *fnd_sii = NULL;
1875 	const struct INDEX_ROOT *root_sii;
1876 	const struct INDEX_ROOT *root_sdh;
1877 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1878 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
1879 
1880 	ref.low = cpu_to_le32(MFT_REC_SECURE);
1881 	ref.high = 0;
1882 	ref.seq = cpu_to_le16(MFT_REC_SECURE);
1883 
1884 	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1885 	if (IS_ERR(inode)) {
1886 		err = PTR_ERR(inode);
1887 		ntfs_err(sb, "Failed to load $Secure (%d).", err);
1888 		inode = NULL;
1889 		goto out;
1890 	}
1891 
1892 	ni = ntfs_i(inode);
1893 
1894 	le = NULL;
1895 
1896 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1897 			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
1898 	if (!attr ||
1899 	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1900 	    root_sdh->type != ATTR_ZERO ||
1901 	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1902 	    offsetof(struct INDEX_ROOT, ihdr) +
1903 			    le32_to_cpu(root_sdh->ihdr.used) >
1904 		    le32_to_cpu(attr->res.data_size)) {
1905 		ntfs_err(sb, "$Secure::$SDH is corrupted.");
1906 		err = -EINVAL;
1907 		goto out;
1908 	}
1909 
1910 	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
1911 	if (err) {
1912 		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
1913 		goto out;
1914 	}
1915 
1916 	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1917 			    ARRAY_SIZE(SII_NAME), NULL, NULL);
1918 	if (!attr ||
1919 	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
1920 	    root_sii->type != ATTR_ZERO ||
1921 	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1922 	    offsetof(struct INDEX_ROOT, ihdr) +
1923 			    le32_to_cpu(root_sii->ihdr.used) >
1924 		    le32_to_cpu(attr->res.data_size)) {
1925 		ntfs_err(sb, "$Secure::$SII is corrupted.");
1926 		err = -EINVAL;
1927 		goto out;
1928 	}
1929 
1930 	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1931 	if (err) {
1932 		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
1933 		goto out;
1934 	}
1935 
1936 	fnd_sii = fnd_get();
1937 	if (!fnd_sii) {
1938 		err = -ENOMEM;
1939 		goto out;
1940 	}
1941 
1942 	sds_size = inode->i_size;
1943 
1944 	/* Find the last valid Id. */
1945 	sbi->security.next_id = SECURITY_ID_FIRST;
1946 	/* Always write new security at the end of bucket. */
1947 	sbi->security.next_off =
1948 		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
1949 
1950 	off = 0;
1951 	ne = NULL;
1952 
1953 	for (;;) {
1954 		u32 next_id;
1955 
1956 		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1957 		if (err || !ne)
1958 			break;
1959 
1960 		sii_e = (struct NTFS_DE_SII *)ne;
1961 		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
1962 			continue;
1963 
1964 		next_id = le32_to_cpu(sii_e->sec_id) + 1;
1965 		if (next_id >= sbi->security.next_id)
1966 			sbi->security.next_id = next_id;
1967 	}
1968 
1969 	sbi->security.ni = ni;
1970 	inode = NULL;
1971 out:
1972 	iput(inode);
1973 	fnd_put(fnd_sii);
1974 
1975 	return err;
1976 }
1977 
1978 /*
1979  * ntfs_get_security_by_id - Read security descriptor by id.
1980  */
1981 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1982 			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1983 			    size_t *size)
1984 {
1985 	int err;
1986 	int diff;
1987 	struct ntfs_inode *ni = sbi->security.ni;
1988 	struct ntfs_index *indx = &sbi->security.index_sii;
1989 	void *p = NULL;
1990 	struct NTFS_DE_SII *sii_e;
1991 	struct ntfs_fnd *fnd_sii;
1992 	struct SECURITY_HDR d_security;
1993 	const struct INDEX_ROOT *root_sii;
1994 	u32 t32;
1995 
1996 	*sd = NULL;
1997 
1998 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1999 
2000 	fnd_sii = fnd_get();
2001 	if (!fnd_sii) {
2002 		err = -ENOMEM;
2003 		goto out;
2004 	}
2005 
2006 	root_sii = indx_get_root(indx, ni, NULL, NULL);
2007 	if (!root_sii) {
2008 		err = -EINVAL;
2009 		goto out;
2010 	}
2011 
2012 	/* Try to find this SECURITY descriptor in SII indexes. */
2013 	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
2014 			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
2015 	if (err)
2016 		goto out;
2017 
2018 	if (diff)
2019 		goto out;
2020 
2021 	t32 = le32_to_cpu(sii_e->sec_hdr.size);
2022 	if (t32 < sizeof(struct SECURITY_HDR)) {
2023 		err = -EINVAL;
2024 		goto out;
2025 	}
2026 
2027 	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
2028 		/* Looks like too big security. 0x10000 - is arbitrary big number. */
2029 		err = -EFBIG;
2030 		goto out;
2031 	}
2032 
2033 	*size = t32 - sizeof(struct SECURITY_HDR);
2034 
2035 	p = kmalloc(*size, GFP_NOFS);
2036 	if (!p) {
2037 		err = -ENOMEM;
2038 		goto out;
2039 	}
2040 
2041 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2042 			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2043 			       sizeof(d_security), NULL);
2044 	if (err)
2045 		goto out;
2046 
2047 	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
2048 		err = -EINVAL;
2049 		goto out;
2050 	}
2051 
2052 	err = ntfs_read_run_nb(sbi, &ni->file.run,
2053 			       le64_to_cpu(sii_e->sec_hdr.off) +
2054 				       sizeof(struct SECURITY_HDR),
2055 			       p, *size, NULL);
2056 	if (err)
2057 		goto out;
2058 
2059 	*sd = p;
2060 	p = NULL;
2061 
2062 out:
2063 	kfree(p);
2064 	fnd_put(fnd_sii);
2065 	ni_unlock(ni);
2066 
2067 	return err;
2068 }
2069 
2070 /*
2071  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2072  *
2073  * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2074  * and it contains a mirror copy of each security descriptor.  When writing
2075  * to a security descriptor at location X, another copy will be written at
2076  * location (X+256K).
2077  * When writing a security descriptor that will cross the 256K boundary,
2078  * the pointer will be advanced by 256K to skip
2079  * over the mirror portion.
2080  */
2081 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2082 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2083 			 u32 size_sd, __le32 *security_id, bool *inserted)
2084 {
2085 	int err, diff;
2086 	struct ntfs_inode *ni = sbi->security.ni;
2087 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2088 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2089 	struct NTFS_DE_SDH *e;
2090 	struct NTFS_DE_SDH sdh_e;
2091 	struct NTFS_DE_SII sii_e;
2092 	struct SECURITY_HDR *d_security;
2093 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2094 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2095 	struct SECURITY_KEY hash_key;
2096 	struct ntfs_fnd *fnd_sdh = NULL;
2097 	const struct INDEX_ROOT *root_sdh;
2098 	const struct INDEX_ROOT *root_sii;
2099 	u64 mirr_off, new_sds_size;
2100 	u32 next, left;
2101 
2102 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2103 		      SecurityDescriptorsBlockSize);
2104 
2105 	hash_key.hash = security_hash(sd, size_sd);
2106 	hash_key.sec_id = SECURITY_ID_INVALID;
2107 
2108 	if (inserted)
2109 		*inserted = false;
2110 	*security_id = SECURITY_ID_INVALID;
2111 
2112 	/* Allocate a temporal buffer. */
2113 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2114 	if (!d_security)
2115 		return -ENOMEM;
2116 
2117 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2118 
2119 	fnd_sdh = fnd_get();
2120 	if (!fnd_sdh) {
2121 		err = -ENOMEM;
2122 		goto out;
2123 	}
2124 
2125 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2126 	if (!root_sdh) {
2127 		err = -EINVAL;
2128 		goto out;
2129 	}
2130 
2131 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2132 	if (!root_sii) {
2133 		err = -EINVAL;
2134 		goto out;
2135 	}
2136 
2137 	/*
2138 	 * Check if such security already exists.
2139 	 * Use "SDH" and hash -> to get the offset in "SDS".
2140 	 */
2141 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2142 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2143 			fnd_sdh);
2144 	if (err)
2145 		goto out;
2146 
2147 	while (e) {
2148 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2149 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2150 					       le64_to_cpu(e->sec_hdr.off),
2151 					       d_security, new_sec_size, NULL);
2152 			if (err)
2153 				goto out;
2154 
2155 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2156 			    d_security->key.hash == hash_key.hash &&
2157 			    !memcmp(d_security + 1, sd, size_sd)) {
2158 				/* Such security already exists. */
2159 				*security_id = d_security->key.sec_id;
2160 				err = 0;
2161 				goto out;
2162 			}
2163 		}
2164 
2165 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2166 				     (struct NTFS_DE **)&e, fnd_sdh);
2167 		if (err)
2168 			goto out;
2169 
2170 		if (!e || e->key.hash != hash_key.hash)
2171 			break;
2172 	}
2173 
2174 	/* Zero unused space. */
2175 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2176 	left = SecurityDescriptorsBlockSize - next;
2177 
2178 	/* Zero gap until SecurityDescriptorsBlockSize. */
2179 	if (left < new_sec_size) {
2180 		/* Zero "left" bytes from sbi->security.next_off. */
2181 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2182 	}
2183 
2184 	/* Zero tail of previous security. */
2185 	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2186 
2187 	/*
2188 	 * Example:
2189 	 * 0x40438 == ni->vfs_inode.i_size
2190 	 * 0x00440 == sbi->security.next_off
2191 	 * need to zero [0x438-0x440)
2192 	 * if (next > used) {
2193 	 *  u32 tozero = next - used;
2194 	 *  zero "tozero" bytes from sbi->security.next_off - tozero
2195 	 */
2196 
2197 	/* Format new security descriptor. */
2198 	d_security->key.hash = hash_key.hash;
2199 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2200 	d_security->off = cpu_to_le64(sbi->security.next_off);
2201 	d_security->size = cpu_to_le32(new_sec_size);
2202 	memcpy(d_security + 1, sd, size_sd);
2203 
2204 	/* Write main SDS bucket. */
2205 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2206 				d_security, aligned_sec_size, 0);
2207 
2208 	if (err)
2209 		goto out;
2210 
2211 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2212 	new_sds_size = mirr_off + aligned_sec_size;
2213 
2214 	if (new_sds_size > ni->vfs_inode.i_size) {
2215 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2216 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2217 				    new_sds_size, &new_sds_size, false);
2218 		if (err)
2219 			goto out;
2220 	}
2221 
2222 	/* Write copy SDS bucket. */
2223 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2224 				aligned_sec_size, 0);
2225 	if (err)
2226 		goto out;
2227 
2228 	/* Fill SII entry. */
2229 	sii_e.de.view.data_off =
2230 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2231 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2232 	sii_e.de.view.res = 0;
2233 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2234 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2235 	sii_e.de.flags = 0;
2236 	sii_e.de.res = 0;
2237 	sii_e.sec_id = d_security->key.sec_id;
2238 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2239 
2240 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2241 	if (err)
2242 		goto out;
2243 
2244 	/* Fill SDH entry. */
2245 	sdh_e.de.view.data_off =
2246 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2247 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2248 	sdh_e.de.view.res = 0;
2249 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2250 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2251 	sdh_e.de.flags = 0;
2252 	sdh_e.de.res = 0;
2253 	sdh_e.key.hash = d_security->key.hash;
2254 	sdh_e.key.sec_id = d_security->key.sec_id;
2255 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2256 	sdh_e.magic[0] = cpu_to_le16('I');
2257 	sdh_e.magic[1] = cpu_to_le16('I');
2258 
2259 	fnd_clear(fnd_sdh);
2260 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2261 				fnd_sdh, 0);
2262 	if (err)
2263 		goto out;
2264 
2265 	*security_id = d_security->key.sec_id;
2266 	if (inserted)
2267 		*inserted = true;
2268 
2269 	/* Update Id and offset for next descriptor. */
2270 	sbi->security.next_id += 1;
2271 	sbi->security.next_off += aligned_sec_size;
2272 
2273 out:
2274 	fnd_put(fnd_sdh);
2275 	mark_inode_dirty(&ni->vfs_inode);
2276 	ni_unlock(ni);
2277 	kfree(d_security);
2278 
2279 	return err;
2280 }
2281 
2282 /*
2283  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2284  */
2285 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2286 {
2287 	int err;
2288 	struct ntfs_inode *ni = sbi->reparse.ni;
2289 	struct ntfs_index *indx = &sbi->reparse.index_r;
2290 	struct ATTRIB *attr;
2291 	struct ATTR_LIST_ENTRY *le;
2292 	const struct INDEX_ROOT *root_r;
2293 
2294 	if (!ni)
2295 		return 0;
2296 
2297 	le = NULL;
2298 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2299 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2300 	if (!attr) {
2301 		err = -EINVAL;
2302 		goto out;
2303 	}
2304 
2305 	root_r = resident_data(attr);
2306 	if (root_r->type != ATTR_ZERO ||
2307 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2308 		err = -EINVAL;
2309 		goto out;
2310 	}
2311 
2312 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2313 	if (err)
2314 		goto out;
2315 
2316 out:
2317 	return err;
2318 }
2319 
2320 /*
2321  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2322  */
2323 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2324 {
2325 	int err;
2326 	struct ntfs_inode *ni = sbi->objid.ni;
2327 	struct ntfs_index *indx = &sbi->objid.index_o;
2328 	struct ATTRIB *attr;
2329 	struct ATTR_LIST_ENTRY *le;
2330 	const struct INDEX_ROOT *root;
2331 
2332 	if (!ni)
2333 		return 0;
2334 
2335 	le = NULL;
2336 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2337 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2338 	if (!attr) {
2339 		err = -EINVAL;
2340 		goto out;
2341 	}
2342 
2343 	root = resident_data(attr);
2344 	if (root->type != ATTR_ZERO ||
2345 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2346 		err = -EINVAL;
2347 		goto out;
2348 	}
2349 
2350 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2351 	if (err)
2352 		goto out;
2353 
2354 out:
2355 	return err;
2356 }
2357 
/*
 * ntfs_objid_remove - Delete the entry keyed by @guid from the $ObjId index.
 */
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
{
	int err;
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;

	/* The volume may have no $ObjId index at all. */
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);

	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
2376 
/*
 * ntfs_insert_reparse - Insert a (@rtag, @ref) entry into $Extend/$Reparse.
 */
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;

	/* The volume may have no $Reparse index at all. */
	if (!ni)
		return -EINVAL;

	/* memset also clears padding; the entry goes to disk as-is. */
	memset(&re, 0, sizeof(re));

	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));

	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);

	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
2406 
/*
 * ntfs_remove_reparse - Remove an entry from $Extend/$Reparse.
 *
 * With a non-zero @rtag the full key is known and deleted directly.
 * With @rtag == 0 the entry is first located by @ref alone (the tag part
 * of the key is ignored during the search), then deleted using the
 * complete key found on disk.
 */
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
{
	int err, diff;
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;

	/* The volume may have no $Reparse index at all. */
	if (!ni)
		return -EINVAL;

	rkey.ReparseTag = rtag;
	rkey.ref = *ref;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);

	if (rtag) {
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
		goto out1;
	}

	fnd = fnd_get();
	if (!fnd) {
		err = -ENOMEM;
		goto out1;
	}

	root_r = indx_get_root(indx, ni, NULL, NULL);
	if (!root_r) {
		err = -EINVAL;
		goto out;
	}

	/* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (err)
		goto out;

	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Impossible. Looks like volume corrupt? */
		goto out;
	}

	/* Capture the on-disk key (including its real tag) before deleting. */
	memcpy(&rkey, &re->key, sizeof(rkey));

	/* Release the find handle; the delete rebalances the index. */
	fnd_put(fnd);
	fnd = NULL;

	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	if (err)
		goto out;

out:
	fnd_put(fnd);

out1:
	mark_inode_dirty(&ni->vfs_inode);
	ni_unlock(ni);

	return err;
}
2472 
/*
 * ntfs_unmap_and_discard - Unmap cached metadata, then discard clusters.
 *
 * Unmaps buffer heads covering clusters [lcn, lcn + len) before the
 * discard (TRIM) request is issued for the same range.
 */
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
					  CLST len)
{
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
}
2479 
/*
 * mark_as_free_ex - Mark clusters [lcn, lcn + len) as free in the bitmap.
 *
 * @trim:	also unmap cached metadata and discard (TRIM) freed clusters.
 *
 * If part of the range is already free (unexpected; the volume will be
 * flagged dirty), only the still-used sub-runs are freed. Freed clusters
 * adjacent to the MFT zone are merged back into it, up to sbi->zone_max.
 */
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
	CLST end, i, zone_len, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	bool dirty = false;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		/* mark volume as dirty out of wnd->rw_lock */
		dirty = true;

		/* Walk the range and free each maximal run of used clusters. */
		end = lcn + len;
		len = 0;
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				/* Start a new run or extend the current one. */
				if (!len)
					lcn = i;
				len += 1;
				continue;
			}

			/* Free cluster ends the run (if one is open). */
			if (!len)
				continue;

			if (trim)
				ntfs_unmap_and_discard(sbi, lcn, len);

			wnd_set_free(wnd, lcn, len);
			len = 0;
		}

		/* No trailing used run left; skip the common free path. */
		if (!len)
			goto out;
	}

	/* Free the (remaining) run [lcn, lcn + len). */
	if (trim)
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);

	/* append to MFT zone, if possible. */
	zone_len = wnd_zone_len(wnd);
	zlen = min(zone_len + len, sbi->zone_max);

	if (zlen == zone_len) {
		/* MFT zone already has maximum size. */
	} else if (!zone_len) {
		/* Create MFT zone only if 'zlen' is large enough. */
		if (zlen == sbi->zone_max)
			wnd_zone_set(wnd, lcn, zlen);
	} else {
		CLST zone_lcn = wnd_zone_bit(wnd);

		if (lcn + len == zone_lcn) {
			/* Append into head MFT zone. */
			wnd_zone_set(wnd, lcn, zlen);
		} else if (zone_lcn + zone_len == lcn) {
			/* Append into tail MFT zone. */
			wnd_zone_set(wnd, zone_lcn, zlen);
		}
	}

out:
	up_write(&wnd->rw_lock);
	/* Set the dirty flag only after wnd->rw_lock is dropped. */
	if (dirty)
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
2546 
2547 /*
2548  * run_deallocate - Deallocate clusters.
2549  */
2550 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2551 		   bool trim)
2552 {
2553 	CLST lcn, len;
2554 	size_t idx = 0;
2555 
2556 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2557 		if (lcn == SPARSE_LCN)
2558 			continue;
2559 
2560 		mark_as_free_ex(sbi, lcn, len, trim);
2561 	}
2562 
2563 	return 0;
2564 }
2565 
2566 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2567 {
2568 	int i, ch;
2569 
2570 	/* check for forbidden chars */
2571 	for (i = 0; i < fname->len; ++i) {
2572 		ch = le16_to_cpu(fname->name[i]);
2573 
2574 		/* control chars */
2575 		if (ch < 0x20)
2576 			return true;
2577 
2578 		switch (ch) {
2579 		/* disallowed by Windows */
2580 		case '\\':
2581 		case '/':
2582 		case ':':
2583 		case '*':
2584 		case '?':
2585 		case '<':
2586 		case '>':
2587 		case '|':
2588 		case '\"':
2589 			return true;
2590 
2591 		default:
2592 			/* allowed char */
2593 			break;
2594 		}
2595 	}
2596 
2597 	/* file names cannot end with space or . */
2598 	if (fname->len > 0) {
2599 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2600 		if (ch == ' ' || ch == '.')
2601 			return true;
2602 	}
2603 
2604 	return false;
2605 }
2606 
2607 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2608 				    const struct le_str *fname)
2609 {
2610 	int port_digit;
2611 	const __le16 *name = fname->name;
2612 	int len = fname->len;
2613 	const u16 *upcase = sbi->upcase;
2614 
2615 	/* check for 3 chars reserved names (device names) */
2616 	/* name by itself or with any extension is forbidden */
2617 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2618 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2619 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2620 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2621 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2622 			return true;
2623 
2624 	/* check for 4 chars reserved names (port name followed by 1..9) */
2625 	/* name by itself or with any extension is forbidden */
2626 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2627 		port_digit = le16_to_cpu(name[3]);
2628 		if (port_digit >= '1' && port_digit <= '9')
2629 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2630 					    false) ||
2631 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2632 					    false))
2633 				return true;
2634 	}
2635 
2636 	return false;
2637 }
2638 
2639 /*
2640  * valid_windows_name - Check if a file name is valid in Windows.
2641  */
2642 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2643 {
2644 	return !name_has_forbidden_chars(fname) &&
2645 	       !is_reserved_name(sbi, fname);
2646 }
2647 
2648 /*
2649  * ntfs_set_label - updates current ntfs label.
2650  */
2651 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2652 {
2653 	int err;
2654 	struct ATTRIB *attr;
2655 	u32 uni_bytes;
2656 	struct ntfs_inode *ni = sbi->volume.ni;
2657 	/* Allocate PATH_MAX bytes. */
2658 	struct cpu_str *uni = kmalloc(PATH_MAX, GFP_KERNEL);
2659 
2660 	if (!uni)
2661 		return -ENOMEM;
2662 
2663 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2664 				UTF16_LITTLE_ENDIAN);
2665 	if (err < 0)
2666 		goto out;
2667 
2668 	uni_bytes = uni->len * sizeof(u16);
2669 	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
2670 		ntfs_warn(sbi->sb, "new label is too long");
2671 		err = -EFBIG;
2672 		goto out;
2673 	}
2674 
2675 	ni_lock(ni);
2676 
2677 	/* Ignore any errors. */
2678 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2679 
2680 	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
2681 				 NULL, NULL);
2682 	if (err < 0)
2683 		goto unlock_out;
2684 
2685 	/* write new label in on-disk struct. */
2686 	memcpy(resident_data(attr), uni->name, uni_bytes);
2687 
2688 	/* update cached value of current label. */
2689 	if (len >= ARRAY_SIZE(sbi->volume.label))
2690 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2691 	memcpy(sbi->volume.label, label, len);
2692 	sbi->volume.label[len] = 0;
2693 	mark_inode_dirty_sync(&ni->vfs_inode);
2694 
2695 unlock_out:
2696 	ni_unlock(ni);
2697 
2698 	if (!err)
2699 		err = _ni_write_inode(&ni->vfs_inode, 0);
2700 
2701 out:
2702 	kfree(uni);
2703 	return err;
2704 }
2705