// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

// clang-format off
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
	1, 0, { '.' },
};
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};

#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif

static const __le16 CON_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
};

static const __le16 NUL_NAME[3] = {
	cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
};

static const __le16 AUX_NAME[3] = {
	cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
};

static const __le16 PRN_NAME[3] = {
	cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
};

static const __le16 COM_NAME[3] = {
	cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
};

static const __le16 LPT_NAME[3] = {
	cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
};

// clang-format on

/*
 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
 */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
	u16 *fixup, *ptr;
	u16 sample;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return false;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);

	if (*fixup >= 0x7FFF)
		*fixup = 1;
	else
		*fixup += 1;

	sample = *fixup;

	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));

	while (fn--) {
		*++fixup = *ptr;
		*ptr = sample;
		ptr += SECTOR_SIZE / sizeof(short);
	}
	return true;
}

/*
 * ntfs_fix_post_read - Remove fixups after reading from disk.
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
		       bool simple)
{
	int ret;
	u16 *fixup, *ptr;
	u16 sample, fo, fn;

	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
		      le16_to_cpu(rhdr->fix_num);

	/* Check errors. */
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -E_NTFS_CORRUPT;
	}

	/* Get fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	sample = *fixup;
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
	ret = 0;

	while (fn--) {
		/* Test current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
			ret = -E_NTFS_FIXUP;
		}

		/* Replace fixup. */
		*ptr = *++fixup;
		ptr += SECTOR_SIZE / sizeof(short);
	}

	return ret;
}
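
/*
 * Illustrative sketch (not compiled): round trip of the update-sequence
 * ("fixup") protection implemented by the two helpers above. The record
 * and sector sizes are assumptions for the example.
 */
#if 0
static void fixup_round_trip_example(struct NTFS_RECORD_HEADER *rhdr)
{
	/*
	 * Seal a two-sector (1024-byte) record: bump the update sequence
	 * number and stamp it into the last word of each 512-byte sector,
	 * saving the overwritten words in the fixup array.
	 */
	if (!ntfs_fix_pre_write(rhdr, 1024))
		return;	/* invalid fixup header */

	/* ... the record is written to disk and read back here ... */

	/*
	 * Undo the stamps: each sector tail must still equal the update
	 * sequence number, otherwise the write was torn.
	 */
	switch (ntfs_fix_post_read(rhdr, 1024, false)) {
	case 0:			/* all tails matched, record restored */
	case -E_NTFS_FIXUP:	/* torn write detected */
	case -E_NTFS_CORRUPT:	/* fixup header itself is invalid */
	default:
		break;
	}
}
#endif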

/*
 * ntfs_extend_init - Load $Extend file.
 */
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	struct MFT_REF ref;

	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend due to NTFS version");
		return 0;
	}

	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend (%d).", err);
		inode = NULL;
		goto out;
	}

	/* If ntfs_iget5() reads from disk it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			iput(inode2);
		} else {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
		}
	}

	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
		iput(inode2);
	}

	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	}

	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
		iput(inode2);
	}

	err = 0;
out:
	iput(inode);
	return err;
}

int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	struct MFT_REF ref;
	struct inode *inode;

	/* Check for 4GB. */
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is larger than 4G.");
		err = -EINVAL;
		goto out;
	}

	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;

	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.high = 0;
	ref.seq = cpu_to_le16(1);

	inode = ntfs_iget5(sb, &ref, NULL);

	if (IS_ERR(inode))
		inode = NULL;

	if (!inode) {
		/* Try to use MFT copy. */
		u64 t64 = sbi->mft.lbo;

		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		sbi->mft.lbo = t64;
		if (IS_ERR(inode))
			inode = NULL;
	}

	if (!inode) {
		err = -EINVAL;
		ntfs_err(sb, "Failed to load $MFT.");
		goto out;
	}

	sbi->mft.ni = ntfs_i(inode);

	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
	if (!err)
		err = log_replay(ni, &initialized);

	iput(inode);
	sbi->mft.ni = NULL;

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);

	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
		err = 0;
		goto out;
	}

	if (sb_rdonly(sb) || !initialized)
		goto out;

	/* Fill LogFile with -1 if it was initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);

out:
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;

	return err;
}

/*
 * ntfs_look_for_free_space - Look for a free space in bitmap.
 */
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
{
	int err;
	CLST alen;
	struct super_block *sb = sbi->sb;
	size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;

	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);

		if (!zlen) {
			err = ntfs_refresh_zone(sbi);
			if (err)
				goto up_write;

			zlen = wnd_zone_len(wnd);
		}

		if (!zlen) {
			ntfs_err(sbi->sb, "no free space to extend mft");
			err = -ENOSPC;
			goto up_write;
		}

		lcn = wnd_zone_bit(wnd);
		alen = min_t(CLST, len, zlen);

		wnd_zone_set(wnd, lcn + alen, zlen - alen);

		err = wnd_set_used(wnd, lcn, alen);
		if (err)
			goto up_write;

		alcn = lcn;
		goto space_found;
	}
	/*
	 * Because cluster 0 is always used, lcn == 0 means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	 */
	if (!lcn)
		lcn = sbi->used.next_free_lcn;

	if (lcn >= wnd->nbits)
		lcn = 0;

	alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
	if (alen)
		goto space_found;

	/* Try to use clusters from MftZone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);

	/* Check for too big a request. */
	if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
		err = -ENOSPC;
		goto up_write;
	}

	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	zlen2 = zlen >> 1;
	ztrim = clamp_val(len, zlen2, zlen);
	new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);

	wnd_zone_set(wnd, zlcn, new_zlen);

	/* Allocate contiguous clusters. */
	alen = wnd_find(wnd, len, 0,
			BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
	if (!alen) {
		err = -ENOSPC;
		goto up_write;
	}

space_found:
	err = 0;
	*new_len = alen;
	*new_lcn = alcn;

	ntfs_unmap_meta(sb, alcn, alen);

	/* Set hint for next requests. */
	if (!(opt & ALLOCATE_MFT))
		sbi->used.next_free_lcn = alcn + alen;
up_write:
	up_write(&wnd->rw_lock);
	return err;
}
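
/*
 * Illustrative sketch (not compiled): the MftZone trim performed by
 * ntfs_look_for_free_space() above when the regular bitmap search fails.
 */
#if 0
static size_t mft_zone_trim_example(size_t zlen, size_t len)
{
	/* Give the allocator at least half of the zone... */
	size_t ztrim = clamp_val(len, zlen >> 1, zlen);

	/*
	 * ...but never shrink the zone below NTFS_MIN_MFT_ZONE clusters.
	 * E.g. zlen == 1000, len == 300: ztrim == 500, new zone == 500.
	 */
	return max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
}
#endif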

/*
 * ntfs_check_for_free_space
 *
 * Check if it is possible to allocate 'clen' clusters and 'mlen' MFT records.
 */
bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
{
	size_t free, zlen, avail;
	struct wnd_bitmap *wnd;

	wnd = &sbi->used.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	free = wnd_zeroes(wnd);
	zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
	up_read(&wnd->rw_lock);

	if (free < zlen + clen)
		return false;

	avail = free - (zlen + clen);

	wnd = &sbi->mft.bitmap;
	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	free = wnd_zeroes(wnd);
	zlen = wnd_zone_len(wnd);
	up_read(&wnd->rw_lock);

	if (free >= zlen + mlen)
		return true;

	return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
}
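
/*
 * Illustrative sketch (not compiled) of the accounting above, with the
 * zone handling simplified: the request fits if the cluster bitmap can
 * cover the clusters plus the reserved zone, and either the MFT bitmap
 * already has room for the records or the leftover clusters can back an
 * MFT extension of 'mlen' records.
 */
#if 0
static bool free_space_example(u64 free_clusters, u64 zone_clusters, u64 clen,
			       u64 free_mft_recs, u64 mlen, u32 record_size,
			       u32 cluster_size)
{
	u64 avail;

	if (free_clusters < zone_clusters + clen)
		return false;
	avail = free_clusters - (zone_clusters + clen);

	if (free_mft_recs >= mlen)
		return true;

	/* Clusters needed to store 'mlen' new MFT records. */
	return avail >= DIV_ROUND_UP(mlen * record_size, cluster_size);
}
#endif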

/*
 * ntfs_extend_mft - Allocate additional MFT records.
 *
 * sbi->mft.bitmap is locked for write.
 *
 * NOTE: recursive:
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft ->
 *	attr_set_size ->
 *	ni_insert_nonresident ->
 *	ni_insert_attr ->
 *	ni_ins_attr_ext ->
 *	ntfs_look_free_mft ->
 *	ntfs_extend_mft
 *
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two MFT to avoid recursive loop").
 */
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
	int err;
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct ATTRIB *attr;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;

	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);

	if (err) {
		up_write(&ni->file.run_lock);
		goto out;
	}

	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	ni->mi.dirty = true;

	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);

	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);

	/* Refresh MFT Zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);

	ntfs_refresh_zone(sbi);

	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);

	if (err)
		goto out;

	err = wnd_extend(wnd, new_mft_total);

	if (err)
		goto out;

	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);

	err = _ni_write_inode(&ni->vfs_inode, 0);
out:
	return err;
}

/*
 * ntfs_look_free_mft - Look for a free MFT record.
 */
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
{
	int err = 0;
	size_t zbit, zlen, from, to, fr;
	size_t mft_total;
	struct MFT_REF ref;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	u32 ir;

	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);

	if (!mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);

	zlen = wnd_zone_len(wnd);

	/* Always reserve space for MFT. */
	if (zlen) {
		if (mft) {
			zbit = wnd_zone_bit(wnd);
			*rno = zbit;
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
		}
		goto found;
	}

	/* No MFT zone. Find the free MFT record nearest to 0. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		/* Resize MFT. */
		mft_total = wnd->nbits;

		err = ntfs_extend_mft(sbi);
		if (!err) {
			zbit = mft_total;
			goto reserve_mft;
		}

		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
			goto out;

		err = 0;

		/*
		 * Look for a free record in the reserved area
		 * [11-16) == [MFT_REC_RESERVED, MFT_REC_FREE).
		 * The MFT bitmap always marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session create internal bitmap for 5 bits. */
			sbi->mft.reserved_bitmap = 0xFF;

			ref.high = 0;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct inode *i;
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;

				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);

				i = ntfs_iget5(sb, &ref, NULL);
				if (IS_ERR(i)) {
next:
					ntfs_notice(
						sb,
						"Invalid reserved record %x",
						ref.low);
					continue;
				}
				if (is_bad_inode(i)) {
					iput(i);
					goto next;
				}

				ni = ntfs_i(i);

				mrec = ni->mi.mrec;

				if (!is_rec_base(mrec))
					goto next;

				if (mrec->hard_links)
					goto next;

				if (!ni_std(ni))
					goto next;

				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
					goto next;

				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
			}
		}

		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
			goto out;
		}

		zlen = 1;
		sbi->mft.next_reserved = zbit;
	} else {
reserve_mft:
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;

		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
			zlen -= 1;

		/* [zbit, zbit + zlen) will be used for MFT itself. */
		from = sbi->mft.used;
		if (from < zbit)
			from = zbit;
		to = zbit + zlen;
		if (from < to) {
			ntfs_clear_mft_tail(sbi, from, to);
			sbi->mft.used = to;
		}
	}

	if (mft) {
		*rno = zbit;
		zbit += 1;
		zlen -= 1;
	}

	wnd_zone_set(wnd, zbit, zlen);

found:
	if (!mft) {
		/* The request is to get a record for general purpose. */
		if (sbi->mft.next_free < MFT_REC_USER)
			sbi->mft.next_free = MFT_REC_USER;

		for (;;) {
			if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
			} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
				sbi->mft.next_free = sbi->mft.bitmap.nbits;
			} else {
				*rno = fr;
				sbi->mft.next_free = *rno + 1;
				break;
			}

			err = ntfs_extend_mft(sbi);
			if (err)
				goto out;
		}
	}

	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
		err = -ENOMEM;
		goto out;
	}

	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);

out:
	if (!mft)
		up_write(&wnd->rw_lock);

	return err;
}

/*
 * ntfs_mark_rec_free - Mark record as free.
 * is_mft - true if we are changing MFT
 */
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;

	if (!is_mft)
		down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
		goto out;

	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		else
			wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	}

	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;

out:
	if (!is_mft)
		up_write(&wnd->rw_lock);
}

/*
 * ntfs_clear_mft_tail - Format empty records [from, to).
 *
 * sbi->mft.bitmap is locked for write.
 */
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
	int err;
	u32 rs;
	u64 vbo;
	struct runs_tree *run;
	struct ntfs_inode *ni;

	if (from >= to)
		return 0;

	rs = sbi->record_size;
	ni = sbi->mft.ni;
	run = &ni->file.run;

	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;

		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		if (err)
			goto out;

		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
		nb_put(&nb);
		if (err)
			goto out;
	}

out:
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
	return err;
}

/*
 * ntfs_refresh_zone - Refresh MFT zone.
 *
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock for write.
 */
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
	CLST lcn, vcn, len;
	size_t lcn_s, zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;

	/* Do not change anything while the MFT zone is still non-empty. */
	if (wnd_zone_len(wnd))
		return 0;

	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);

	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
		lcn = SPARSE_LCN;

	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lcn_s = lcn + 1;

	/* Try to allocate clusters after last MFT run. */
	zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
	wnd_zone_set(wnd, lcn_s, zlen);

	return 0;
}

/*
 * ntfs_update_mftmirr - Update $MFTMirr data.
 */
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize, bytes;
	sector_t block1, block2;

	/*
	 * sb can be NULL here. In this case sbi->flags should be 0 too.
	 */
	if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
	    unlikely(ntfs3_forced_shutdown(sb)))
		return;

	blocksize = sb->s_blocksize;
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;

	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;

		bh1 = sb_bread(sb, block1++);
		if (!bh1)
			return;

		bh2 = sb_getblk(sb, block2++);
		if (!bh2) {
			put_bh(bh1);
			return;
		}

		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);

		lock_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
		unlock_buffer(bh2);

		put_bh(bh1);
		bh1 = NULL;

		err = wait ? sync_dirty_buffer(bh2) : 0;

		put_bh(bh2);
		if (err)
			return;
	}

	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
}

/*
 * ntfs_bad_inode
 *
 * Marks inode as bad and marks fs as 'dirty'.
 */
void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);

	ntfs_inode_err(inode, "%s", hint);

	/* Do not call make_bad_inode()! */
	ni->ni_bad = true;

	/* Avoid recursion if bad inode is $Volume. */
	if (inode->i_ino != MFT_REC_VOL &&
	    !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
	}
}

/*
 * ntfs_set_state
 *
 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
 */
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
	int err;
	struct ATTRIB *attr;
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	__le16 info_flags;

	/*
	 * Do not change state if fs was real_dirty.
	 * Do not change state if fs is already dirty (clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
		return 0;

	/* Check cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
		return 0;

	ni = sbi->volume.ni;
	if (!ni)
		return -EINVAL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);

	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	if (!attr) {
		err = -EINVAL;
		goto out;
	}

	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	if (!info) {
		err = -EINVAL;
		goto out;
	}

	info_flags = info->flags;

	switch (dirty) {
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
		fallthrough;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
		break;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
		break;
	}
	/* Cache current volume flags. */
	if (info_flags != info->flags) {
		sbi->volume.flags = info->flags;
		mi->dirty = true;
	}
	err = 0;

out:
	ni_unlock(ni);
	if (err)
		return err;

	mark_inode_dirty_sync(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */

	/* Write the MFT record to disk. */
	err = _ni_write_inode(&ni->vfs_inode, 1);

	return err;
}
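
/*
 * Illustrative usage (not compiled) of the transitions handled above.
 * NTFS_DIRTY_ERROR sets 'real_dirty', after which later calls (including
 * the CLEAR at umount) are no-ops, leaving the on-disk dirty flag set so
 * that chkdsk will run.
 */
#if 0
static void dirty_state_example(struct ntfs_sb_info *sbi)
{
	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);	/* mount: volume in use */
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);	/* corruption detected */
	ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);	/* umount: no-op here */
}
#endif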

/*
 * security_hash - Calculates a hash of security descriptor.
 */
static inline __le32 security_hash(const void *sd, size_t bytes)
{
	u32 hash = 0;
	const __le32 *ptr = sd;

	bytes >>= 2;
	while (bytes--)
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
}
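
/*
 * Illustrative sketch (not compiled): security_hash() above is a
 * rotate-add over the descriptor taken as little-endian 32-bit words;
 * on native u32 values it is equivalent to the helper below. For the
 * words { 0x80040001, 0x00000030 } the result is 0x0020003C.
 */
#if 0
static u32 security_hash_example(const u32 *words, size_t n)
{
	u32 hash = 0;

	while (n--)
		hash = rol32(hash, 3) + *words++;
	return hash;
}
#endif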

/*
 * ntfs_bread - Simple wrapper for sb_bread_unmovable().
 */
struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
{
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct buffer_head *bh;

	if (unlikely(block >= sbi->volume.blocks)) {
		/* Prevent the generic message "attempt to access beyond end of device". */
		ntfs_err(sb, "try to read out of volume at offset 0x%llx",
			 (u64)block << sb->s_blocksize_bits);
		return NULL;
	}

	bh = sb_bread_unmovable(sb, block);
	if (bh)
		return bh;

	ntfs_err(sb, "failed to read volume at offset 0x%llx",
		 (u64)block << sb->s_blocksize_bits);
	return NULL;
}

int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
{
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;

	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
		wait = 1;

	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op > bytes)
			op = bytes;

		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
			if (!bh) {
				ntfs_err(sb, "failed to read block %llx",
					 (u64)block);
				return -EIO;
			}
		} else {
			bh = __getblk(bdev, block, blocksize);
			if (!bh)
				return -ENOMEM;
		}

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);
		if (buf) {
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
		} else {
			memset(bh->b_data + off, -1, op);
		}

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (wait) {
			int err = sync_dirty_buffer(bh);

			if (err) {
				ntfs_err(
					sb,
					"failed to sync buffer at block %llx, error %d",
					(u64)block, err);
				put_bh(bh);
				return err;
			}
		}

		put_bh(bh);

		bytes -= op;
	}
	return 0;
}
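
/*
 * Illustrative sketch (not compiled): the buffer choice made inside
 * ntfs_sb_write() above. A write covering a whole block can take a
 * fresh buffer via __getblk(); a partial write must read-modify-write,
 * so the block is first read with __bread().
 */
#if 0
static struct buffer_head *rmw_get_block_example(struct block_device *bdev,
						 sector_t block, u32 blocksize,
						 u32 op)
{
	return op < blocksize ? __bread(bdev, block, blocksize) :
				__getblk(bdev, block, blocksize);
}
#endif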

int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes, int sync)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	u64 lbo, len;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		u32 op = min_t(u64, len, bytes);
		int err = ntfs_sb_write(sb, lbo, op, buf, sync);

		if (err)
			return err;

		bytes -= op;
		if (!bytes)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next)
			return -ENOENT;

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		if (buf)
			buf = Add2Ptr(buf, op);

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

	return 0;
}

struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
{
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn;
	u64 lbo;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);

	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);

	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
}

int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
{
	int err;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	u32 nbh = 0;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	struct buffer_head *bh;

	if (!run) {
		/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
			err = -ENOENT;
			goto out;
		}

		/* Use the boot sector's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	} else {
		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
	}

	off = lbo & (blocksize - 1);
	if (nb) {
		nb->off = off;
		nb->bytes = bytes;
	}

	for (;;) {
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op = blocksize - off;

			if (op > len32)
				op = len32;

			bh = ntfs_bread(sb, block);
			if (!bh) {
				err = -EIO;
				goto out;
			}

			if (buf) {
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			}

			if (!nb) {
				put_bh(bh);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			} else {
				nb->bh[nbh++] = bh;
				nb->nbufs = nbh;
			}

			bytes -= op;
			if (!bytes)
				return 0;
			len32 -= op;
			block += 1;
			off = 0;

		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		if (lcn == SPARSE_LCN) {
			err = -EINVAL;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	if (!nbh)
		return err;

	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;
	return err;
}

/*
 * ntfs_read_bh
 *
 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
 */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
{
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);

	if (err)
		return err;
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
}

int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	u32 off;
	u32 nbh = 0;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;

	nb->bytes = bytes;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = -ENOENT;
		goto out;
	}

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	nb->off = off = lbo & (blocksize - 1);

	for (;;) {
		u32 len32 = min_t(u64, len, bytes);
		sector_t block = lbo >> sb->s_blocksize_bits;

		do {
			u32 op;
			struct buffer_head *bh;

			if (nbh >= ARRAY_SIZE(nb->bh)) {
				err = -EINVAL;
				goto out;
			}

			op = blocksize - off;
			if (op > len32)
				op = len32;

			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (!bh) {
					err = -ENOMEM;
					goto out;
				}
				if (buffer_locked(bh))
					__wait_on_buffer(bh);

				lock_buffer(bh);
				if (!buffer_uptodate(bh)) {
					memset(bh->b_data, 0, blocksize);
					set_buffer_uptodate(bh);
				}
				unlock_buffer(bh);
			} else {
				bh = ntfs_bread(sb, block);
				if (!bh) {
					err = -EIO;
					goto out;
				}
			}

			nb->bh[nbh++] = bh;
			bytes -= op;
			if (!bytes) {
				nb->nbufs = nbh;
				return 0;
			}

			block += 1;
			len32 -= op;
			off = 0;
		} while (len32);

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = -ENOENT;
			goto out;
		}

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}

out:
	while (nbh) {
		put_bh(nb->bh[--nbh]);
		nb->bh[nbh] = NULL;
	}

	nb->nbufs = 0;

	return err;
}

int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u32 off = nb->off;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	u32 idx;
	__le16 *fixup;
	__le16 sample;

	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL;
	}

	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		char *bh_data;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;

		if (op > bytes)
			op = bytes;

		if (buffer_locked(bh))
			__wait_on_buffer(bh);

		lock_buffer(bh);

		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);

		if (!idx) {
			u16 t16;

			fixup = Add2Ptr(bh_data, fo);
			sample = *fixup;
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
			} else {
				sample = cpu_to_le16(t16 + 1);
				*fixup = sample;
			}

			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		}

		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));

		do {
			*++fixup = *ptr;
			*ptr = sample;
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);

		if (sync) {
			int err2 = sync_dirty_buffer(bh);

			if (!err && err2)
				err = err2;
		}

		bytes -= op;
		rhdr = Add2Ptr(rhdr, op);
	}

	return err;
}

/*
 * ntfs_read_write_run - Read/Write disk's page cache.
 */
int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
			void *buf, u64 vbo, size_t bytes, int wr)
{
	struct super_block *sb = sbi->sb;
	struct address_space *mapping = sb->s_bdev->bd_mapping;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	CLST lcn, clen;
	u64 lbo, len;
	size_t idx;
	u32 off, op;
	struct folio *folio;
	char *kaddr;

	if (!bytes)
		return 0;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
		return -ENOENT;

	if (lcn == SPARSE_LCN)
		return -EINVAL;

	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;

	for (;;) {
		/* Read range [lbo, lbo+len). */
		folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);

		if (IS_ERR(folio))
			return PTR_ERR(folio);

		off = offset_in_page(lbo);
		op = PAGE_SIZE - off;

		if (op > len)
			op = len;
		if (op > bytes)
			op = bytes;

		kaddr = kmap_local_folio(folio, 0);
		if (wr) {
			memcpy(kaddr + off, buf, op);
			folio_mark_dirty(folio);
		} else {
			memcpy(buf, kaddr + off, op);
			flush_dcache_folio(folio);
		}
		kunmap_local(kaddr);
		folio_put(folio);

		bytes -= op;
		if (!bytes)
			return 0;

		buf += op;
		len -= op;
		if (len) {
			/* Next page of the volume. */
			lbo += op;
			continue;
		}

		/* Get the next range. */
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			return -ENOENT;
		}

		if (lcn == SPARSE_LCN)
			return -EINVAL;

		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
	}
}

/*
 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
 *
 * Fill the on-disk logfile range with -1;
 * this marks the logfile as empty.
 */
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	CLST lcn, clen;
	u64 lbo, len;
	size_t run_idx;
	struct page *fill;
	void *kaddr;
	struct blk_plug plug;

	fill = alloc_page(GFP_KERNEL);
	if (!fill)
		return -ENOMEM;

	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	lock_page(fill);

	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
		err = -ENOENT;
		goto out;
	}

	/*
	 * TODO: Try blkdev_issue_write_same.
	 */
	blk_start_plug(&plug);
	do {
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
new_bio:
		new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
		if (bio) {
			bio_chain(bio, new);
			submit_bio(bio);
		}
		bio = new;
		bio->bi_iter.bi_sector = lbo >> 9;

		for (;;) {
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;

			if (bio_add_page(bio, fill, add, 0) < add)
				goto new_bio;

			lbo += add;
			if (len <= add)
				break;
			len -= add;
		}
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));

	if (!err)
		err = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);
out:
	unlock_page(fill);
	put_page(fill);

	return err;
}

int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
{
	u32 off;
	CLST lcn, len;
	u8 cluster_bits = sbi->cluster_bits;

	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
		return -ENOENT;

	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;

	return 0;
}
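
/*
 * Illustrative numbers (not compiled) for the mapping above, assuming
 * 4K clusters (cluster_bits == 12). For vbo == 0x5123, vcn == 5; if
 * run_lookup_entry() maps vcn 5 to lcn 100 with 3 clusters remaining:
 *
 *	off    = 0x5123 & 0xFFF      == 0x123
 *	*lbo   = (100 << 12) + 0x123 == 0x64123
 *	*bytes = (3 << 12) - 0x123   == 0x2EDD
 */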

struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
				  enum RECORD_FLAG flag)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	ni = ntfs_i(inode);

	err = mi_format_new(&ni->mi, sbi, rno, flag, false);
	if (err)
		goto out;

	inode->i_ino = rno;
	if (insert_inode_locked(inode) < 0) {
		err = -EIO;
		goto out;
	}

out:
	if (err) {
		make_bad_inode(inode);
		iput(inode);
		ni = ERR_PTR(err);
	}
	return ni;
}

/*
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner S-1-5-32-544 (Administrators)
 * Group S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
 */
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);

static inline u32 sid_length(const struct SID *sid)
{
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
}

/*
 * is_acl_valid
 *
 * Thanks to Mark Harmstone for the idea.
 */
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
	const struct ACE_HEADER *ace;
	u32 i;
	u16 ace_count, ace_size;

	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
		return false;
	}

	if (acl->Sbz1)
		return false;

	if (le16_to_cpu(acl->AclSize) > len)
		return false;

	if (acl->Sbz2)
		return false;

	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);

	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
			return false;

		ace_size = le16_to_cpu(ace->AceSize);
		if (len < ace_size)
			return false;

		len -= ace_size;
		ace = Add2Ptr(ace, ace_size);
	}

	return true;
}

bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
{
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;

	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
		return false;

	if (sd->Revision != 1)
		return false;

	if (sd->Sbz1)
		return false;

	if (!(sd->Control & SE_SELF_RELATIVE))
		return false;

	sd_owner = le32_to_cpu(sd->Owner);
	if (sd_owner) {
		const struct SID *owner = Add2Ptr(sd, sd_owner);

		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (owner->Revision != 1)
			return false;

		if (sd_owner + sid_length(owner) > len)
			return false;
	}

	sd_group = le32_to_cpu(sd->Group);
	if (sd_group) {
		const struct SID *group = Add2Ptr(sd, sd_group);

		if (sd_group + offsetof(struct SID, SubAuthority) > len)
			return false;

		if (group->Revision != 1)
			return false;

		if (sd_group + sid_length(group) > len)
			return false;
	}

	sd_sacl = le32_to_cpu(sd->Sacl);
	if (sd_sacl) {
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);

		if (sd_sacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(sacl, len - sd_sacl))
			return false;
	}

	sd_dacl = le32_to_cpu(sd->Dacl);
	if (sd_dacl) {
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);

		if (sd_dacl + sizeof(struct ACL) > len)
			return false;

		if (!is_acl_valid(dacl, len - sd_dacl))
			return false;
	}

	return true;
}

/*
 * ntfs_security_init - Load and parse $Secure.
 */
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
	int err;
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct MFT_REF ref;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	u64 sds_size;
	size_t off;
	struct NTFS_DE *ne;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;

	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.high = 0;
	ref.seq = cpu_to_le16(MFT_REC_SECURE);

	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure (%d).", err);
		inode = NULL;
		goto out;
	}

	ni = ntfs_i(inode);

	le = NULL;

	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sdh->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SDH is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err);
		goto out;
	}

	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	if (!attr ||
	    !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) ||
	    root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
	    offsetof(struct INDEX_ROOT, ihdr) +
			    le32_to_cpu(root_sii->ihdr.used) >
		    le32_to_cpu(attr->res.data_size)) {
		ntfs_err(sb, "$Secure::$SII is corrupted.");
		err = -EINVAL;
		goto out;
	}

	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	if (err) {
		ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err);
		goto out;
	}

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	sds_size = inode->i_size;

	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write new security at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);

	off = 0;
	ne = NULL;

	for (;;) {
		u32 next_id;

		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		if (err || !ne)
			break;

		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
			continue;

		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	}

	sbi->security.ni = ni;
	inode = NULL;
out:
	iput(inode);
	fnd_put(fnd_sii);

	return err;
}

/*
 * ntfs_get_security_by_id - Read security descriptor by id.
 */
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
			    size_t *size)
{
	int err;
	int diff;
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	void *p = NULL;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	u32 t32;

	*sd = NULL;

	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);

	fnd_sii = fnd_get();
	if (!fnd_sii) {
		err = -ENOMEM;
		goto out;
	}

	root_sii = indx_get_root(indx, ni, NULL, NULL);
	if (!root_sii) {
		err = -EINVAL;
		goto out;
	}

	/* Try to find this SECURITY descriptor in SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	if (err)
		goto out;

	if (diff)
		goto out;

	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < sizeof(struct SECURITY_HDR)) {
		err = -EINVAL;
		goto out;
	}

	if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
		/*
		 * Looks like the security descriptor is too big;
		 * 0x10000 is an arbitrary upper bound.
		 */
		err = -EFBIG;
		goto out;
	}

	*size = t32 - sizeof(struct SECURITY_HDR);

	p = kmalloc(*size, GFP_NOFS);
	if (!p) {
		err = -ENOMEM;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (err)
		goto out;

	if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
		err = -EINVAL;
		goto out;
	}

	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       sizeof(struct SECURITY_HDR),
			       p, *size, NULL);
	if (err)
		goto out;

	*sd = p;
	p = NULL;

out:
	kfree(p);
	fnd_put(fnd_sii);
	ni_unlock(ni);

	return err;
}
2041 
2042 /*
2043  * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2044  *
2045  * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2046  * and it contains a mirror copy of each security descriptor.  When writing
2047  * to a security descriptor at location X, another copy will be written at
2048  * location (X+256K).
2049  * When writing a security descriptor that will cross the 256K boundary,
2050  * the pointer will be advanced by 256K to skip
2051  * over the mirror portion.
2052  */
2053 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2054 			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2055 			 u32 size_sd, __le32 *security_id, bool *inserted)
2056 {
2057 	int err, diff;
2058 	struct ntfs_inode *ni = sbi->security.ni;
2059 	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2060 	struct ntfs_index *indx_sii = &sbi->security.index_sii;
2061 	struct NTFS_DE_SDH *e;
2062 	struct NTFS_DE_SDH sdh_e;
2063 	struct NTFS_DE_SII sii_e;
2064 	struct SECURITY_HDR *d_security;
2065 	u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
2066 	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2067 	struct SECURITY_KEY hash_key;
2068 	struct ntfs_fnd *fnd_sdh = NULL;
2069 	const struct INDEX_ROOT *root_sdh;
2070 	const struct INDEX_ROOT *root_sii;
2071 	u64 mirr_off, new_sds_size;
2072 	u32 next, left;
2073 
2074 	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2075 		      SecurityDescriptorsBlockSize);
2076 
2077 	hash_key.hash = security_hash(sd, size_sd);
2078 	hash_key.sec_id = SECURITY_ID_INVALID;
2079 
2080 	if (inserted)
2081 		*inserted = false;
2082 	*security_id = SECURITY_ID_INVALID;
2083 
2084 	/* Allocate a temporal buffer. */
2085 	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2086 	if (!d_security)
2087 		return -ENOMEM;
2088 
2089 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2090 
2091 	fnd_sdh = fnd_get();
2092 	if (!fnd_sdh) {
2093 		err = -ENOMEM;
2094 		goto out;
2095 	}
2096 
2097 	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2098 	if (!root_sdh) {
2099 		err = -EINVAL;
2100 		goto out;
2101 	}
2102 
2103 	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2104 	if (!root_sii) {
2105 		err = -EINVAL;
2106 		goto out;
2107 	}
2108 
2109 	/*
2110 	 * Check if such security already exists.
2111 	 * Use "SDH" and hash -> to get the offset in "SDS".
2112 	 */
2113 	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2114 			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
2115 			fnd_sdh);
2116 	if (err)
2117 		goto out;
2118 
2119 	while (e) {
2120 		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2121 			err = ntfs_read_run_nb(sbi, &ni->file.run,
2122 					       le64_to_cpu(e->sec_hdr.off),
2123 					       d_security, new_sec_size, NULL);
2124 			if (err)
2125 				goto out;
2126 
2127 			if (le32_to_cpu(d_security->size) == new_sec_size &&
2128 			    d_security->key.hash == hash_key.hash &&
2129 			    !memcmp(d_security + 1, sd, size_sd)) {
2130 				/* Such security already exists. */
2131 				*security_id = d_security->key.sec_id;
2132 				err = 0;
2133 				goto out;
2134 			}
2135 		}
2136 
2137 		err = indx_find_sort(indx_sdh, ni, root_sdh,
2138 				     (struct NTFS_DE **)&e, fnd_sdh);
2139 		if (err)
2140 			goto out;
2141 
2142 		if (!e || e->key.hash != hash_key.hash)
2143 			break;
2144 	}
2145 
2146 	/* Zero unused space. */
2147 	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2148 	left = SecurityDescriptorsBlockSize - next;
2149 
2150 	/* Zero gap until SecurityDescriptorsBlockSize. */
2151 	if (left < new_sec_size) {
2152 		/* Zero "left" bytes from sbi->security.next_off. */
2153 		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2154 	}
2155 
2156 	/*
2157 	 * TODO: Zero the tail of the previous descriptor. Example:
2158 	 *   0x40438 == ni->vfs_inode.i_size
2159 	 *   0x00440 == sbi->security.next_off
2160 	 *   => need to zero [0x438, 0x440):
2161 	 *
2162 	 * used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2163 	 * if (next > used) {
2164 	 *	u32 tozero = next - used;
2165 	 *	zero "tozero" bytes from sbi->security.next_off - tozero;
2166 	 * }
2167 	 */
2168 
2169 	/* Format new security descriptor. */
2170 	d_security->key.hash = hash_key.hash;
2171 	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2172 	d_security->off = cpu_to_le64(sbi->security.next_off);
2173 	d_security->size = cpu_to_le32(new_sec_size);
2174 	memcpy(d_security + 1, sd, size_sd);
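
	/*
	 * d_security now holds the on-disk entry (fields per struct
	 * SECURITY_HDR, 0x14 bytes):
	 *   +0x00  key.hash    (le32)  hash of the descriptor
	 *   +0x04  key.sec_id  (le32)  security id being assigned
	 *   +0x08  off         (le64)  offset of this entry in $SDS
	 *   +0x10  size        (le32)  header + descriptor, unaligned
	 *   +0x14  sd[]                self-relative security descriptor
	 */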
2175 
2176 	/* Write the main SDS bucket. */
2177 	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2178 				d_security, aligned_sec_size, 0);
2180 	if (err)
2181 		goto out;
2182 
2183 	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2184 	new_sds_size = mirr_off + aligned_sec_size;
2185 
2186 	if (new_sds_size > ni->vfs_inode.i_size) {
2187 		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2188 				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
2189 				    new_sds_size, &new_sds_size, false, NULL);
2190 		if (err)
2191 			goto out;
2192 	}
2193 
2194 	/* Write the mirror copy of the SDS bucket. */
2195 	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2196 				aligned_sec_size, 0);
2197 	if (err)
2198 		goto out;
2199 
2200 	/* Fill SII entry. */
2201 	sii_e.de.view.data_off =
2202 		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2203 	sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2204 	sii_e.de.view.res = 0;
2205 	sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
2206 	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2207 	sii_e.de.flags = 0;
2208 	sii_e.de.res = 0;
2209 	sii_e.sec_id = d_security->key.sec_id;
2210 	memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
2211 
2212 	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2213 	if (err)
2214 		goto out;
2215 
2216 	/* Fill SDH entry. */
2217 	sdh_e.de.view.data_off =
2218 		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2219 	sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
2220 	sdh_e.de.view.res = 0;
2221 	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2222 	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2223 	sdh_e.de.flags = 0;
2224 	sdh_e.de.res = 0;
2225 	sdh_e.key.hash = d_security->key.hash;
2226 	sdh_e.key.sec_id = d_security->key.sec_id;
2227 	memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
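	/* SDH entries end with an "II" marker; Windows writes the same padding. */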
2228 	sdh_e.magic[0] = cpu_to_le16('I');
2229 	sdh_e.magic[1] = cpu_to_le16('I');
2230 
2231 	fnd_clear(fnd_sdh);
2232 	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2233 				fnd_sdh, 0);
2234 	if (err)
2235 		goto out;
2236 
2237 	*security_id = d_security->key.sec_id;
2238 	if (inserted)
2239 		*inserted = true;
2240 
2241 	/* Update Id and offset for next descriptor. */
2242 	sbi->security.next_id += 1;
2243 	sbi->security.next_off += aligned_sec_size;
2244 
2245 out:
2246 	fnd_put(fnd_sdh);
2247 	mark_inode_dirty(&ni->vfs_inode);
2248 	ni_unlock(ni);
2249 	kfree(d_security);
2250 
2251 	return err;
2252 }
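
/*
 * Usage sketch (hypothetical caller, locking and error paths elided):
 *
 *	__le32 security_id;
 *	bool inserted;
 *	int err = ntfs_insert_security(sbi, sd, sd_size, &security_id,
 *				       &inserted);
 *	if (!err)
 *		std->security_id = security_id;
 *
 * Here 'std' stands in for the inode's standard information attribute;
 * 'inserted' reports whether a new $SDS entry was created or an existing
 * identical one was reused.
 */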
2253 
2254 /*
2255  * ntfs_reparse_init - Load and parse $Extend/$Reparse.
2256  */
2257 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2258 {
2259 	int err;
2260 	struct ntfs_inode *ni = sbi->reparse.ni;
2261 	struct ntfs_index *indx = &sbi->reparse.index_r;
2262 	struct ATTRIB *attr;
2263 	struct ATTR_LIST_ENTRY *le;
2264 	const struct INDEX_ROOT *root_r;
2265 
2266 	if (!ni)
2267 		return 0;
2268 
2269 	le = NULL;
2270 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2271 			    ARRAY_SIZE(SR_NAME), NULL, NULL);
2272 	if (!attr) {
2273 		err = -EINVAL;
2274 		goto out;
2275 	}
2276 
2277 	root_r = resident_data(attr);
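	/*
	 * $R is a view index: its keys are reparse tags plus MFT
	 * references rather than attribute values, so the index root must
	 * report a zero attribute type and UINTS collation.
	 */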
2278 	if (root_r->type != ATTR_ZERO ||
2279 	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2280 		err = -EINVAL;
2281 		goto out;
2282 	}
2283 
2284 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2285 	if (err)
2286 		goto out;
2287 
2288 out:
2289 	return err;
2290 }
2291 
2292 /*
2293  * ntfs_objid_init - Load and parse $Extend/$ObjId.
2294  */
2295 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2296 {
2297 	int err;
2298 	struct ntfs_inode *ni = sbi->objid.ni;
2299 	struct ntfs_index *indx = &sbi->objid.index_o;
2300 	struct ATTRIB *attr;
2301 	struct ATTR_LIST_ENTRY *le;
2302 	const struct INDEX_ROOT *root;
2303 
2304 	if (!ni)
2305 		return 0;
2306 
2307 	le = NULL;
2308 	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2309 			    ARRAY_SIZE(SO_NAME), NULL, NULL);
2310 	if (!attr) {
2311 		err = -EINVAL;
2312 		goto out;
2313 	}
2314 
2315 	root = resident_data(attr);
2316 	if (root->type != ATTR_ZERO ||
2317 	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
2318 		err = -EINVAL;
2319 		goto out;
2320 	}
2321 
2322 	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
2323 	if (err)
2324 		goto out;
2325 
2326 out:
2327 	return err;
2328 }
2329 
2330 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2331 {
2332 	int err;
2333 	struct ntfs_inode *ni = sbi->objid.ni;
2334 	struct ntfs_index *indx = &sbi->objid.index_o;
2335 
2336 	if (!ni)
2337 		return -EINVAL;
2338 
2339 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2340 
2341 	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2342 
2343 	mark_inode_dirty(&ni->vfs_inode);
2344 	ni_unlock(ni);
2345 
2346 	return err;
2347 }
2348 
2349 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2350 			const struct MFT_REF *ref)
2351 {
2352 	int err;
2353 	struct ntfs_inode *ni = sbi->reparse.ni;
2354 	struct ntfs_index *indx = &sbi->reparse.index_r;
2355 	struct NTFS_DE_R re;
2356 
2357 	if (!ni)
2358 		return -EINVAL;
2359 
2360 	memset(&re, 0, sizeof(re));
2361 
2362 	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2363 	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2364 	re.de.key_size = cpu_to_le16(sizeof(re.key));
2365 
2366 	re.key.ReparseTag = rtag;
2367 	memcpy(&re.key.ref, ref, sizeof(*ref));
2368 
2369 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2370 
2371 	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2372 
2373 	mark_inode_dirty(&ni->vfs_inode);
2374 	ni_unlock(ni);
2375 
2376 	return err;
2377 }
2378 
2379 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2380 			const struct MFT_REF *ref)
2381 {
2382 	int err, diff;
2383 	struct ntfs_inode *ni = sbi->reparse.ni;
2384 	struct ntfs_index *indx = &sbi->reparse.index_r;
2385 	struct ntfs_fnd *fnd = NULL;
2386 	struct REPARSE_KEY rkey;
2387 	struct NTFS_DE_R *re;
2388 	struct INDEX_ROOT *root_r;
2389 
2390 	if (!ni)
2391 		return -EINVAL;
2392 
2393 	rkey.ReparseTag = rtag;
2394 	rkey.ref = *ref;
2395 
2396 	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2397 
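	/* A nonzero tag identifies the key exactly, so delete it directly. */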
2398 	if (rtag) {
2399 		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2400 		goto out1;
2401 	}
2402 
2403 	fnd = fnd_get();
2404 	if (!fnd) {
2405 		err = -ENOMEM;
2406 		goto out1;
2407 	}
2408 
2409 	root_r = indx_get_root(indx, ni, NULL, NULL);
2410 	if (!root_r) {
2411 		err = -EINVAL;
2412 		goto out;
2413 	}
2414 
2415 	/* Passing (void *)1 forces the key comparison to ignore rkey.ReparseTag. */
2416 	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2417 			(struct NTFS_DE **)&re, fnd);
2418 	if (err)
2419 		goto out;
2420 
2421 	if (!re || memcmp(&re->key.ref, ref, sizeof(*ref))) {
2422 		/* Should be impossible; the volume is probably corrupt. */
2423 		goto out;
2424 	}
2425 
2426 	memcpy(&rkey, &re->key, sizeof(rkey));
2427 
2428 	fnd_put(fnd);
2429 	fnd = NULL;
2430 
2431 	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2432 	if (err)
2433 		goto out;
2434 
2435 out:
2436 	fnd_put(fnd);
2437 
2438 out1:
2439 	mark_inode_dirty(&ni->vfs_inode);
2440 	ni_unlock(ni);
2441 
2442 	return err;
2443 }
2444 
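/*
 * Forget any cached metadata buffers for the cluster range, then ask the
 * block device to discard it (a no-op when discards are disabled or
 * unsupported).
 */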
2445 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2446 					  CLST len)
2447 {
2448 	ntfs_unmap_meta(sbi->sb, lcn, len);
2449 	ntfs_discard(sbi, lcn, len);
2450 }
2451 
2452 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2453 {
2454 	CLST end, i, zone_len, zlen;
2455 	struct wnd_bitmap *wnd = &sbi->used.bitmap;
2456 	bool dirty = false;
2457 
2458 	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2459 	if (!wnd_is_used(wnd, lcn, len)) {
2460 		/* Mark the volume as dirty outside of wnd->rw_lock. */
2461 		dirty = true;
2462 
2463 		end = lcn + len;
2464 		len = 0;
2465 		for (i = lcn; i < end; i++) {
2466 			if (wnd_is_used(wnd, i, 1)) {
2467 				if (!len)
2468 					lcn = i;
2469 				len += 1;
2470 				continue;
2471 			}
2472 
2473 			if (!len)
2474 				continue;
2475 
2476 			if (trim)
2477 				ntfs_unmap_and_discard(sbi, lcn, len);
2478 
2479 			wnd_set_free(wnd, lcn, len);
2480 			len = 0;
2481 		}
2482 
2483 		if (!len)
2484 			goto out;
2485 	}
2486 
2487 	if (trim)
2488 		ntfs_unmap_and_discard(sbi, lcn, len);
2489 	wnd_set_free(wnd, lcn, len);
2490 
2491 	/* Append to the MFT zone, if possible. */
2492 	zone_len = wnd_zone_len(wnd);
2493 	zlen = min(zone_len + len, sbi->zone_max);
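	/*
	 * Illustrative numbers: zone_len == 0xf00, len == 0x200 and
	 * sbi->zone_max == 0x1000 give zlen == min(0x1100, 0x1000) ==
	 * 0x1000, so the zone may grow by at most 0x100 clusters here.
	 */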
2494 
2495 	if (zlen == zone_len) {
2496 		/* MFT zone already has maximum size. */
2497 	} else if (!zone_len) {
2498 		/* Create MFT zone only if 'zlen' is large enough. */
2499 		if (zlen == sbi->zone_max)
2500 			wnd_zone_set(wnd, lcn, zlen);
2501 	} else {
2502 		CLST zone_lcn = wnd_zone_bit(wnd);
2503 
2504 		if (lcn + len == zone_lcn) {
2505 			/* Append into head MFT zone. */
2506 			wnd_zone_set(wnd, lcn, zlen);
2507 		} else if (zone_lcn + zone_len == lcn) {
2508 			/* Append into tail MFT zone. */
2509 			wnd_zone_set(wnd, zone_lcn, zlen);
2510 		}
2511 	}
2512 
2513 out:
2514 	up_write(&wnd->rw_lock);
2515 	if (dirty)
2516 		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2517 }
2518 
2519 /*
2520  * run_deallocate - Deallocate clusters.
2521  */
2522 int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
2523 		   bool trim)
2524 {
2525 	CLST lcn, len;
2526 	size_t idx = 0;
2527 
2528 	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2529 		if (lcn == SPARSE_LCN)
2530 			continue;
2531 
2532 		mark_as_free_ex(sbi, lcn, len, trim);
2533 	}
2534 
2535 	return 0;
2536 }
2537 
2538 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2539 {
2540 	int i, ch;
2541 
2542 	/* Check for forbidden characters. */
2543 	for (i = 0; i < fname->len; ++i) {
2544 		ch = le16_to_cpu(fname->name[i]);
2545 
2546 		/* Control characters. */
2547 		if (ch < 0x20)
2548 			return true;
2549 
2550 		switch (ch) {
2551 		/* Disallowed by Windows. */
2552 		case '\\':
2553 		case '/':
2554 		case ':':
2555 		case '*':
2556 		case '?':
2557 		case '<':
2558 		case '>':
2559 		case '|':
2560 		case '\"':
2561 			return true;
2562 
2563 		default:
2564 			/* Allowed character. */
2565 			break;
2566 		}
2567 	}
2568 
2569 	/* File names cannot end with a space or a dot. */
2570 	if (fname->len > 0) {
2571 		ch = le16_to_cpu(fname->name[fname->len - 1]);
2572 		if (ch == ' ' || ch == '.')
2573 			return true;
2574 	}
2575 
2576 	return false;
2577 }
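
/*
 * Illustrative examples: "a|b" is rejected for the '|', a name containing
 * a tab is rejected as a control character, "name." and "name " fall to
 * the trailing dot/space rule, while "name.txt" passes this check.
 */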
2578 
2579 static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
2580 				    const struct le_str *fname)
2581 {
2582 	int port_digit;
2583 	const __le16 *name = fname->name;
2584 	int len = fname->len;
2585 	const u16 *upcase = sbi->upcase;
2586 
2587 	/* Check for reserved 3-character device names. */
2588 	/* The bare name or the name with any extension is forbidden. */
2589 	if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2590 		if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2591 		    !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2592 		    !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2593 		    !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2594 			return true;
2595 
2596 	/* Check for reserved 4-character names (port name followed by 1..9). */
2597 	/* The bare name or the name with any extension is forbidden. */
2598 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2599 		port_digit = le16_to_cpu(name[3]);
2600 		if (port_digit >= '1' && port_digit <= '9')
2601 			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
2602 					    false) ||
2603 			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
2604 					    false))
2605 				return true;
2606 	}
2607 
2608 	return false;
2609 }
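
/*
 * Illustrative examples: "CON", "nul.txt", "COM1" and "LPT9.log" are all
 * reserved (comparison is case-insensitive via the upcase table), while
 * "CONSOLE" and "COM0" are not.
 */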
2610 
2611 /*
2612  * valid_windows_name - Check if a file name is valid in Windows.
2613  */
2614 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2615 {
2616 	return !name_has_forbidden_chars(fname) &&
2617 	       !is_reserved_name(sbi, fname);
2618 }
2619 
2620 /*
2621  * ntfs_set_label - Update the current NTFS label.
2622  */
2623 int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
2624 {
2625 	int err;
2626 	struct ATTRIB *attr;
2627 	u32 uni_bytes;
2628 	struct ntfs_inode *ni = sbi->volume.ni;
2629 	/* Allocate PATH_MAX bytes. */
2630 	struct cpu_str *uni = __getname();
2631 
2632 	if (!uni)
2633 		return -ENOMEM;
2634 
2635 	err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
2636 				UTF16_LITTLE_ENDIAN);
2637 	if (err < 0)
2638 		goto out;
2639 
2640 	uni_bytes = uni->len * sizeof(u16);
2641 	if (uni_bytes > NTFS_LABEL_MAX_LENGTH * sizeof(u16)) {
2642 		ntfs_warn(sbi->sb, "new label is too long");
2643 		err = -EFBIG;
2644 		goto out;
2645 	}
2646 
2647 	ni_lock(ni);
2648 
2649 	/* Remove the old label attribute; ignore any errors. */
2650 	ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
2651 
2652 	err = ni_insert_resident(ni, uni_bytes, ATTR_LABEL, NULL, 0, &attr,
2653 				 NULL, NULL);
2654 	if (err < 0)
2655 		goto unlock_out;
2656 
2657 	/* Write the new label into the on-disk structure. */
2658 	memcpy(resident_data(attr), uni->name, uni_bytes);
2659 
2660 	/* Update the cached copy of the current label. */
2661 	if (len >= ARRAY_SIZE(sbi->volume.label))
2662 		len = ARRAY_SIZE(sbi->volume.label) - 1;
2663 	memcpy(sbi->volume.label, label, len);
2664 	sbi->volume.label[len] = 0;
2665 	mark_inode_dirty_sync(&ni->vfs_inode);
2666 
2667 unlock_out:
2668 	ni_unlock(ni);
2669 
2670 	if (!err)
2671 		err = _ni_write_inode(&ni->vfs_inode, 0);
2672 
2673 out:
2674 	__putname(uni);
2675 	return err;
2676 }
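
/*
 * Usage sketch (hypothetical caller, error handling elided):
 *
 *	u8 name[] = "backup";
 *	int err = ntfs_set_label(sbi, name, sizeof(name) - 1);
 *
 * The label is converted to UTF-16, stored as the volume's ATTR_LABEL
 * attribute, and mirrored into the cached sbi->volume.label string.
 */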
2677