xref: /linux/fs/ntfs3/record.c (revision ce335806b5ecc5132aed0a1af8bd48ae3b2ea178)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

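	/*
	 * Scan all attributes: use the smallest id that is not taken and
	 * remember the largest id seen so that next_attr_id can be reset.
	 */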
	for (;;) {
		attr = mi_enum_attr(ni, mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}

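/*
 * mi_get - Allocate, initialize and read the MFT record 'rno'.
 *
 * On success the new mft_inode is returned via 'mi'.
 */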
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}

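/* mi_put - Free an mft_inode obtained with mi_get. */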
void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

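/* mi_init - Bind 'mi' to record 'rno' and allocate a buffer for the record. */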
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft && mft_ni) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

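	/*
	 * Treat a fixup warning (-E_NTFS_FIXUP) as success, but mark the
	 * record dirty so it will be rewritten.
	 */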
	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

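	/*
	 * -ENOENT: this part of $MFT is not mapped yet. Load the runs
	 * for this vcn and retry the read.
	 */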
	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	if (err == -E_NTFS_CORRUPT) {
		ntfs_err(sbi->sb, "mft corrupted");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		err = -EINVAL;
	}

	return err;
}

/*
 * mi_enum_attr - Start/continue attribute enumeration in a record.
 *
 * NOTE: mi->mrec is memory of size sbi->record_size.
 * Here we are sure that mi->mrec->total == sbi->record_size (see mi_read).
 */
struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			    struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize, prev_type;
	u16 t16;
	u64 data_size, alloc_size, tot_size;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			goto out;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 8)) {
			goto out;
		}

		/* Stop enumeration if the record is not in use. */
		if (!is_rec_inuse(rec))
			return NULL;

		prev_type = 0;
		attr = Add2Ptr(rec, off);
	} else {
		/*
		 * We don't need to check the previous attr here; bounds
		 * checking was done in the previous round.
		 */
		off = PtrOffset(rec, attr);

		asize = le32_to_cpu(attr->size);

		prev_type = le32_to_cpu(attr->type);
		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	/*
	 * Can we use the first fields:
	 * attr->type,
	 * attr->size
	 */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		goto out;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is the last known attribute type for now. */
	t32 = le32_to_cpu(attr->type);
	if (!t32 || (t32 & 0xf) || (t32 > 0x100))
		goto out;

	/* Attributes in a record must be ordered by type. */
	if (t32 < prev_type)
		goto out;

	asize = le32_to_cpu(attr->size);

	if (!IS_ALIGNED(asize, 8))
		goto out;

	/* Check overflow and boundary. */
	if (off + asize < off || off + asize > used)
		goto out;

	/* Can we use the field attr->non_res? */
	if (off + 9 > used)
		goto out;

	/* Check size of attribute. */
	if (!attr->non_res) {
		/* Check resident fields. */
		if (asize < SIZEOF_RESIDENT)
			goto out;

		t16 = le16_to_cpu(attr->res.data_off);
		if (t16 > asize)
			goto out;

		if (le32_to_cpu(attr->res.data_size) > asize - t16)
			goto out;

		t32 = sizeof(short) * attr->name_len;
		if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
			goto out;

		return attr;
	}

	/* Check nonresident fields. */
	if (attr->non_res != 1)
		goto out;

	/* Can we use memory including attr->nres.valid_size? */
	if (asize < SIZEOF_NONRESIDENT)
		goto out;

	t16 = le16_to_cpu(attr->nres.run_off);
	if (t16 > asize)
		goto out;

	t32 = sizeof(short) * attr->name_len;
	if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
		goto out;

	/* Check start/end vcn. */
	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		goto out;

	data_size = le64_to_cpu(attr->nres.data_size);
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		goto out;

	alloc_size = le64_to_cpu(attr->nres.alloc_size);
	if (data_size > alloc_size)
		goto out;

	t32 = mi->sbi->cluster_mask;
	if (alloc_size & t32)
		goto out;

	if (!attr->nres.svcn && is_attr_ext(attr)) {
		/* First segment of sparse/compressed attribute. */
		/* Can we use memory including attr->nres.total_size? */
		if (asize < SIZEOF_NONRESIDENT_EX)
			goto out;

		tot_size = le64_to_cpu(attr->nres.total_size);
		if (tot_size & t32)
			goto out;

		if (tot_size > alloc_size)
			goto out;
	} else {
		if (attr->nres.c_unit)
			goto out;

		if (alloc_size > mi->sbi->volume.size)
			goto out;
	}

	return attr;

out:
	_ntfs_bad_inode(&ni->vfs_inode);
	return NULL;
}


/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			    struct ATTRIB *attr, enum ATTR_TYPE type,
			    const __le16 *name, u8 name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(ni, mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}

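/* mi_write - Write the record back to disk if it is marked dirty. */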
int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}

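/*
 * mi_format_new - Set up a freshly allocated MFT record 'rno' in memory.
 *
 * The record is built from sbi->new_rec and marked dirty; it is not
 * written to disk here.
 */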
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

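	/*
	 * Choose the sequence number: reserved records use rno itself,
	 * and a reused record bumps its previous sequence number.
	 */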
	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;
	if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
		rec->mft_record = cpu_to_le32(rno);

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute, or NULL if it is not possible
 * to create one.
 */
struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi,
			      enum ATTR_TYPE type, const __le16 *name,
			      u8 name_len, u32 asize, u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;

	/* Can we insert this attribute into the record? */
	if (used + asize > sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(ni, mi, attr))) {
		int diff = compare_attr(attr, type, name, name_len, upcase);

		if (diff < 0)
			continue;

		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		/* Append. */
		tail = 8;
		attr = Add2Ptr(rec, used - 8);
	} else {
		/* Insert before 'attr'. */
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(ni, mi);

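	/* Shift the tail to make room, then zero the reserved space. */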
	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}

/*
 * mi_remove_attr - Remove the attribute from the record.
 *
 * NOTE: After removal, 'attr' points to the next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
		u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
		if (!links) {
			/* Minor error. Not critical. */
		} else {
			ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
			ni->mi.dirty = true;
		}
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}

/*
 * mi_resize_attr - Resize an attribute within the record.
 *
 * bytes = "new attribute size" - "old attribute size"
 */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move tail. */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}

/*
 * mi_pack_runs - Pack runs into the MFT record.
 *
 * On failure the record is not changed.
 */
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		memmove(next, next + dsize, tail);
		return err;
	}

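	/* On success run_pack() returns the number of bytes actually packed. */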
	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}